CodeGenPrepare.cpp
1//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass munges the code in the input function to better prepare it for
10// SelectionDAG-based code generation. This works around limitations in its
11// basic-block-at-a-time approach. It should eventually be removed.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/Statistic.h"
45#include "llvm/Config/llvm-config.h"
46#include "llvm/IR/Argument.h"
47#include "llvm/IR/Attributes.h"
48#include "llvm/IR/BasicBlock.h"
49#include "llvm/IR/Constant.h"
50#include "llvm/IR/Constants.h"
51#include "llvm/IR/DataLayout.h"
52#include "llvm/IR/DebugInfo.h"
54#include "llvm/IR/Dominators.h"
55#include "llvm/IR/Function.h"
57#include "llvm/IR/GlobalValue.h"
59#include "llvm/IR/IRBuilder.h"
60#include "llvm/IR/InlineAsm.h"
61#include "llvm/IR/InstrTypes.h"
62#include "llvm/IR/Instruction.h"
65#include "llvm/IR/Intrinsics.h"
66#include "llvm/IR/IntrinsicsAArch64.h"
67#include "llvm/IR/LLVMContext.h"
68#include "llvm/IR/MDBuilder.h"
69#include "llvm/IR/Module.h"
70#include "llvm/IR/Operator.h"
73#include "llvm/IR/Statepoint.h"
74#include "llvm/IR/Type.h"
75#include "llvm/IR/Use.h"
76#include "llvm/IR/User.h"
77#include "llvm/IR/Value.h"
78#include "llvm/IR/ValueHandle.h"
79#include "llvm/IR/ValueMap.h"
81#include "llvm/Pass.h"
87#include "llvm/Support/Debug.h"
97#include <algorithm>
98#include <cassert>
99#include <cstdint>
100#include <iterator>
101#include <limits>
102#include <memory>
103#include <optional>
104#include <utility>
105#include <vector>
106
107using namespace llvm;
108using namespace llvm::PatternMatch;
109
110#define DEBUG_TYPE "codegenprepare"
111
112STATISTIC(NumBlocksElim, "Number of blocks eliminated");
113STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
114STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
115STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
116 "sunken Cmps");
117STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
118 "of sunken Casts");
119STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
120 "computations were sunk");
121STATISTIC(NumMemoryInstsPhiCreated,
122 "Number of phis created when address "
123 "computations were sunk to memory instructions");
124STATISTIC(NumMemoryInstsSelectCreated,
125 "Number of selects created when address "
126 "computations were sunk to memory instructions");
127STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
128STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
129STATISTIC(NumAndsAdded,
130 "Number of and mask instructions added to form ext loads");
131STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
132STATISTIC(NumRetsDup, "Number of return instructions duplicated");
133STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
134STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
135STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
136
138 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
139 cl::desc("Disable branch optimizations in CodeGenPrepare"));
140
141static cl::opt<bool>
142 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
143 cl::desc("Disable GC optimizations in CodeGenPrepare"));
144
145static cl::opt<bool>
146 DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
147 cl::init(false),
148 cl::desc("Disable select to branch conversion."));
149
150static cl::opt<bool>
151 AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
152 cl::desc("Address sinking in CGP using GEPs."));
153
154static cl::opt<bool>
155 EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
156 cl::desc("Enable sinking and/cmp into branches."));
157
159 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
160 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
161
163 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
164 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
165
167 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
168 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
169 "CodeGenPrepare"));
170
172 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
173 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
174 "optimization in CodeGenPrepare"));
175
177 "disable-preheader-prot", cl::Hidden, cl::init(false),
178 cl::desc("Disable protection against removing loop preheaders"));
179
181 "profile-guided-section-prefix", cl::Hidden, cl::init(true),
182 cl::desc("Use profile info to add section prefix for hot/cold functions"));
183
185 "profile-unknown-in-special-section", cl::Hidden,
186 cl::desc("In a profiling mode like sampleFDO, if a function doesn't have "
187 "a profile, we cannot tell for sure that the function is cold, "
188 "because it may be a newly added function that has never been "
189 "sampled. With this flag enabled, the compiler can put such "
190 "profile-unknown functions into a special section, so the runtime "
191 "system can choose to handle them differently from the .text "
192 "section, to save RAM for example."));
193
195 "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
196 cl::desc("Use the basic-block-sections profile to determine the text "
197 "section prefix for hot functions. Functions with "
198 "basic-block-sections profile will be placed in `.text.hot` "
199 "regardless of their FDO profile info. Other functions won't be "
200 "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
201 "profiles."));
202
204 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
205 cl::desc("Skip merging empty blocks if (frequency of empty block) / "
206 "(frequency of destination block) is greater than this ratio"));
207
209 "force-split-store", cl::Hidden, cl::init(false),
210 cl::desc("Force store splitting no matter what the target query says."));
211
213 "cgp-type-promotion-merge", cl::Hidden,
214 cl::desc("Enable merging of redundant sexts when one is dominating"
215 " the other."),
216 cl::init(true));
217
219 "disable-complex-addr-modes", cl::Hidden, cl::init(false),
220 cl::desc("Disables combining addressing modes with different parts "
221 "in optimizeMemoryInst."));
222
223static cl::opt<bool>
224 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
225 cl::desc("Allow creation of Phis in Address sinking."));
226
228 "addr-sink-new-select", cl::Hidden, cl::init(true),
229 cl::desc("Allow creation of selects in Address sinking."));
230
232 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
233 cl::desc("Allow combining of BaseReg field in Address sinking."));
234
236 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
237 cl::desc("Allow combining of BaseGV field in Address sinking."));
238
240 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
241 cl::desc("Allow combining of BaseOffs field in Address sinking."));
242
244 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
245 cl::desc("Allow combining of ScaledReg field in Address sinking."));
246
247static cl::opt<bool>
248 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
249 cl::init(true),
250 cl::desc("Enable splitting large offset of GEP."));
251
253 "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
254 cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
255
256static cl::opt<bool>
257 VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
258 cl::desc("Enable BFI update verification for "
259 "CodeGenPrepare."));
260
261static cl::opt<bool>
262 OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
263 cl::desc("Enable converting phi types in CodeGenPrepare"));
264
266 HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
267 cl::desc("Least number of BBs for a function to be considered huge."));
268
270 MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
272 cl::desc("Max number of address users to look at"));
273
274static cl::opt<bool>
275 DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
276 cl::desc("Disable elimination of dead PHI nodes."));
277
278namespace {
279
280enum ExtType {
281 ZeroExtension, // Zero extension has been seen.
282 SignExtension, // Sign extension has been seen.
283 BothExtension // This extension type is used if we saw sext after
284 // ZeroExtension had been set, or if we saw zext after
285 // SignExtension had been set. It makes the type
286 // information of a promoted instruction invalid.
287};
288
289enum ModifyDT {
290 NotModifyDT, // Do not modify any dominator tree.
291 ModifyBBDT, // Modify the basic block dominator tree.
292 ModifyInstDT // Modify the instruction dominance within a basic block.
293 // This usually means we move/delete/insert an instruction
294 // in a basic block, so we should re-iterate the instructions
295 // in such a basic block.
296};
297
298using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
299using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
300using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
302using ValueToSExts = MapVector<Value *, SExts>;
303
304class TypePromotionTransaction;
305
306class CodeGenPrepare {
307 friend class CodeGenPrepareLegacyPass;
308 const TargetMachine *TM = nullptr;
309 const TargetSubtargetInfo *SubtargetInfo = nullptr;
310 const TargetLowering *TLI = nullptr;
311 const TargetRegisterInfo *TRI = nullptr;
312 const TargetTransformInfo *TTI = nullptr;
313 const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
314 const TargetLibraryInfo *TLInfo = nullptr;
315 LoopInfo *LI = nullptr;
316 std::unique_ptr<BlockFrequencyInfo> BFI;
317 std::unique_ptr<BranchProbabilityInfo> BPI;
318 ProfileSummaryInfo *PSI = nullptr;
319
320 /// As we scan instructions optimizing them, this is the next instruction
321 /// to optimize. Transforms that can invalidate this should update it.
322 BasicBlock::iterator CurInstIterator;
323
324 /// Keeps track of non-local addresses that have been sunk into a block.
325 /// This allows us to avoid inserting duplicate code for blocks with
326 /// multiple load/stores of the same address. The usage of WeakTrackingVH
327 /// enables SunkAddrs to be treated as a cache whose entries can be
328 /// invalidated if a sunken address computation has been erased.
329 ValueMap<Value *, WeakTrackingVH> SunkAddrs;
330
331 /// Keeps track of all instructions inserted for the current function.
332 SetOfInstrs InsertedInsts;
333
334 /// Keeps track of the type of each related instruction before its
335 /// promotion for the current function.
336 InstrToOrigTy PromotedInsts;
337
338 /// Keep track of instructions removed during promotion.
339 SetOfInstrs RemovedInsts;
340
341 /// Keep track of sext chains based on their initial value.
342 DenseMap<Value *, Instruction *> SeenChainsForSExt;
343
344 /// Keep track of GEPs accessing the same data structures such as structs or
345 /// arrays that are candidates to be split later because of their large
346 /// size.
347 MapVector<AssertingVH<Value>,
349 LargeOffsetGEPMap;
350
351 /// Keep track of new GEP bases after splitting GEPs that have large offsets.
352 SmallSet<AssertingVH<Value>, 2> NewGEPBases;
353
354 /// Map large-offset GEPs to their serial numbers.
355 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
356
357 /// Keep track of SExt promoted.
358 ValueToSExts ValToSExtendedUses;
359
360 /// True if the function has the OptSize attribute.
361 bool OptSize;
362
363 /// DataLayout for the Function being processed.
364 const DataLayout *DL = nullptr;
365
366 /// Building the dominator tree can be expensive, so we only build it
367 /// lazily and update it when required.
368 std::unique_ptr<DominatorTree> DT;
369
370public:
371 CodeGenPrepare() = default;
372 CodeGenPrepare(const TargetMachine *TM) : TM(TM) {}
373 /// If we encounter a huge function, we need to limit the build time.
374 bool IsHugeFunc = false;
375
376 /// FreshBBs is like a worklist; it collects the updated BBs that need
377 /// to be optimized again.
378 /// Note: To keep the build time of this pass in check, when a BB is
379 /// updated we need to insert that BB into FreshBBs for huge functions.
380 SmallPtrSet<BasicBlock *, 32> FreshBBs;
381
382 void releaseMemory() {
383 // Clear per function information.
384 InsertedInsts.clear();
385 PromotedInsts.clear();
386 FreshBBs.clear();
387 BPI.reset();
388 BFI.reset();
389 }
390
391 bool run(Function &F, FunctionAnalysisManager &AM);
392
393private:
394 template <typename F>
395 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
396 // Substituting can cause recursive simplifications, which can invalidate
397 // our iterator. Use a WeakTrackingVH to hold onto it in case this
398 // happens.
399 Value *CurValue = &*CurInstIterator;
400 WeakTrackingVH IterHandle(CurValue);
401
402 f();
403
404 // If the iterator instruction was recursively deleted, start over at the
405 // start of the block.
406 if (IterHandle != CurValue) {
407 CurInstIterator = BB->begin();
408 SunkAddrs.clear();
409 }
410 }
411
412 // Get the DominatorTree, building if necessary.
413 DominatorTree &getDT(Function &F) {
414 if (!DT)
415 DT = std::make_unique<DominatorTree>(F);
416 return *DT;
417 }
418
419 void removeAllAssertingVHReferences(Value *V);
420 bool eliminateAssumptions(Function &F);
421 bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
422 bool eliminateMostlyEmptyBlocks(Function &F);
423 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
424 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
425 void eliminateMostlyEmptyBlock(BasicBlock *BB);
426 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
427 bool isPreheader);
428 bool makeBitReverse(Instruction &I);
429 bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
430 bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
431 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
432 unsigned AddrSpace);
433 bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
434 bool optimizeMulWithOverflow(Instruction *I, bool IsSigned,
435 ModifyDT &ModifiedDT);
436 bool optimizeInlineAsmInst(CallInst *CS);
437 bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
438 bool optimizeExt(Instruction *&I);
439 bool optimizeExtUses(Instruction *I);
440 bool optimizeLoadExt(LoadInst *Load);
441 bool optimizeShiftInst(BinaryOperator *BO);
442 bool optimizeFunnelShift(IntrinsicInst *Fsh);
443 bool optimizeSelectInst(SelectInst *SI);
444 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
445 bool optimizeSwitchType(SwitchInst *SI);
446 bool optimizeSwitchPhiConstants(SwitchInst *SI);
447 bool optimizeSwitchInst(SwitchInst *SI);
448 bool optimizeExtractElementInst(Instruction *Inst);
449 bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
450 bool fixupDbgVariableRecord(DbgVariableRecord &I);
451 bool fixupDbgVariableRecordsOnInst(Instruction &I);
452 bool placeDbgValues(Function &F);
453 bool placePseudoProbes(Function &F);
454 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
455 LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
456 bool tryToPromoteExts(TypePromotionTransaction &TPT,
457 const SmallVectorImpl<Instruction *> &Exts,
458 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
459 unsigned CreatedInstsCost = 0);
460 bool mergeSExts(Function &F);
461 bool splitLargeGEPOffsets();
462 bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
463 SmallPtrSetImpl<Instruction *> &DeletedInstrs);
464 bool optimizePhiTypes(Function &F);
465 bool performAddressTypePromotion(
466 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
467 bool HasPromoted, TypePromotionTransaction &TPT,
468 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
469 bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
470 bool simplifyOffsetableRelocate(GCStatepointInst &I);
471
472 bool tryToSinkFreeOperands(Instruction *I);
473 bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
474 CmpInst *Cmp, Intrinsic::ID IID);
475 bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
476 bool optimizeURem(Instruction *Rem);
477 bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
478 bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
479 bool unfoldPowerOf2Test(CmpInst *Cmp);
480 void verifyBFIUpdates(Function &F);
481 bool _run(Function &F);
482};
483
484class CodeGenPrepareLegacyPass : public FunctionPass {
485public:
486 static char ID; // Pass identification, replacement for typeid
487
488 CodeGenPrepareLegacyPass() : FunctionPass(ID) {
490 }
491
492 bool runOnFunction(Function &F) override;
493
494 StringRef getPassName() const override { return "CodeGen Prepare"; }
495
496 void getAnalysisUsage(AnalysisUsage &AU) const override {
497 // FIXME: When we can selectively preserve passes, preserve the domtree.
498 AU.addRequired<ProfileSummaryInfoWrapperPass>();
499 AU.addRequired<TargetLibraryInfoWrapperPass>();
500 AU.addRequired<TargetPassConfig>();
501 AU.addRequired<TargetTransformInfoWrapperPass>();
502 AU.addRequired<LoopInfoWrapperPass>();
503 AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
504 }
505};
506
507} // end anonymous namespace
508
509char CodeGenPrepareLegacyPass::ID = 0;
510
511bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
512 if (skipFunction(F))
513 return false;
514 auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
515 CodeGenPrepare CGP(TM);
516 CGP.DL = &F.getDataLayout();
517 CGP.SubtargetInfo = TM->getSubtargetImpl(F);
518 CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
519 CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
520 CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
521 CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
522 CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
523 CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
524 CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
525 CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
526 auto BBSPRWP =
527 getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
528 CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;
529
530 return CGP._run(F);
531}
532
533INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
534 "Optimize for code generation", false, false)
541INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
542 "Optimize for code generation", false, false)
543
545 return new CodeGenPrepareLegacyPass();
546}
547
550 CodeGenPrepare CGP(TM);
551
552 bool Changed = CGP.run(F, AM);
553 if (!Changed)
554 return PreservedAnalyses::all();
555
559 return PA;
560}
561
562bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
563 DL = &F.getDataLayout();
564 SubtargetInfo = TM->getSubtargetImpl(F);
565 TLI = SubtargetInfo->getTargetLowering();
566 TRI = SubtargetInfo->getRegisterInfo();
567 TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
569 LI = &AM.getResult<LoopAnalysis>(F);
570 BPI.reset(new BranchProbabilityInfo(F, *LI));
571 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
572 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
573 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
574 BBSectionsProfileReader =
576 return _run(F);
577}
578
579bool CodeGenPrepare::_run(Function &F) {
580 bool EverMadeChange = false;
581
582 OptSize = F.hasOptSize();
583 // Use the basic-block-sections profile to promote hot functions to .text.hot
584 // if requested.
585 if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
586 BBSectionsProfileReader->isFunctionHot(F.getName())) {
587 (void)F.setSectionPrefix("hot");
588 } else if (ProfileGuidedSectionPrefix) {
589 // The hot attribute overrides profile-count-based hotness, while
590 // profile-count-based hotness overrides the cold attribute.
591 // This is a conservative behavior.
592 if (F.hasFnAttribute(Attribute::Hot) ||
593 PSI->isFunctionHotInCallGraph(&F, *BFI))
594 (void)F.setSectionPrefix("hot");
595 // If PSI shows this function is not hot, we place the function
596 // into the unlikely section if (1) PSI shows this is a cold function, or
597 // (2) the function has the cold attribute.
598 else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
599 F.hasFnAttribute(Attribute::Cold))
600 (void)F.setSectionPrefix("unlikely");
603 (void)F.setSectionPrefix("unknown");
604 }
605
606 /// This optimization identifies DIV instructions that can be
607 /// profitably bypassed and carried out with a shorter, faster divide.
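  // Illustrative sketch (not the emitted IR): on a target where 64-bit
  // division is slow, a 'udiv i64 %a, %b' whose operands happen to fit in 32
  // bits is handled by bypassSlowDivision with a runtime check on the high
  // bits plus a 32-bit udiv on the fast path, keeping the original 64-bit
  // udiv for the slow path.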
608 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
609 const DenseMap<unsigned int, unsigned int> &BypassWidths =
611 BasicBlock *BB = &*F.begin();
612 while (BB != nullptr) {
613 // bypassSlowDivision may create new BBs, but we don't want to reapply the
614 // optimization to those blocks.
615 BasicBlock *Next = BB->getNextNode();
616 if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
617 EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
618 BB = Next;
619 }
620 }
621
622 // Get rid of @llvm.assume builtins before attempting to eliminate empty
623 // blocks, since there might be blocks that only contain @llvm.assume calls
624 // (plus arguments that we can get rid of).
625 EverMadeChange |= eliminateAssumptions(F);
626
627 // Eliminate blocks that contain only PHI nodes and an
628 // unconditional branch.
629 EverMadeChange |= eliminateMostlyEmptyBlocks(F);
630
631 ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
633 EverMadeChange |= splitBranchCondition(F, ModifiedDT);
634
635 // Split some critical edges where one of the sources is an indirect branch,
636 // to help generate sane code for PHIs involving such edges.
637 EverMadeChange |=
638 SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
639
640 // If we are optimizing a huge function, we need to consider the build time
641 // because the basic algorithm's complexity is near O(N!).
642 IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
643
644 // Transformations above may invalidate dominator tree and/or loop info.
645 DT.reset();
646 LI->releaseMemory();
647 LI->analyze(getDT(F));
648
649 bool MadeChange = true;
650 bool FuncIterated = false;
651 while (MadeChange) {
652 MadeChange = false;
653
654 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
655 if (FuncIterated && !FreshBBs.contains(&BB))
656 continue;
657
658 ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
659 bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
660
661 if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
662 DT.reset();
663
664 MadeChange |= Changed;
665 if (IsHugeFunc) {
666 // If the BB is updated, it may still have a chance to be optimized.
667 // This usually happens with sink optimizations.
668 // For example:
669 //
670 // bb0:
671 // %and = and i32 %a, 4
672 // %cmp = icmp eq i32 %and, 0
673 //
674 // If %cmp sinks to another BB, %and will have a chance to sink as well.
675 if (Changed)
676 FreshBBs.insert(&BB);
677 else if (FuncIterated)
678 FreshBBs.erase(&BB);
679 } else {
680 // For small/normal functions, we restart BB iteration if the dominator
681 // tree of the Function was changed.
682 if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
683 break;
684 }
685 }
686 // We have iterated over all BBs in the function (this only matters for huge functions).
687 FuncIterated = IsHugeFunc;
688
689 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
690 MadeChange |= mergeSExts(F);
691 if (!LargeOffsetGEPMap.empty())
692 MadeChange |= splitLargeGEPOffsets();
693 MadeChange |= optimizePhiTypes(F);
694
695 if (MadeChange)
696 eliminateFallThrough(F, DT.get());
697
698#ifndef NDEBUG
699 if (MadeChange && VerifyLoopInfo)
700 LI->verify(getDT(F));
701#endif
702
703 // Really free removed instructions during promotion.
704 for (Instruction *I : RemovedInsts)
705 I->deleteValue();
706
707 EverMadeChange |= MadeChange;
708 SeenChainsForSExt.clear();
709 ValToSExtendedUses.clear();
710 RemovedInsts.clear();
711 LargeOffsetGEPMap.clear();
712 LargeOffsetGEPID.clear();
713 }
714
715 NewGEPBases.clear();
716 SunkAddrs.clear();
717
718 if (!DisableBranchOpts) {
719 MadeChange = false;
720 // Use a set vector to get deterministic iteration order. The order in
721 // which blocks are removed may affect whether or not PHI nodes in successors
722 // are removed.
723 SmallSetVector<BasicBlock *, 8> WorkList;
724 for (BasicBlock &BB : F) {
726 MadeChange |= ConstantFoldTerminator(&BB, true);
727 if (!MadeChange)
728 continue;
729
730 for (BasicBlock *Succ : Successors)
731 if (pred_empty(Succ))
732 WorkList.insert(Succ);
733 }
734
735 // Delete the dead blocks and any of their dead successors.
736 MadeChange |= !WorkList.empty();
737 while (!WorkList.empty()) {
738 BasicBlock *BB = WorkList.pop_back_val();
740
741 DeleteDeadBlock(BB);
742
743 for (BasicBlock *Succ : Successors)
744 if (pred_empty(Succ))
745 WorkList.insert(Succ);
746 }
747
748 // Merge pairs of basic blocks with unconditional branches, connected by
749 // a single edge.
750 if (EverMadeChange || MadeChange)
751 MadeChange |= eliminateFallThrough(F);
752
753 EverMadeChange |= MadeChange;
754 }
755
756 if (!DisableGCOpts) {
758 for (BasicBlock &BB : F)
759 for (Instruction &I : BB)
760 if (auto *SP = dyn_cast<GCStatepointInst>(&I))
761 Statepoints.push_back(SP);
762 for (auto &I : Statepoints)
763 EverMadeChange |= simplifyOffsetableRelocate(*I);
764 }
765
766 // Do this last to clean up use-before-def scenarios introduced by other
767 // preparatory transforms.
768 EverMadeChange |= placeDbgValues(F);
769 EverMadeChange |= placePseudoProbes(F);
770
771#ifndef NDEBUG
773 verifyBFIUpdates(F);
774#endif
775
776 return EverMadeChange;
777}
778
779bool CodeGenPrepare::eliminateAssumptions(Function &F) {
780 bool MadeChange = false;
781 for (BasicBlock &BB : F) {
782 CurInstIterator = BB.begin();
783 while (CurInstIterator != BB.end()) {
784 Instruction *I = &*(CurInstIterator++);
785 if (auto *Assume = dyn_cast<AssumeInst>(I)) {
786 MadeChange = true;
787 Value *Operand = Assume->getOperand(0);
788 Assume->eraseFromParent();
789
790 resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
791 RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
792 });
793 }
794 }
795 }
796 return MadeChange;
797}
798
799/// An instruction is about to be deleted, so remove all references to it in our
800/// GEP-tracking data structures.
801void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
802 LargeOffsetGEPMap.erase(V);
803 NewGEPBases.erase(V);
804
806 if (!GEP)
807 return;
808
809 LargeOffsetGEPID.erase(GEP);
810
811 auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
812 if (VecI == LargeOffsetGEPMap.end())
813 return;
814
815 auto &GEPVector = VecI->second;
816 llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
817
818 if (GEPVector.empty())
819 LargeOffsetGEPMap.erase(VecI);
820}
821
822// Verify BFI has been updated correctly by recomputing BFI and comparing them.
823[[maybe_unused]] void CodeGenPrepare::verifyBFIUpdates(Function &F) {
824 DominatorTree NewDT(F);
825 LoopInfo NewLI(NewDT);
826 BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
827 BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
828 NewBFI.verifyMatch(*BFI);
829}
830
831/// Merge basic blocks which are connected by a single edge, where one of the
832/// basic blocks has a single successor pointing to the other basic block,
833/// which has a single predecessor.
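/// Illustrative sketch: if bb1 ends in an unconditional 'br label %bb2' and
/// bb2 has no other predecessor, bb2's instructions are folded into bb1 and
/// bb2 is erased.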
834bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
835 bool Changed = false;
836 // Scan all of the blocks in the function, except for the entry block.
837 // Use a temporary array to avoid iterator being invalidated when
838 // deleting blocks.
841
842 SmallSet<WeakTrackingVH, 16> Preds;
843 for (auto &Block : Blocks) {
845 if (!BB)
846 continue;
847 // If the destination block has a single pred, then this is a trivial
848 // edge, just collapse it.
849 BasicBlock *SinglePred = BB->getSinglePredecessor();
850
851 // Don't merge if BB's address is taken.
852 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
853 continue;
854
855 // Make an effort to skip unreachable blocks.
856 if (DT && !DT->isReachableFromEntry(BB))
857 continue;
858
859 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
860 if (Term && !Term->isConditional()) {
861 Changed = true;
862 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
863
864 // Merge BB into SinglePred and delete it.
865 MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
866 /* MemDep */ nullptr,
867 /* PredecessorWithTwoSuccessors */ false, DT);
868 Preds.insert(SinglePred);
869
870 if (IsHugeFunc) {
871 // Update FreshBBs to optimize the merged BB.
872 FreshBBs.insert(SinglePred);
873 FreshBBs.erase(BB);
874 }
875 }
876 }
877
878 // (Repeatedly) merging blocks into their predecessors can create redundant
879 // debug intrinsics.
880 for (const auto &Pred : Preds)
881 if (auto *BB = cast_or_null<BasicBlock>(Pred))
883
884 return Changed;
885}
886
887/// Find a destination block from BB if BB is mergeable empty block.
888BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
889 // If this block doesn't end with an uncond branch, ignore it.
890 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
891 if (!BI || !BI->isUnconditional())
892 return nullptr;
893
894 // If the instruction before the branch (skipping debug info) isn't a phi
895 // node, then other stuff is happening here.
897 if (BBI != BB->begin()) {
898 --BBI;
899 if (!isa<PHINode>(BBI))
900 return nullptr;
901 }
902
903 // Do not break infinite loops.
904 BasicBlock *DestBB = BI->getSuccessor(0);
905 if (DestBB == BB)
906 return nullptr;
907
908 if (!canMergeBlocks(BB, DestBB))
909 DestBB = nullptr;
910
911 return DestBB;
912}
913
914/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
915/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
916/// edges in ways that are non-optimal for isel. Start by eliminating these
917/// blocks so we can split them the way we want them.
918bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
919 SmallPtrSet<BasicBlock *, 16> Preheaders;
920 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
921 while (!LoopList.empty()) {
922 Loop *L = LoopList.pop_back_val();
923 llvm::append_range(LoopList, *L);
924 if (BasicBlock *Preheader = L->getLoopPreheader())
925 Preheaders.insert(Preheader);
926 }
927
928 bool MadeChange = false;
929 // Copy blocks into a temporary array to avoid iterator invalidation issues
930 // as we remove them.
931 // Note that this intentionally skips the entry block.
933 for (auto &Block : llvm::drop_begin(F)) {
934 // Delete phi nodes that could block deleting other empty blocks.
936 MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
937 Blocks.push_back(&Block);
938 }
939
940 for (auto &Block : Blocks) {
942 if (!BB)
943 continue;
944 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
945 if (!DestBB ||
946 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
947 continue;
948
949 eliminateMostlyEmptyBlock(BB);
950 MadeChange = true;
951 }
952 return MadeChange;
953}
954
955bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
956 BasicBlock *DestBB,
957 bool isPreheader) {
958 // Do not delete loop preheaders if doing so would create a critical edge.
959 // Loop preheaders can be good locations to spill registers. If the
960 // preheader is deleted and we create a critical edge, registers may be
961 // spilled in the loop body instead.
962 if (!DisablePreheaderProtect && isPreheader &&
963 !(BB->getSinglePredecessor() &&
965 return false;
966
967 // Skip merging if the block's successor is also a successor to any callbr
968 // that leads to this block.
969 // FIXME: Is this really needed? Is this a correctness issue?
970 for (BasicBlock *Pred : predecessors(BB)) {
971 if (isa<CallBrInst>(Pred->getTerminator()) &&
972 llvm::is_contained(successors(Pred), DestBB))
973 return false;
974 }
975
976 // Try to skip merging if the unique predecessor of BB is terminated by a
977 // switch or indirect branch instruction, and BB is used as an incoming block
978 // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
979 // add COPY instructions in the predecessor of BB instead of BB (if it is not
980 // merged). Note that the critical edge created by merging such blocks wont be
981 // split in MachineSink because the jump table is not analyzable. By keeping
982 // such empty block (BB), ISel will place COPY instructions in BB, not in the
983 // predecessor of BB.
984 BasicBlock *Pred = BB->getUniquePredecessor();
985 if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
987 return true;
988
989 if (BB->getTerminator() != &*BB->getFirstNonPHIOrDbg())
990 return true;
991
992 // We use a simple cost heuristic which determines that skipping merging is
993 // profitable if the cost of skipping merging is less than the cost of
994 // merging : Cost(skipping merging) < Cost(merging BB), where the
995 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
996 // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
997 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
998 // Freq(Pred) / Freq(BB) > 2.
999 // Note that if there are multiple empty blocks sharing the same incoming
1000 // value for the PHIs in the DestBB, we consider them together. In such
1001 // case, Cost(merging BB) will be the sum of their frequencies.
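  // Worked example with illustrative numbers: if Freq(BB) = 3 and
  // Freq(Pred) = 8, then Cost(skipping merging) = 3 * (Cost(Copy) +
  // Cost(Branch)) = 6 and Cost(merging BB) = 8 * Cost(Copy) = 8 (taking both
  // unit costs as 1), so Freq(Pred) / Freq(BB) > 2 and skipping the merge is
  // the cheaper choice.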
1002
1003 if (!isa<PHINode>(DestBB->begin()))
1004 return true;
1005
1006 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
1007
1008 // Find all other incoming blocks from which incoming values of all PHIs in
1009 // DestBB are the same as the ones from BB.
1010 for (BasicBlock *DestBBPred : predecessors(DestBB)) {
1011 if (DestBBPred == BB)
1012 continue;
1013
1014 if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
1015 return DestPN.getIncomingValueForBlock(BB) ==
1016 DestPN.getIncomingValueForBlock(DestBBPred);
1017 }))
1018 SameIncomingValueBBs.insert(DestBBPred);
1019 }
1020
1021 // See if all of BB's incoming values are the same as the value from Pred.
1022 // In this case, there is no reason to skip merging because COPYs are
1023 // expected to be placed in Pred already.
1024 if (SameIncomingValueBBs.count(Pred))
1025 return true;
1026
1027 BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
1028 BlockFrequency BBFreq = BFI->getBlockFreq(BB);
1029
1030 for (auto *SameValueBB : SameIncomingValueBBs)
1031 if (SameValueBB->getUniquePredecessor() == Pred &&
1032 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
1033 BBFreq += BFI->getBlockFreq(SameValueBB);
1034
1035 std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
1036 return !Limit || PredFreq <= *Limit;
1037}
1038
1039/// Return true if we can merge BB into DestBB if there is a single
1040/// unconditional branch between them, and BB contains no other non-phi
1041/// instructions.
1042bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
1043 const BasicBlock *DestBB) const {
1044 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
1045 // the successor. If there are more complex conditions (e.g. preheaders),
1046 // don't mess around with them.
1047 for (const PHINode &PN : BB->phis()) {
1048 for (const User *U : PN.users()) {
1049 const Instruction *UI = cast<Instruction>(U);
1050 if (UI->getParent() != DestBB || !isa<PHINode>(UI))
1051 return false;
1052 // If User is inside DestBB block and it is a PHINode then check
1053 // incoming value. If incoming value is not from BB then this is
1054 // a complex condition (e.g. preheaders) we want to avoid here.
1055 if (UI->getParent() == DestBB) {
1056 if (const PHINode *UPN = dyn_cast<PHINode>(UI))
1057 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
1058 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
1059 if (Insn && Insn->getParent() == BB &&
1060 Insn->getParent() != UPN->getIncomingBlock(I))
1061 return false;
1062 }
1063 }
1064 }
1065 }
1066
1067 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
1068 // and DestBB may have conflicting incoming values for the block. If so, we
1069 // can't merge the block.
1070 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
1071 if (!DestBBPN)
1072 return true; // no conflict.
1073
1074 // Collect the preds of BB.
1075 SmallPtrSet<const BasicBlock *, 16> BBPreds;
1076 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1077 // It is faster to get preds from a PHI than with pred_iterator.
1078 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1079 BBPreds.insert(BBPN->getIncomingBlock(i));
1080 } else {
1081 BBPreds.insert_range(predecessors(BB));
1082 }
1083
1084 // Walk the preds of DestBB.
1085 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1086 BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1087 if (BBPreds.count(Pred)) { // Common predecessor?
1088 for (const PHINode &PN : DestBB->phis()) {
1089 const Value *V1 = PN.getIncomingValueForBlock(Pred);
1090 const Value *V2 = PN.getIncomingValueForBlock(BB);
1091
1092 // If V2 is a phi node in BB, look up what the mapped value will be.
1093 if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1094 if (V2PN->getParent() == BB)
1095 V2 = V2PN->getIncomingValueForBlock(Pred);
1096
1097 // If there is a conflict, bail out.
1098 if (V1 != V2)
1099 return false;
1100 }
1101 }
1102 }
1103
1104 return true;
1105}
1106
1107/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1108static void replaceAllUsesWith(Value *Old, Value *New,
1110 bool IsHuge) {
1111 auto *OldI = dyn_cast<Instruction>(Old);
1112 if (OldI) {
1113 for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1114 UI != E; ++UI) {
1116 if (IsHuge)
1117 FreshBBs.insert(User->getParent());
1118 }
1119 }
1120 Old->replaceAllUsesWith(New);
1121}
1122
1123/// Eliminate a basic block that has only phi's and an unconditional branch in
1124/// it.
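/// A minimal sketch of the transformation (illustrative IR):
///   bb:                                      ; preds = %pred1, %pred2
///     %p = phi i32 [ %x, %pred1 ], [ %y, %pred2 ]
///     br label %dest
/// After elimination, a PHI in %dest that had the incoming pair [ %p, %bb ]
/// instead receives [ %x, %pred1 ] and [ %y, %pred2 ].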
1125void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1126 BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1127 BasicBlock *DestBB = BI->getSuccessor(0);
1128
1129 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1130 << *BB << *DestBB);
1131
1132 // If the destination block has a single pred, then this is a trivial edge,
1133 // just collapse it.
1134 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1135 if (SinglePred != DestBB) {
1136 assert(SinglePred == BB &&
1137 "Single predecessor not the same as predecessor");
1138 // Merge DestBB into SinglePred/BB and delete it.
1140 // Note: BB(=SinglePred) will not be deleted on this path.
1141 // DestBB(=its single successor) is the one that was deleted.
1142 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1143
1144 if (IsHugeFunc) {
1145 // Update FreshBBs to optimize the merged BB.
1146 FreshBBs.insert(SinglePred);
1147 FreshBBs.erase(DestBB);
1148 }
1149 return;
1150 }
1151 }
1152
1153 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
1154 // to handle the new incoming edges it is about to have.
1155 for (PHINode &PN : DestBB->phis()) {
1156 // Remove the incoming value for BB, and remember it.
1157 Value *InVal = PN.removeIncomingValue(BB, false);
1158
1159 // Two options: either the InVal is a phi node defined in BB or it is some
1160 // value that dominates BB.
1161 PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1162 if (InValPhi && InValPhi->getParent() == BB) {
1163 // Add all of the input values of the input PHI as inputs of this phi.
1164 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1165 PN.addIncoming(InValPhi->getIncomingValue(i),
1166 InValPhi->getIncomingBlock(i));
1167 } else {
1168 // Otherwise, add one instance of the dominating value for each edge that
1169 // we will be adding.
1170 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1171 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1172 PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1173 } else {
1174 for (BasicBlock *Pred : predecessors(BB))
1175 PN.addIncoming(InVal, Pred);
1176 }
1177 }
1178 }
1179
1180 // Preserve loop Metadata.
1181 if (BI->hasMetadata(LLVMContext::MD_loop)) {
1182 for (auto *Pred : predecessors(BB))
1183 Pred->getTerminator()->copyMetadata(*BI, LLVMContext::MD_loop);
1184 }
1185
1186 // The PHIs are now updated, change everything that refers to BB to use
1187 // DestBB and remove BB.
1188 BB->replaceAllUsesWith(DestBB);
1189 BB->eraseFromParent();
1190 ++NumBlocksElim;
1191
1192 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1193}
1194
1195// Computes a map of base pointer relocation instructions to corresponding
1196// derived pointer relocation instructions given a vector of all relocate calls
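// Illustrative example: for relocates carrying the (base, derived) index
// pairs (4,4), (4,5) and (4,6), the resulting map is
//   { relocate(4,4) -> [ relocate(4,5), relocate(4,6) ] },
// i.e. base relocations (equal indices) become the keys.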
1198 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1200 &RelocateInstMap) {
1201 // Collect information in two maps: one primarily for locating the base object
1202 // while filling the second map; the second map is the final structure holding
1203 // a mapping between Base and corresponding Derived relocate calls
1205 for (auto *ThisRelocate : AllRelocateCalls) {
1206 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1207 ThisRelocate->getDerivedPtrIndex());
1208 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1209 }
1210 for (auto &Item : RelocateIdxMap) {
1211 std::pair<unsigned, unsigned> Key = Item.first;
1212 if (Key.first == Key.second)
1213 // Base relocation: nothing to insert
1214 continue;
1215
1216 GCRelocateInst *I = Item.second;
1217 auto BaseKey = std::make_pair(Key.first, Key.first);
1218
1219 // We're iterating over RelocateIdxMap so we cannot modify it.
1220 auto MaybeBase = RelocateIdxMap.find(BaseKey);
1221 if (MaybeBase == RelocateIdxMap.end())
1222 // TODO: We might want to insert a new base object relocate and gep off
1223 // that, if there are enough derived object relocates.
1224 continue;
1225
1226 RelocateInstMap[MaybeBase->second].push_back(I);
1227 }
1228}
1229
1230// Accepts a GEP and extracts the operands into a vector provided they're all
1231// small integer constants
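// Illustrative example: for
//   %d = getelementptr %struct.S, ptr %base, i32 0, i32 3
// OffsetV becomes { i32 0, i32 3 }; a non-constant index or a constant larger
// than 20 makes this helper return false and leaves OffsetV untouched.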
1233 SmallVectorImpl<Value *> &OffsetV) {
1234 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1235 // Only accept small constant integer operands
1236 auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1237 if (!Op || Op->getZExtValue() > 20)
1238 return false;
1239 }
1240
1241 for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1242 OffsetV.push_back(GEP->getOperand(i));
1243 return true;
1244}
1245
1246// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1247// replace, computes a replacement, and applies it.
1248static bool
1250 const SmallVectorImpl<GCRelocateInst *> &Targets) {
1251 bool MadeChange = false;
1252 // We must ensure that the relocation of the derived pointer is defined after
1253 // the relocation of the base pointer. If we find a relocation corresponding
1254 // to the base that is defined earlier than the relocation of the base, we
1255 // move the relocation of the base right before the found relocation. We
1256 // consider only relocations in the same basic block as the relocation of the
1257 // base; relocations from other basic blocks are skipped by this optimization.
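  // Illustrative ordering (pseudo-IR, indices shown as (base, derived)):
  //   %d = gc.relocate(%tok, 4, 5)   ; derived relocate defined first
  //   %b = gc.relocate(%tok, 4, 4)   ; base relocate defined later
  // is reordered so that %b is moved right before %d.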
1258 for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1259 &*R != RelocatedBase; ++R)
1260 if (auto *RI = dyn_cast<GCRelocateInst>(R))
1261 if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1262 if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1263 RelocatedBase->moveBefore(RI->getIterator());
1264 MadeChange = true;
1265 break;
1266 }
1267
1268 for (GCRelocateInst *ToReplace : Targets) {
1269 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1270 "Not relocating a derived object of the original base object");
1271 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1272 // A duplicate relocate call. TODO: coalesce duplicates.
1273 continue;
1274 }
1275
1276 if (RelocatedBase->getParent() != ToReplace->getParent()) {
1277 // Base and derived relocates are in different basic blocks.
1278 // In this case transform is only valid when base dominates derived
1279 // relocate. However it would be too expensive to check dominance
1280 // for each such relocate, so we skip the whole transformation.
1281 continue;
1282 }
1283
1284 Value *Base = ToReplace->getBasePtr();
1285 auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1286 if (!Derived || Derived->getPointerOperand() != Base)
1287 continue;
1288
1290 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1291 continue;
1292
1293 // Create a Builder and replace the target callsite with a gep
1294 assert(RelocatedBase->getNextNode() &&
1295 "Should always have one since it's not a terminator");
1296
1297 // Insert after RelocatedBase
1298 IRBuilder<> Builder(RelocatedBase->getNextNode());
1299 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1300
1301 // If gc_relocate does not match the actual type, cast it to the right type.
1302 // In theory, there must be a bitcast after gc_relocate if the type does not
1303 // match, and we should reuse it to get the derived pointer. But there could
1304 // be cases like this:
1305 // bb1:
1306 // ...
1307 // %g1 = call coldcc i8 addrspace(1)*
1308 // @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1309 //
1310 // bb2:
1311 // ...
1312 // %g2 = call coldcc i8 addrspace(1)*
1313 // @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1314 //
1315 // merge:
1316 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1317 // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1318 //
1319 // In this case, we can no longer find the bitcast. So we insert a new
1320 // bitcast whether there is already one or not. In this way, we can handle
1321 // all cases, and the extra bitcast should be optimized away in later
1322 // passes.
1323 Value *ActualRelocatedBase = RelocatedBase;
1324 if (RelocatedBase->getType() != Base->getType()) {
1325 ActualRelocatedBase =
1326 Builder.CreateBitCast(RelocatedBase, Base->getType());
1327 }
1328 Value *Replacement =
1329 Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1330 ArrayRef(OffsetV));
1331 Replacement->takeName(ToReplace);
1332 // If the newly generated derived pointer's type does not match the original
1333 // derived pointer's type, cast the new derived pointer to match it. Same
1334 // reasoning as above.
1335 Value *ActualReplacement = Replacement;
1336 if (Replacement->getType() != ToReplace->getType()) {
1337 ActualReplacement =
1338 Builder.CreateBitCast(Replacement, ToReplace->getType());
1339 }
1340 ToReplace->replaceAllUsesWith(ActualReplacement);
1341 ToReplace->eraseFromParent();
1342
1343 MadeChange = true;
1344 }
1345 return MadeChange;
1346}
1347
1348// Turns this:
1349//
1350// %base = ...
1351// %ptr = gep %base + 15
1352// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1353// %base' = relocate(%tok, i32 4, i32 4)
1354// %ptr' = relocate(%tok, i32 4, i32 5)
1355// %val = load %ptr'
1356//
1357// into this:
1358//
1359// %base = ...
1360// %ptr = gep %base + 15
1361// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1362// %base' = gc.relocate(%tok, i32 4, i32 4)
1363// %ptr' = gep %base' + 15
1364// %val = load %ptr'
1365bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1366 bool MadeChange = false;
1367 SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1368 for (auto *U : I.users())
1369 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1370 // Collect all the relocate calls associated with a statepoint
1371 AllRelocateCalls.push_back(Relocate);
1372
1373 // We need at least one base pointer relocation + one derived pointer
1374 // relocation to mangle
1375 if (AllRelocateCalls.size() < 2)
1376 return false;
1377
1378 // RelocateInstMap is a mapping from the base relocate instruction to the
1379 // corresponding derived relocate instructions
1380 MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
1381 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1382 if (RelocateInstMap.empty())
1383 return false;
1384
1385 for (auto &Item : RelocateInstMap)
1386 // Item.first is the RelocatedBase to offset against
1387 // Item.second is the vector of Targets to replace
1388 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1389 return MadeChange;
1390}
1391
1392/// Sink the specified cast instruction into its user blocks.
1393static bool SinkCast(CastInst *CI) {
1394 BasicBlock *DefBB = CI->getParent();
1395
1396 /// InsertedCasts - Only insert a cast in each block once.
1398
1399 bool MadeChange = false;
1400 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1401 UI != E;) {
1402 Use &TheUse = UI.getUse();
1404
1405 // Figure out which BB this cast is used in. For PHI's this is the
1406 // appropriate predecessor block.
1407 BasicBlock *UserBB = User->getParent();
1408 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1409 UserBB = PN->getIncomingBlock(TheUse);
1410 }
1411
1412 // Preincrement use iterator so we don't invalidate it.
1413 ++UI;
1414
1415 // The first insertion point of a block containing an EH pad is after the
1416 // pad. If the pad is the user, we cannot sink the cast past the pad.
1417 if (User->isEHPad())
1418 continue;
1419
1420 // If the block selected to receive the cast is an EH pad that does not
1421 // allow non-PHI instructions before the terminator, we can't sink the
1422 // cast.
1423 if (UserBB->getTerminator()->isEHPad())
1424 continue;
1425
1426 // If this user is in the same block as the cast, don't change the cast.
1427 if (UserBB == DefBB)
1428 continue;
1429
1430 // If we have already inserted a cast into this block, use it.
1431 CastInst *&InsertedCast = InsertedCasts[UserBB];
1432
1433 if (!InsertedCast) {
1434 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1435 assert(InsertPt != UserBB->end());
1436 InsertedCast = cast<CastInst>(CI->clone());
1437 InsertedCast->insertBefore(*UserBB, InsertPt);
1438 }
1439
1440 // Replace a use of the cast with a use of the new cast.
1441 TheUse = InsertedCast;
1442 MadeChange = true;
1443 ++NumCastUses;
1444 }
1445
1446 // If we removed all uses, nuke the cast.
1447 if (CI->use_empty()) {
1448 salvageDebugInfo(*CI);
1449 CI->eraseFromParent();
1450 MadeChange = true;
1451 }
1452
1453 return MadeChange;
1454}
1455
1456/// If the specified cast instruction is a noop copy (e.g. it's casting from
1457/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1458/// reduce the number of virtual registers that must be created and coalesced.
1459///
1460/// Return true if any changes are made.
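/// Illustrative example: on a target where a trunc from i64 to i32 is a noop
/// after type legalization, a trunc defined in the entry block but used only
/// in a distant successor block gets a clone in that user block, so the value
/// does not have to be kept live across the intervening blocks.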
1462 const DataLayout &DL) {
1463 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1464 // than sinking only nop casts, but is helpful on some platforms.
1465 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1466 if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1467 ASC->getDestAddressSpace()))
1468 return false;
1469 }
1470
1471 // If this is a noop copy,
1472 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1473 EVT DstVT = TLI.getValueType(DL, CI->getType());
1474
1475 // This is an fp<->int conversion?
1476 if (SrcVT.isInteger() != DstVT.isInteger())
1477 return false;
1478
1479 // If this is an extension, it will be a zero or sign extension, which
1480 // isn't a noop.
1481 if (SrcVT.bitsLT(DstVT))
1482 return false;
1483
1484 // If these values will be promoted, find out what they will be promoted
1485 // to. This helps us consider truncates on PPC as noop copies when they
1486 // are.
1487 if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1489 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1490 if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1492 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1493
1494 // If, after promotion, these are the same types, this is a noop copy.
1495 if (SrcVT != DstVT)
1496 return false;
1497
1498 return SinkCast(CI);
1499}
1500
1501// Match a simple increment by constant operation. Note that if a sub is
1502// matched, the step is negated (as if the step had been canonicalized to
1503// an add, even though we leave the instruction alone).
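// Illustrative examples: '%iv.next = add i32 %iv, 4' matches with Step == 4,
// while '%iv.next = sub i32 %iv, 4' matches with Step == -4, as if the sub
// had already been rewritten as an add of the negated constant.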
1504static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1505 Constant *&Step) {
1506 if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1508 m_Instruction(LHS), m_Constant(Step)))))
1509 return true;
1510 if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1512 m_Instruction(LHS), m_Constant(Step))))) {
1513 Step = ConstantExpr::getNeg(Step);
1514 return true;
1515 }
1516 return false;
1517}
1518
1519/// If given \p PN is an inductive variable with value IVInc coming from the
1520/// backedge, and on each iteration it gets increased by Step, return pair
1521/// <IVInc, Step>. Otherwise, return std::nullopt.
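/// A minimal sketch of the matched shape (illustrative IR):
///   loop:
///     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
///     ...
///     %iv.next = add i32 %iv, 1
///     br i1 %cond, label %loop, label %exit
/// Here getIVIncrement(%iv, LI) returns the pair <%iv.next, i32 1>.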
1522static std::optional<std::pair<Instruction *, Constant *>>
1523getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1524 const Loop *L = LI->getLoopFor(PN->getParent());
1525 if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1526 return std::nullopt;
1527 auto *IVInc =
1528 dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1529 if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1530 return std::nullopt;
1531 Instruction *LHS = nullptr;
1532 Constant *Step = nullptr;
1533 if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1534 return std::make_pair(IVInc, Step);
1535 return std::nullopt;
1536}
1537
1538static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1539 auto *I = dyn_cast<Instruction>(V);
1540 if (!I)
1541 return false;
1542 Instruction *LHS = nullptr;
1543 Constant *Step = nullptr;
1544 if (!matchIncrement(I, LHS, Step))
1545 return false;
1546 if (auto *PN = dyn_cast<PHINode>(LHS))
1547 if (auto IVInc = getIVIncrement(PN, LI))
1548 return IVInc->first == I;
1549 return false;
1550}
1551
1552bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1553 Value *Arg0, Value *Arg1,
1554 CmpInst *Cmp,
1555 Intrinsic::ID IID) {
1556 auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1557 if (!isIVIncrement(BO, LI))
1558 return false;
1559 const Loop *L = LI->getLoopFor(BO->getParent());
1560 assert(L && "L should not be null after isIVIncrement()");
1561 // Do not risk moving the increment into a child loop.
1562 if (LI->getLoopFor(Cmp->getParent()) != L)
1563 return false;
1564
1565 // Finally, we need to ensure that the insert point will dominate all
1566 // existing uses of the increment.
1567
1568 auto &DT = getDT(*BO->getParent()->getParent());
1569 if (DT.dominates(Cmp->getParent(), BO->getParent()))
1570 // If we're moving up the dom tree, all uses are trivially dominated.
1571 // (This is the common case for code produced by LSR.)
1572 return true;
1573
1574 // Otherwise, special case the single use in the phi recurrence.
1575 return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1576 };
1577 if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1578 // We used to use a dominator tree here to allow multi-block optimization.
1579 // But that was problematic because:
1580 // 1. It could cause a perf regression by hoisting the math op into the
1581 // critical path.
1582 // 2. It could cause a perf regression by creating a value that was live
1583 // across multiple blocks and increasing register pressure.
1584 // 3. Use of a dominator tree could cause large compile-time regression.
1585 // This is because we recompute the DT on every change in the main CGP
1586 // run-loop. The recomputing is probably unnecessary in many cases, so if
1587 // that was fixed, using a DT here would be ok.
1588 //
1589 // There is one important particular case we still want to handle: if BO is
1590 // the IV increment. Important properties that make it profitable:
1591 // - We can speculate IV increment anywhere in the loop (as long as the
1592 // indvar Phi is its only user);
1593 // - Upon computing Cmp, we effectively compute something equivalent to the
1594 // IV increment (even though it is expressed differently in the IR). So moving it up
1595 // to the cmp point does not really increase register pressure.
1596 return false;
1597 }
1598
1599 // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1600 if (BO->getOpcode() == Instruction::Add &&
1601 IID == Intrinsic::usub_with_overflow) {
1602 assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1603 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1604 }
1605
1606 // Insert at the first instruction of the pair.
1607 Instruction *InsertPt = nullptr;
1608 for (Instruction &Iter : *Cmp->getParent()) {
1609 // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1610 // the overflow intrinsic are defined.
1611 if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1612 InsertPt = &Iter;
1613 break;
1614 }
1615 }
1616 assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1617
1618 IRBuilder<> Builder(InsertPt);
1619 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1620 if (BO->getOpcode() != Instruction::Xor) {
1621 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1622 replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1623 } else
1624 assert(BO->hasOneUse() &&
1625 "Patterns with XOr should use the BO only in the compare");
1626 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1627 replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1628 Cmp->eraseFromParent();
1629 BO->eraseFromParent();
1630 return true;
1631}
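// Rough before/after sketch for IID == Intrinsic::uadd_with_overflow (example
// IR invented, not taken from a test):
//   %math = add i32 %a, %b
//   %ov   = icmp ult i32 %math, %b
// becomes
//   %m    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %m, 0
//   %ov   = extractvalue { i32, i1 } %m, 1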
1632
1633/// Match special-case patterns that check for unsigned add overflow.
1634static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1635 BinaryOperator *&Add) {
1636 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1637 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1638 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1639
1640 // We are not expecting non-canonical/degenerate code. Just bail out.
1641 if (isa<Constant>(A))
1642 return false;
1643
1644 ICmpInst::Predicate Pred = Cmp->getPredicate();
1645 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1646 B = ConstantInt::get(B->getType(), 1);
1647 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1648 B = Constant::getAllOnesValue(B->getType());
1649 else
1650 return false;
1651
1652 // Check the users of the variable operand of the compare looking for an add
1653 // with the adjusted constant.
1654 for (User *U : A->users()) {
1655 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1656 Add = cast<BinaryOperator>(U);
1657 return true;
1658 }
1659 }
1660 return false;
1661}
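// Example of the first edge case above (sketch): given
//   %a1 = add i32 %x, 1
//   %c  = icmp eq i32 %x, -1
// the compare constant is adjusted from -1 to 1 so that the (%x, 1) pair lines
// up with the add, and %a1 is handed back through Add.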
1662
1663/// Try to combine the compare into a call to the llvm.uadd.with.overflow
1664/// intrinsic. Return true if any changes were made.
1665bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1666 ModifyDT &ModifiedDT) {
1667 bool EdgeCase = false;
1668 Value *A, *B;
1669 BinaryOperator *Add;
1670 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1671 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1672 return false;
1673 // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1674 A = Add->getOperand(0);
1675 B = Add->getOperand(1);
1676 EdgeCase = true;
1677 }
1678
1679 if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1680 TLI->getValueType(*DL, Add->getType()),
1681 Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
1682 return false;
1683
1684 // We don't want to move around uses of condition values this late, so we
1685 // check if it is legal to create the call to the intrinsic in the basic
1686 // block containing the icmp.
1687 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1688 return false;
1689
1690 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1691 Intrinsic::uadd_with_overflow))
1692 return false;
1693
1694 // Reset callers - do not crash by iterating over a dead instruction.
1695 ModifiedDT = ModifyDT::ModifyInstDT;
1696 return true;
1697}
1698
1699bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1700 ModifyDT &ModifiedDT) {
1701 // We are not expecting non-canonical/degenerate code. Just bail out.
1702 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1703 if (isa<Constant>(A) && isa<Constant>(B))
1704 return false;
1705
1706 // Convert (A u> B) to (A u< B) to simplify pattern matching.
1707 ICmpInst::Predicate Pred = Cmp->getPredicate();
1708 if (Pred == ICmpInst::ICMP_UGT) {
1709 std::swap(A, B);
1710 Pred = ICmpInst::ICMP_ULT;
1711 }
1712 // Convert special-case: (A == 0) is the same as (A u< 1).
1713 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1714 B = ConstantInt::get(B->getType(), 1);
1715 Pred = ICmpInst::ICMP_ULT;
1716 }
1717 // Convert special-case: (A != 0) is the same as (0 u< A).
1718 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1719 std::swap(A, B);
1720 Pred = ICmpInst::ICMP_ULT;
1721 }
1722 if (Pred != ICmpInst::ICMP_ULT)
1723 return false;
1724
1725 // Walk the users of a variable operand of a compare looking for a subtract or
1726 // add with that same operand. Also match the 2nd operand of the compare to
1727 // the add/sub, but that may be a negated constant operand of an add.
1728 Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1729 BinaryOperator *Sub = nullptr;
1730 for (User *U : CmpVariableOperand->users()) {
1731 // A - B, A u< B --> usubo(A, B)
1732 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1733 Sub = cast<BinaryOperator>(U);
1734 break;
1735 }
1736
1737 // A + (-C), A u< C (canonicalized form of (sub A, C))
1738 const APInt *CmpC, *AddC;
1739 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1740 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1741 Sub = cast<BinaryOperator>(U);
1742 break;
1743 }
1744 }
1745 if (!Sub)
1746 return false;
1747
1748 if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1749 TLI->getValueType(*DL, Sub->getType()),
1750 Sub->hasNUsesOrMore(1)))
1751 return false;
1752
1753 // We don't want to move around uses of condition values this late, so we
1754 // check if it is legal to create the call to the intrinsic in the basic
1755 // block containing the icmp.
1756 if (Sub->getParent() != Cmp->getParent() && !Sub->hasOneUse())
1757 return false;
1758
1759 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1760 Cmp, Intrinsic::usub_with_overflow))
1761 return false;
1762
1763 // Reset callers - do not crash by iterating over a dead instruction.
1764 ModifiedDT = ModifyDT::ModifyInstDT;
1765 return true;
1766}
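// A hedged sketch of the main pattern handled here:
//   %s = sub i32 %a, %b
//   %c = icmp ult i32 %a, %b
// becomes, subject to the shouldFormOverflowOp and same-block checks above,
//   %u = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %s = extractvalue { i32, i1 } %u, 0
//   %c = extractvalue { i32, i1 } %u, 1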
1767
1768// Decanonicalizes icmp+ctpop power-of-two test if ctpop is slow.
1769// The same transformation exists in DAG combiner, but we repeat it here because
1770// DAG builder can break the pattern by moving icmp into a successor block.
1771bool CodeGenPrepare::unfoldPowerOf2Test(CmpInst *Cmp) {
1772 CmpPredicate Pred;
1773 Value *X;
1774 const APInt *C;
1775
1776 // (icmp (ctpop x), c)
1777 if (!match(Cmp, m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1778 m_APIntAllowPoison(C))))
1779 return false;
1780
1781 // We're only interested in "is power of 2 [or zero]" patterns.
1782 bool IsStrictlyPowerOf2Test = ICmpInst::isEquality(Pred) && *C == 1;
1783 bool IsPowerOf2OrZeroTest = (Pred == CmpInst::ICMP_ULT && *C == 2) ||
1784 (Pred == CmpInst::ICMP_UGT && *C == 1);
1785 if (!IsStrictlyPowerOf2Test && !IsPowerOf2OrZeroTest)
1786 return false;
1787
1788// Some targets have better codegen for `ctpop(x) u</u>= 2/1` than for
1789 // `ctpop(x) ==/!= 1`. If ctpop is fast, only try changing the comparison,
1790 // and otherwise expand ctpop into a few simple instructions.
1791 Type *OpTy = X->getType();
1792 if (TLI->isCtpopFast(TLI->getValueType(*DL, OpTy))) {
1793 // Look for `ctpop(x) ==/!= 1`, where `ctpop(x)` is known to be non-zero.
1794 if (!IsStrictlyPowerOf2Test || !isKnownNonZero(Cmp->getOperand(0), *DL))
1795 return false;
1796
1797 // ctpop(x) == 1 -> ctpop(x) u< 2
1798 // ctpop(x) != 1 -> ctpop(x) u> 1
1799 if (Pred == ICmpInst::ICMP_EQ) {
1800 Cmp->setOperand(1, ConstantInt::get(OpTy, 2));
1801 Cmp->setPredicate(ICmpInst::ICMP_ULT);
1802 } else {
1803 Cmp->setPredicate(ICmpInst::ICMP_UGT);
1804 }
1805 return true;
1806 }
1807
1808 Value *NewCmp;
1809 if (IsPowerOf2OrZeroTest ||
1810 (IsStrictlyPowerOf2Test && isKnownNonZero(Cmp->getOperand(0), *DL))) {
1811 // ctpop(x) u< 2 -> (x & (x - 1)) == 0
1812 // ctpop(x) u> 1 -> (x & (x - 1)) != 0
1813 IRBuilder<> Builder(Cmp);
1814 Value *Sub = Builder.CreateAdd(X, Constant::getAllOnesValue(OpTy));
1815 Value *And = Builder.CreateAnd(X, Sub);
1816 CmpInst::Predicate NewPred =
1817 (Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_EQ)
1818 ? CmpInst::ICMP_EQ
1819 : CmpInst::ICMP_NE;
1820 NewCmp = Builder.CreateICmp(NewPred, And, ConstantInt::getNullValue(OpTy));
1821 } else {
1822 // ctpop(x) == 1 -> (x ^ (x - 1)) u> (x - 1)
1823 // ctpop(x) != 1 -> (x ^ (x - 1)) u<= (x - 1)
1824 IRBuilder<> Builder(Cmp);
1825 Value *Sub = Builder.CreateAdd(X, Constant::getAllOnesValue(OpTy));
1826 Value *Xor = Builder.CreateXor(X, Sub);
1827 CmpInst::Predicate NewPred =
1828 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1829 NewCmp = Builder.CreateICmp(NewPred, Xor, Sub);
1830 }
1831
1832 Cmp->replaceAllUsesWith(NewCmp);
1833 RecursivelyDeleteTriviallyDeadInstructions(Cmp);
1834 return true;
1835}
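// Concrete sketch of the slow-ctpop expansion for the "power of 2 or zero"
// form (example IR invented):
//   %p = call i32 @llvm.ctpop.i32(i32 %x)
//   %c = icmp ult i32 %p, 2
// becomes
//   %m = add i32 %x, -1
//   %a = and i32 %x, %m
//   %c = icmp eq i32 %a, 0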
1836
1837/// Sink the given CmpInst into user blocks to reduce the number of virtual
1838/// registers that must be created and coalesced. This is a clear win except on
1839/// targets with multiple condition code registers (PowerPC), where it might
1840/// lose; some adjustment may be wanted there.
1841///
1842/// Return true if any changes are made.
1843static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI,
1844 const DataLayout &DL) {
1845 if (TLI.hasMultipleConditionRegisters(EVT::getEVT(Cmp->getType())))
1846 return false;
1847
1848 // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1849 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1850 return false;
1851
1852 bool UsedInPhiOrCurrentBlock = any_of(Cmp->users(), [Cmp](User *U) {
1853 return isa<PHINode>(U) ||
1854 cast<Instruction>(U)->getParent() == Cmp->getParent();
1855 });
1856
1857 // Avoid sinking larger-than-legal integer comparisons unless the compare is
1858 // ONLY used in another BB.
1859 if (UsedInPhiOrCurrentBlock && Cmp->getOperand(0)->getType()->isIntegerTy() &&
1860 Cmp->getOperand(0)->getType()->getScalarSizeInBits() >
1861 DL.getLargestLegalIntTypeSizeInBits())
1862 return false;
1863
1864 // Only insert a cmp in each block once.
1865 DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1866
1867 bool MadeChange = false;
1868 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1869 UI != E;) {
1870 Use &TheUse = UI.getUse();
1871 Instruction *User = cast<Instruction>(*UI);
1872
1873 // Preincrement use iterator so we don't invalidate it.
1874 ++UI;
1875
1876 // Don't bother for PHI nodes.
1877 if (isa<PHINode>(User))
1878 continue;
1879
1880 // Figure out which BB this cmp is used in.
1881 BasicBlock *UserBB = User->getParent();
1882 BasicBlock *DefBB = Cmp->getParent();
1883
1884 // If this user is in the same block as the cmp, don't change the cmp.
1885 if (UserBB == DefBB)
1886 continue;
1887
1888 // If we have already inserted a cmp into this block, use it.
1889 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1890
1891 if (!InsertedCmp) {
1892 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1893 assert(InsertPt != UserBB->end());
1894 InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1895 Cmp->getOperand(0), Cmp->getOperand(1), "");
1896 InsertedCmp->insertBefore(*UserBB, InsertPt);
1897 // Propagate the debug info.
1898 InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1899 }
1900
1901 // Replace a use of the cmp with a use of the new cmp.
1902 TheUse = InsertedCmp;
1903 MadeChange = true;
1904 ++NumCmpUses;
1905 }
1906
1907 // If we removed all uses, nuke the cmp.
1908 if (Cmp->use_empty()) {
1909 Cmp->eraseFromParent();
1910 MadeChange = true;
1911 }
1912
1913 return MadeChange;
1914}
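// Sketch of the effect (block names invented): an icmp defined in %bb0 whose
// only use is a branch in %bb1 gets a copy inserted at the head of %bb1, the
// use is rewritten to the copy, and the original compare is erased once it has
// no remaining uses, keeping the flag-producing instruction next to its
// consumer.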
1915
1916/// For pattern like:
1917///
1918/// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1919/// ...
1920/// DomBB:
1921/// ...
1922/// br DomCond, TrueBB, CmpBB
1923/// CmpBB: (with DomBB being the single predecessor)
1924/// ...
1925/// Cmp = icmp eq CmpOp0, CmpOp1
1926/// ...
1927///
1928/// It would use two comparisons on targets where the lowering of icmp sgt/slt
1929/// differs from the lowering of icmp eq (PowerPC). This function tries to convert
1930/// 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'.
1931/// After that, DomCond and Cmp can use the same comparison, saving one
1932/// comparison.
1933///
1934/// Return true if any changes are made.
1935static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1936 const TargetLowering &TLI) {
1937 if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1938 return false;
1939
1940 ICmpInst::Predicate Pred = Cmp->getPredicate();
1941 if (Pred != ICmpInst::ICMP_EQ)
1942 return false;
1943
1944 // If icmp eq has users other than BranchInst and SelectInst, converting it to
1945 // icmp slt/sgt would introduce more redundant LLVM IR.
1946 for (User *U : Cmp->users()) {
1947 if (isa<BranchInst>(U))
1948 continue;
1949 if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1950 continue;
1951 return false;
1952 }
1953
1954 // This is a cheap/incomplete check for dominance - just match a single
1955 // predecessor with a conditional branch.
1956 BasicBlock *CmpBB = Cmp->getParent();
1957 BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1958 if (!DomBB)
1959 return false;
1960
1961 // We want to ensure that the only way control gets to the comparison of
1962 // interest is that a less/greater than comparison on the same operands is
1963 // false.
1964 Value *DomCond;
1965 BasicBlock *TrueBB, *FalseBB;
1966 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1967 return false;
1968 if (CmpBB != FalseBB)
1969 return false;
1970
1971 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1972 CmpPredicate DomPred;
1973 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1974 return false;
1975 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1976 return false;
1977
1978 // Convert the equality comparison to the opposite of the dominating
1979 // comparison and swap the direction for all branch/select users.
1980 // We have conceptually converted:
1981 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1982 // to
1983 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1984 // And similarly for branches.
1985 for (User *U : Cmp->users()) {
1986 if (auto *BI = dyn_cast<BranchInst>(U)) {
1987 assert(BI->isConditional() && "Must be conditional");
1988 BI->swapSuccessors();
1989 continue;
1990 }
1991 if (auto *SI = dyn_cast<SelectInst>(U)) {
1992 // Swap operands
1993 SI->swapValues();
1994 SI->swapProfMetadata();
1995 continue;
1996 }
1997 llvm_unreachable("Must be a branch or a select");
1998 }
1999 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
2000 return true;
2001}
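// Worked example of the conversion (sketch, names invented):
//   DomBB:  %dom = icmp sgt i32 %a, %b
//           br i1 %dom, label %TrueBB, label %CmpBB
//   CmpBB:  %eq = icmp eq i32 %a, %b
//           br i1 %eq, label %X, label %Y
// The eq compare is rewritten to "icmp slt i32 %a, %b" (the swapped form of
// the dominating sgt) and the branch successors in CmpBB are swapped, so both
// blocks can share one comparison.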
2002
2003/// Many architectures use the same instruction for both subtract and cmp. Try
2004/// to swap cmp operands to match subtract operations to allow for CSE.
2005static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
2006 Value *Op0 = Cmp->getOperand(0);
2007 Value *Op1 = Cmp->getOperand(1);
2008 if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
2009 isa<Constant>(Op1) || Op0 == Op1)
2010 return false;
2011
2012 // If a subtract already has the same operands as a compare, swapping would be
2013 // bad. If a subtract has the same operands as a compare but in reverse order,
2014 // then swapping is good.
2015 int GoodToSwap = 0;
2016 unsigned NumInspected = 0;
2017 for (const User *U : Op0->users()) {
2018 // Avoid walking many users.
2019 if (++NumInspected > 128)
2020 return false;
2021 if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
2022 GoodToSwap++;
2023 else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
2024 GoodToSwap--;
2025 }
2026
2027 if (GoodToSwap > 0) {
2028 Cmp->swapOperands();
2029 return true;
2030 }
2031 return false;
2032}
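// Example (sketch): given
//   %d = sub i32 %b, %a
//   %c = icmp ult i32 %a, %b
// the compare is swapped to "icmp ugt i32 %b, %a", so its operands now match
// the subtract and targets that implement sub and cmp with one instruction can
// CSE the pair.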
2033
2034static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
2035 const DataLayout &DL) {
2036 FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
2037 if (!FCmp)
2038 return false;
2039
2040 // Don't fold if the target offers free fabs and the predicate is legal.
2041 EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
2042 if (TLI.isFAbsFree(VT) &&
2043 TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
2044 VT.getSimpleVT()))
2045 return false;
2046
2047 // Reverse the canonicalization if it is an FP class test.
2048 auto ShouldReverseTransform = [](FPClassTest ClassTest) {
2049 return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
2050 };
2051 auto [ClassVal, ClassTest] =
2052 fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
2053 FCmp->getOperand(0), FCmp->getOperand(1));
2054 if (!ClassVal)
2055 return false;
2056
2057 if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
2058 return false;
2059
2060 IRBuilder<> Builder(Cmp);
2061 Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
2062 Cmp->replaceAllUsesWith(IsFPClass);
2063 RecursivelyDeleteTriviallyDeadInstructions(Cmp);
2064 return true;
2065}
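// Hedged example of the reversed canonicalization (the class-test mask value
// is illustrative):
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %cmp  = fcmp oeq double %fabs, 0x7FF0000000000000
// becomes
//   %cmp  = call i1 @llvm.is.fpclass.f64(double %x, i32 516)  ; fcInf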
2066
2067static bool isRemOfLoopIncrementWithLoopInvariant(
2068 Instruction *Rem, const LoopInfo *LI, Value *&RemAmtOut, Value *&AddInstOut,
2069 Value *&AddOffsetOut, PHINode *&LoopIncrPNOut) {
2070 Value *Incr, *RemAmt;
2071 // NB: If RemAmt is a power of 2 it *should* have been transformed by now.
2072 if (!match(Rem, m_URem(m_Value(Incr), m_Value(RemAmt))))
2073 return false;
2074
2075 Value *AddInst, *AddOffset;
2076 // Find the loop increment PHI.
2077 auto *PN = dyn_cast<PHINode>(Incr);
2078 if (PN != nullptr) {
2079 AddInst = nullptr;
2080 AddOffset = nullptr;
2081 } else {
2082 // Search through a NUW add on top of the loop increment.
2083 Value *V0, *V1;
2084 if (!match(Incr, m_NUWAdd(m_Value(V0), m_Value(V1))))
2085 return false;
2086
2087 AddInst = Incr;
2088 PN = dyn_cast<PHINode>(V0);
2089 if (PN != nullptr) {
2090 AddOffset = V1;
2091 } else {
2092 PN = dyn_cast<PHINode>(V1);
2093 AddOffset = V0;
2094 }
2095 }
2096
2097 if (!PN)
2098 return false;
2099
2100 // This isn't strictly necessary; what we really need is one increment and any
2101 // number of initial values that are all the same.
2102 if (PN->getNumIncomingValues() != 2)
2103 return false;
2104
2105 // Only trivially analyzable loops.
2106 Loop *L = LI->getLoopFor(PN->getParent());
2107 if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
2108 return false;
2109
2110 // Require that the remainder is in the loop.
2111 if (!L->contains(Rem))
2112 return false;
2113
2114 // Only works if the remainder amount is a loop invariant.
2115 if (!L->isLoopInvariant(RemAmt))
2116 return false;
2117
2118 // Only works if the AddOffset is a loop invariant.
2119 if (AddOffset && !L->isLoopInvariant(AddOffset))
2120 return false;
2121
2122 // Is the PHI a loop increment?
2123 auto LoopIncrInfo = getIVIncrement(PN, LI);
2124 if (!LoopIncrInfo)
2125 return false;
2126
2127 // We need remainder_amount % increment_amount to be zero. Increment of one
2128 // satisfies that without any special logic and is overwhelmingly the common
2129 // case.
2130 if (!match(LoopIncrInfo->second, m_One()))
2131 return false;
2132
2133 // Need the increment to not overflow.
2134 if (!match(LoopIncrInfo->first, m_c_NUWAdd(m_Specific(PN), m_Value())))
2135 return false;
2136
2137 // Set output variables.
2138 RemAmtOut = RemAmt;
2139 LoopIncrPNOut = PN;
2140 AddInstOut = AddInst;
2141 AddOffsetOut = AddOffset;
2142
2143 return true;
2144}
2145
2146// Try to transform:
2147//
2148// for(i = Start; i < End; ++i)
2149// Rem = (i nuw+ IncrLoopInvariant) u% RemAmtLoopInvariant;
2150//
2151// ->
2152//
2153// Rem = (Start nuw+ IncrLoopInvariant) % RemAmtLoopInvariant;
2154// for(i = Start; i < End; ++i, ++rem)
2155// Rem = rem == RemAmtLoopInvariant ? 0 : Rem;
2156static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
2157 const LoopInfo *LI,
2158 SmallSet<BasicBlock *, 32> &FreshBBs,
2159 bool IsHuge) {
2160 Value *AddOffset, *RemAmt, *AddInst;
2161 PHINode *LoopIncrPN;
2162 if (!isRemOfLoopIncrementWithLoopInvariant(Rem, LI, RemAmt, AddInst,
2163 AddOffset, LoopIncrPN))
2164 return false;
2165
2166 // Only handle a non-constant remainder amount, as the extra IV is probably
2167 // not profitable in that case.
2168 //
2169 // Potential TODO(1): `urem` of a const ends up as `mul` + `shift` + `add`. If
2170 // we can rule out register pressure and ensure this `urem` is executed each
2171 // iteration, it's probably profitable to handle the const case as well.
2172 //
2173 // Potential TODO(2): Should we have a check for how "nested" this remainder
2174 // operation is? The new code runs every iteration so if the remainder is
2175 // guarded behind unlikely conditions this might not be worth it.
2176 if (match(RemAmt, m_ImmConstant()))
2177 return false;
2178
2179 Loop *L = LI->getLoopFor(LoopIncrPN->getParent());
2180 Value *Start = LoopIncrPN->getIncomingValueForBlock(L->getLoopPreheader());
2181 // If we have an add, create the initial value for the remainder.
2182 // The logic here is:
2183 // (urem (add nuw Start, IncrLoopInvariant), RemAmtLoopInvariant)
2184 //
2185 // Only proceed if the expression simplifies (otherwise we can't fully
2186 // optimize out the urem).
2187 if (AddInst) {
2188 assert(AddOffset && "We found an add but missing values");
2189 // Without dom-condition/assumption cache we aren't likely to get much out
2190 // of a context instruction.
2191 Start = simplifyAddInst(Start, AddOffset,
2192 match(AddInst, m_NSWAdd(m_Value(), m_Value())),
2193 /*IsNUW=*/true, *DL);
2194 if (!Start)
2195 return false;
2196 }
2197
2198 // If we can't fully optimize out the `rem`, skip this transform.
2199 Start = simplifyURemInst(Start, RemAmt, *DL);
2200 if (!Start)
2201 return false;
2202
2203 // Create new remainder with induction variable.
2204 Type *Ty = Rem->getType();
2205 IRBuilder<> Builder(Rem->getContext());
2206
2207 Builder.SetInsertPoint(LoopIncrPN);
2208 PHINode *NewRem = Builder.CreatePHI(Ty, 2);
2209
2210 Builder.SetInsertPoint(cast<Instruction>(
2211 LoopIncrPN->getIncomingValueForBlock(L->getLoopLatch())));
2212 // `(add (urem x, y), 1)` is always nuw.
2213 Value *RemAdd = Builder.CreateNUWAdd(NewRem, ConstantInt::get(Ty, 1));
2214 Value *RemCmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, RemAdd, RemAmt);
2215 Value *RemSel =
2216 Builder.CreateSelect(RemCmp, Constant::getNullValue(Ty), RemAdd);
2217
2218 NewRem->addIncoming(Start, L->getLoopPreheader());
2219 NewRem->addIncoming(RemSel, L->getLoopLatch());
2220
2221 // Insert all touched BBs.
2222 FreshBBs.insert(LoopIncrPN->getParent());
2223 FreshBBs.insert(L->getLoopLatch());
2224 FreshBBs.insert(Rem->getParent());
2225 if (AddInst)
2226 FreshBBs.insert(cast<Instruction>(AddInst)->getParent());
2227 replaceAllUsesWith(Rem, NewRem, FreshBBs, IsHuge);
2228 Rem->eraseFromParent();
2229 if (AddInst && AddInst->use_empty())
2230 cast<Instruction>(AddInst)->eraseFromParent();
2231 return true;
2232}
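// Sketch of the rewritten loop (names invented): the urem is replaced by a
// second recurrence
//   %rem      = phi i32 [ %rem.start, %preheader ], [ %rem.next, %latch ]
//   %inc      = add nuw i32 %rem, 1
//   %cmp      = icmp eq i32 %inc, %n
//   %rem.next = select i1 %cmp, i32 0, i32 %inc
// where %rem.start is the simplified "(Start [+ AddOffset]) urem %n" value
// computed above and %n is the loop-invariant remainder amount.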
2233
2234bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
2235 if (foldURemOfLoopIncrement(Rem, DL, LI, FreshBBs, IsHugeFunc))
2236 return true;
2237 return false;
2238}
2239
2240bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
2241 if (sinkCmpExpression(Cmp, *TLI, *DL))
2242 return true;
2243
2244 if (combineToUAddWithOverflow(Cmp, ModifiedDT))
2245 return true;
2246
2247 if (combineToUSubWithOverflow(Cmp, ModifiedDT))
2248 return true;
2249
2250 if (unfoldPowerOf2Test(Cmp))
2251 return true;
2252
2253 if (foldICmpWithDominatingICmp(Cmp, *TLI))
2254 return true;
2255
2256 if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
2257 return true;
2258
2259 if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
2260 return true;
2261
2262 return false;
2263}
2264
2265/// Duplicate and sink the given 'and' instruction into user blocks where it is
2266/// used in a compare to allow isel to generate better code for targets where
2267/// this operation can be combined.
2268///
2269/// Return true if any changes are made.
2270static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
2271 SetOfInstrs &InsertedInsts) {
2272 // Double-check that we're not trying to optimize an instruction that was
2273 // already optimized by some other part of this pass.
2274 assert(!InsertedInsts.count(AndI) &&
2275 "Attempting to optimize already optimized and instruction");
2276 (void)InsertedInsts;
2277
2278 // Nothing to do for single use in same basic block.
2279 if (AndI->hasOneUse() &&
2280 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
2281 return false;
2282
2283 // Try to avoid cases where sinking/duplicating is likely to increase register
2284 // pressure.
2285 if (!isa<ConstantInt>(AndI->getOperand(0)) &&
2286 !isa<ConstantInt>(AndI->getOperand(1)) &&
2287 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
2288 return false;
2289
2290 for (auto *U : AndI->users()) {
2291 Instruction *User = cast<Instruction>(U);
2292
2293 // Only sink 'and' feeding icmp with 0.
2294 if (!isa<ICmpInst>(User))
2295 return false;
2296
2297 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
2298 if (!CmpC || !CmpC->isZero())
2299 return false;
2300 }
2301
2302 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
2303 return false;
2304
2305 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
2306 LLVM_DEBUG(AndI->getParent()->dump());
2307
2308 // Push the 'and' into the same block as the icmp 0. There should only be
2309 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
2310 // others, so we don't need to keep track of which BBs we insert into.
2311 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
2312 UI != E;) {
2313 Use &TheUse = UI.getUse();
2314 Instruction *User = cast<Instruction>(*UI);
2315
2316 // Preincrement use iterator so we don't invalidate it.
2317 ++UI;
2318
2319 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
2320
2321 // Keep the 'and' in the same place if the use is already in the same block.
2322 Instruction *InsertPt =
2323 User->getParent() == AndI->getParent() ? AndI : User;
2324 Instruction *InsertedAnd = BinaryOperator::Create(
2325 Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
2326 InsertPt->getIterator());
2327 // Propagate the debug info.
2328 InsertedAnd->setDebugLoc(AndI->getDebugLoc());
2329
2330 // Replace a use of the 'and' with a use of the new 'and'.
2331 TheUse = InsertedAnd;
2332 ++NumAndUses;
2333 LLVM_DEBUG(User->getParent()->dump());
2334 }
2335
2336 // We removed all uses, nuke the and.
2337 AndI->eraseFromParent();
2338 return true;
2339}
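// Illustrative shape (sketch): an "%m = and i32 %x, 255" whose uses are all
// "icmp eq i32 %m, 0" compares in other blocks gets a duplicate 'and' placed
// next to each compare and the original is erased, so instruction selection
// can fold each and/cmp pair on targets where
// isMaskAndCmp0FoldingBeneficial() returns true.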
2340
2341/// Check if the candidates could be combined with a shift instruction, which
2342/// includes:
2343/// 1. Truncate instruction
2344/// 2. An 'and' instruction whose immediate is a mask of the low bits:
2345/// imm & (imm+1) == 0
2346static bool isExtractBitsCandidateUse(Instruction *User) {
2347 if (!isa<TruncInst>(User)) {
2348 if (User->getOpcode() != Instruction::And ||
2349 !isa<ConstantInt>(User->getOperand(1)))
2350 return false;
2351
2352 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
2353
2354 if ((Cimm & (Cimm + 1)).getBoolValue())
2355 return false;
2356 }
2357 return true;
2358}
2359
2360/// Sink both the shift and truncate instructions to the BB of the truncate's use.
2361static bool
2362SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
2363 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
2364 const TargetLowering &TLI, const DataLayout &DL) {
2365 BasicBlock *UserBB = User->getParent();
2366 DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
2367 auto *TruncI = cast<TruncInst>(User);
2368 bool MadeChange = false;
2369
2370 for (Value::user_iterator TruncUI = TruncI->user_begin(),
2371 TruncE = TruncI->user_end();
2372 TruncUI != TruncE;) {
2373
2374 Use &TruncTheUse = TruncUI.getUse();
2375 Instruction *TruncUser = cast<Instruction>(*TruncUI);
2376 // Preincrement use iterator so we don't invalidate it.
2377
2378 ++TruncUI;
2379
2380 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
2381 if (!ISDOpcode)
2382 continue;
2383
2384 // If the use is actually a legal node, there will not be an
2385 // implicit truncate.
2386 // FIXME: always querying the result type is just an
2387 // approximation; some nodes' legality is determined by the
2388 // operand or other means. There's no good way to find out though.
2389 if (TLI.isOperationLegalOrCustom(
2390 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
2391 continue;
2392
2393 // Don't bother for PHI nodes.
2394 if (isa<PHINode>(TruncUser))
2395 continue;
2396
2397 BasicBlock *TruncUserBB = TruncUser->getParent();
2398
2399 if (UserBB == TruncUserBB)
2400 continue;
2401
2402 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
2403 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
2404
2405 if (!InsertedShift && !InsertedTrunc) {
2406 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
2407 assert(InsertPt != TruncUserBB->end());
2408 // Sink the shift
2409 if (ShiftI->getOpcode() == Instruction::AShr)
2410 InsertedShift =
2411 BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2412 else
2413 InsertedShift =
2414 BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2415 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2416 InsertedShift->insertBefore(*TruncUserBB, InsertPt);
2417
2418 // Sink the trunc
2419 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
2420 TruncInsertPt++;
2421 // It will go ahead of any debug-info.
2422 TruncInsertPt.setHeadBit(true);
2423 assert(TruncInsertPt != TruncUserBB->end());
2424
2425 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2426 TruncI->getType(), "");
2427 InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
2428 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2429
2430 MadeChange = true;
2431
2432 TruncTheUse = InsertedTrunc;
2433 }
2434 }
2435 return MadeChange;
2436}
2437
2438/// Sink the shift *right* instruction into user blocks if the uses could
2439/// potentially be combined with this shift instruction to generate a BitExtract
2440/// instruction. It will only be applied if the architecture supports BitExtract
2441/// instructions. Here is an example:
2442/// BB1:
2443/// %x.extract.shift = lshr i64 %arg1, 32
2444/// BB2:
2445/// %x.extract.trunc = trunc i64 %x.extract.shift to i16
2446/// ==>
2447///
2448/// BB2:
2449/// %x.extract.shift.1 = lshr i64 %arg1, 32
2450/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2451///
2452/// CodeGen will recognize the pattern in BB2 and generate BitExtract
2453/// instruction.
2454/// Return true if any changes are made.
2455static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2456 const TargetLowering &TLI,
2457 const DataLayout &DL) {
2458 BasicBlock *DefBB = ShiftI->getParent();
2459
2460 /// Only insert instructions in each block once.
2461 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2462
2463 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2464
2465 bool MadeChange = false;
2466 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2467 UI != E;) {
2468 Use &TheUse = UI.getUse();
2469 Instruction *User = cast<Instruction>(*UI);
2470 // Preincrement use iterator so we don't invalidate it.
2471 ++UI;
2472
2473 // Don't bother for PHI nodes.
2474 if (isa<PHINode>(User))
2475 continue;
2476
2477 if (!isExtractBitsCandidateUse(User))
2478 continue;
2479
2480 BasicBlock *UserBB = User->getParent();
2481
2482 if (UserBB == DefBB) {
2483 // If the shift and truncate instructions are in the same BB, the use of
2484 // the truncate (TruncUse) may still introduce another truncate if it is not
2485 // legal. In this case, we would like to sink both the shift and the
2486 // truncate instruction to the BB of TruncUse.
2487 // for example:
2488 // BB1:
2489 // i64 shift.result = lshr i64 opnd, imm
2490 // trunc.result = trunc shift.result to i16
2491 //
2492 // BB2:
2493 // ----> We will have an implicit truncate here if the architecture does
2494 // not have i16 compare.
2495 // cmp i16 trunc.result, opnd2
2496 //
2497 if (isa<TruncInst>(User) &&
2498 shiftIsLegal
2499 // If the type of the truncate is legal, no truncate will be
2500 // introduced in other basic blocks.
2501 && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2502 MadeChange =
2503 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2504
2505 continue;
2506 }
2507 // If we have already inserted a shift into this block, use it.
2508 BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2509
2510 if (!InsertedShift) {
2511 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2512 assert(InsertPt != UserBB->end());
2513
2514 if (ShiftI->getOpcode() == Instruction::AShr)
2515 InsertedShift =
2516 BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2517 else
2518 InsertedShift =
2519 BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2520 InsertedShift->insertBefore(*UserBB, InsertPt);
2521 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2522
2523 MadeChange = true;
2524 }
2525
2526 // Replace a use of the shift with a use of the new shift.
2527 TheUse = InsertedShift;
2528 }
2529
2530 // If we removed all uses, or there are none, nuke the shift.
2531 if (ShiftI->use_empty()) {
2532 salvageDebugInfo(*ShiftI);
2533 ShiftI->eraseFromParent();
2534 MadeChange = true;
2535 }
2536
2537 return MadeChange;
2538}
2539
2540/// If counting leading or trailing zeros is an expensive operation and a zero
2541/// input is defined, add a check for zero to avoid calling the intrinsic.
2542///
2543/// We want to transform:
2544/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2545///
2546/// into:
2547/// entry:
2548/// %cmpz = icmp eq i64 %A, 0
2549/// br i1 %cmpz, label %cond.end, label %cond.false
2550/// cond.false:
2551/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2552/// br label %cond.end
2553/// cond.end:
2554/// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2555///
2556/// If the transform is performed, return true and set ModifiedDT to true.
2557static bool despeculateCountZeros(IntrinsicInst *CountZeros, LoopInfo &LI,
2558 const TargetLowering *TLI,
2559 const DataLayout *DL, ModifyDT &ModifiedDT,
2560 SmallSet<BasicBlock *, 32> &FreshBBs,
2561 bool IsHugeFunc) {
2562 // If a zero input is undefined, it doesn't make sense to despeculate that.
2563 if (match(CountZeros->getOperand(1), m_One()))
2564 return false;
2565
2566 // If it's cheap to speculate, there's nothing to do.
2567 Type *Ty = CountZeros->getType();
2568 auto IntrinsicID = CountZeros->getIntrinsicID();
2569 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2570 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2571 return false;
2572
2573 // Only handle scalar cases. Anything else requires too much work.
2574 unsigned SizeInBits = Ty->getScalarSizeInBits();
2575 if (Ty->isVectorTy())
2576 return false;
2577
2578 // Bail if the value is never zero.
2579 Use &Op = CountZeros->getOperandUse(0);
2580 if (isKnownNonZero(Op, *DL))
2581 return false;
2582
2583 // The intrinsic will be sunk behind a compare against zero and branch.
2584 BasicBlock *StartBlock = CountZeros->getParent();
2585 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2586 if (IsHugeFunc)
2587 FreshBBs.insert(CallBlock);
2588
2589 // Create another block after the count zero intrinsic. A PHI will be added
2590 // in this block to select the result of the intrinsic or the bit-width
2591 // constant if the input to the intrinsic is zero.
2592 BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
2593 // Any debug-info after CountZeros should not be included.
2594 SplitPt.setHeadBit(true);
2595 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2596 if (IsHugeFunc)
2597 FreshBBs.insert(EndBlock);
2598
2599 // Update the LoopInfo. The new blocks are in the same loop as the start
2600 // block.
2601 if (Loop *L = LI.getLoopFor(StartBlock)) {
2602 L->addBasicBlockToLoop(CallBlock, LI);
2603 L->addBasicBlockToLoop(EndBlock, LI);
2604 }
2605
2606 // Set up a builder to create a compare, conditional branch, and PHI.
2607 IRBuilder<> Builder(CountZeros->getContext());
2608 Builder.SetInsertPoint(StartBlock->getTerminator());
2609 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2610
2611 // Replace the unconditional branch that was created by the first split with
2612 // a compare against zero and a conditional branch.
2613 Value *Zero = Constant::getNullValue(Ty);
2614 // Avoid introducing branch on poison. This also replaces the ctz operand.
2615 if (!isGuaranteedNotToBeUndefOrPoison(Op))
2616 Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2617 Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2618 Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2619 StartBlock->getTerminator()->eraseFromParent();
2620
2621 // Create a PHI in the end block to select either the output of the intrinsic
2622 // or the bit width of the operand.
2623 Builder.SetInsertPoint(EndBlock, EndBlock->begin());
2624 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2625 replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2626 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2627 PN->addIncoming(BitWidth, StartBlock);
2628 PN->addIncoming(CountZeros, CallBlock);
2629
2630 // We are explicitly handling the zero case, so we can set the intrinsic's
2631 // undefined zero argument to 'true'. This will also prevent reprocessing the
2632 // intrinsic; we only despeculate when a zero input is defined.
2633 CountZeros->setArgOperand(1, Builder.getTrue());
2634 ModifiedDT = ModifyDT::ModifyBBDT;
2635 return true;
2636}
2637
2638bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2639 BasicBlock *BB = CI->getParent();
2640
2641 // Sink address computing for memory operands into the block.
2642 if (CI->isInlineAsm() && optimizeInlineAsmInst(CI))
2643 return true;
2644
2645 // Align the pointer arguments to this call if the target thinks it's a good
2646 // idea
2647 unsigned MinSize;
2648 Align PrefAlign;
2649 if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2650 for (auto &Arg : CI->args()) {
2651 // We want to align both objects whose address is used directly and
2652 // objects whose address is used in casts and GEPs, though it only makes
2653 // sense for GEPs if the offset is a multiple of the desired alignment and
2654 // if size - offset meets the size threshold.
2655 if (!Arg->getType()->isPointerTy())
2656 continue;
2657 APInt Offset(DL->getIndexSizeInBits(
2658 cast<PointerType>(Arg->getType())->getAddressSpace()),
2659 0);
2660 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2661 uint64_t Offset2 = Offset.getLimitedValue();
2662 if (!isAligned(PrefAlign, Offset2))
2663 continue;
2664 AllocaInst *AI;
2665 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2666 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2667 AI->setAlignment(PrefAlign);
2668 // Global variables can only be aligned if they are defined in this
2669 // object (i.e. they are uniquely initialized in this object), and
2670 // over-aligning global variables that have an explicit section is
2671 // forbidden.
2672 GlobalVariable *GV;
2673 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2674 GV->getPointerAlignment(*DL) < PrefAlign &&
2675 DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2676 GV->setAlignment(PrefAlign);
2677 }
2678 }
2679 // If this is a memcpy (or similar) then we may be able to improve the
2680 // alignment.
2681 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2682 Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2683 MaybeAlign MIDestAlign = MI->getDestAlign();
2684 if (!MIDestAlign || DestAlign > *MIDestAlign)
2685 MI->setDestAlignment(DestAlign);
2686 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2687 MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2688 Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2689 if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2690 MTI->setSourceAlignment(SrcAlign);
2691 }
2692 }
2693
2694 // If we have a cold call site, try to sink addressing computation into the
2695 // cold block. This interacts with our handling for loads and stores to
2696 // ensure that we can fold all uses of a potential addressing computation
2697 // into their uses. TODO: generalize this to work over profiling data
2698 if (CI->hasFnAttr(Attribute::Cold) &&
2699 !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2700 for (auto &Arg : CI->args()) {
2701 if (!Arg->getType()->isPointerTy())
2702 continue;
2703 unsigned AS = Arg->getType()->getPointerAddressSpace();
2704 if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
2705 return true;
2706 }
2707
2708 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2709 if (II) {
2710 switch (II->getIntrinsicID()) {
2711 default:
2712 break;
2713 case Intrinsic::assume:
2714 llvm_unreachable("llvm.assume should have been removed already");
2715 case Intrinsic::allow_runtime_check:
2716 case Intrinsic::allow_ubsan_check:
2717 case Intrinsic::experimental_widenable_condition: {
2718 // Give up on future widening opportunities so that we can fold away dead
2719 // paths and merge blocks before going into block-local instruction
2720 // selection.
2721 if (II->use_empty()) {
2722 II->eraseFromParent();
2723 return true;
2724 }
2725 Constant *RetVal = ConstantInt::getTrue(II->getContext());
2726 resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2727 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2728 });
2729 return true;
2730 }
2731 case Intrinsic::objectsize:
2732 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2733 case Intrinsic::is_constant:
2734 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2735 case Intrinsic::aarch64_stlxr:
2736 case Intrinsic::aarch64_stxr: {
2737 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2738 if (!ExtVal || !ExtVal->hasOneUse() ||
2739 ExtVal->getParent() == CI->getParent())
2740 return false;
2741 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2742 ExtVal->moveBefore(CI->getIterator());
2743 // Mark this instruction as "inserted by CGP", so that other
2744 // optimizations don't touch it.
2745 InsertedInsts.insert(ExtVal);
2746 return true;
2747 }
2748
2749 case Intrinsic::launder_invariant_group:
2750 case Intrinsic::strip_invariant_group: {
2751 Value *ArgVal = II->getArgOperand(0);
2752 auto it = LargeOffsetGEPMap.find(II);
2753 if (it != LargeOffsetGEPMap.end()) {
2754 // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2755 // Make sure not to have to deal with iterator invalidation
2756 // after possibly adding ArgVal to LargeOffsetGEPMap.
2757 auto GEPs = std::move(it->second);
2758 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2759 LargeOffsetGEPMap.erase(II);
2760 }
2761
2762 replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2763 II->eraseFromParent();
2764 return true;
2765 }
2766 case Intrinsic::cttz:
2767 case Intrinsic::ctlz:
2768 // If counting zeros is expensive, try to avoid it.
2769 return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
2770 IsHugeFunc);
2771 case Intrinsic::fshl:
2772 case Intrinsic::fshr:
2773 return optimizeFunnelShift(II);
2774 case Intrinsic::masked_gather:
2775 return optimizeGatherScatterInst(II, II->getArgOperand(0));
2776 case Intrinsic::masked_scatter:
2777 return optimizeGatherScatterInst(II, II->getArgOperand(1));
2778 case Intrinsic::masked_load:
2779 // Treat v1X masked load as load X type.
2780 if (auto *VT = dyn_cast<FixedVectorType>(II->getType())) {
2781 if (VT->getNumElements() == 1) {
2782 Value *PtrVal = II->getArgOperand(0);
2783 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2784 if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
2785 return true;
2786 }
2787 }
2788 return false;
2789 case Intrinsic::masked_store:
2790 // Treat v1X masked store as store X type.
2791 if (auto *VT =
2792 dyn_cast<FixedVectorType>(II->getArgOperand(0)->getType())) {
2793 if (VT->getNumElements() == 1) {
2794 Value *PtrVal = II->getArgOperand(1);
2795 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2796 if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
2797 return true;
2798 }
2799 }
2800 return false;
2801 case Intrinsic::umul_with_overflow:
2802 return optimizeMulWithOverflow(II, /*IsSigned=*/false, ModifiedDT);
2803 case Intrinsic::smul_with_overflow:
2804 return optimizeMulWithOverflow(II, /*IsSigned=*/true, ModifiedDT);
2805 }
2806
2807 SmallVector<Value *, 2> PtrOps;
2808 Type *AccessTy;
2809 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2810 while (!PtrOps.empty()) {
2811 Value *PtrVal = PtrOps.pop_back_val();
2812 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2813 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2814 return true;
2815 }
2816 }
2817
2818 // From here on out we're working with named functions.
2819 auto *Callee = CI->getCalledFunction();
2820 if (!Callee)
2821 return false;
2822
2823 // Lower all default uses of _chk calls. This is very similar
2824 // to what InstCombineCalls does, but here we are only lowering calls
2825 // to fortified library functions (e.g. __memcpy_chk) that have the default
2826 // "don't know" as the objectsize. Anything else should be left alone.
2827 FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2828 IRBuilder<> Builder(CI);
2829 if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2830 replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2831 CI->eraseFromParent();
2832 return true;
2833 }
2834
2835 // SCCP may have propagated, among other things, C++ static variables across
2836 // calls. If this happens to be the case, we may want to undo it in order to
2837 // avoid redundant pointer computation of the constant, as the function
2838 // returning the constant needs to be executed anyway.
2839 auto GetUniformReturnValue = [](const Function *F) -> GlobalVariable * {
2840 if (!F->getReturnType()->isPointerTy())
2841 return nullptr;
2842
2843 GlobalVariable *UniformValue = nullptr;
2844 for (auto &BB : *F) {
2845 if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
2846 if (auto *V = dyn_cast<GlobalVariable>(RI->getReturnValue())) {
2847 if (!UniformValue)
2848 UniformValue = V;
2849 else if (V != UniformValue)
2850 return nullptr;
2851 } else {
2852 return nullptr;
2853 }
2854 }
2855 }
2856
2857 return UniformValue;
2858 };
2859
2860 if (Callee->hasExactDefinition()) {
2861 if (GlobalVariable *RV = GetUniformReturnValue(Callee)) {
2862 bool MadeChange = false;
2863 for (Use &U : make_early_inc_range(RV->uses())) {
2864 auto *I = dyn_cast<Instruction>(U.getUser());
2865 if (!I || I->getParent() != CI->getParent()) {
2866 // Limit to the same basic block to avoid extending the call-site live
2867 // range, which otherwise could increase register pressure.
2868 continue;
2869 }
2870 if (CI->comesBefore(I)) {
2871 U.set(CI);
2872 MadeChange = true;
2873 }
2874 }
2875
2876 return MadeChange;
2877 }
2878 }
2879
2880 return false;
2881}
2882
2883static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
2884 const CallInst *CI) {
2885 assert(CI && CI->use_empty());
2886
2887 if (const auto *II = dyn_cast<IntrinsicInst>(CI))
2888 switch (II->getIntrinsicID()) {
2889 case Intrinsic::memset:
2890 case Intrinsic::memcpy:
2891 case Intrinsic::memmove:
2892 return true;
2893 default:
2894 return false;
2895 }
2896
2897 LibFunc LF;
2898 Function *Callee = CI->getCalledFunction();
2899 if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
2900 switch (LF) {
2901 case LibFunc_strcpy:
2902 case LibFunc_strncpy:
2903 case LibFunc_strcat:
2904 case LibFunc_strncat:
2905 return true;
2906 default:
2907 return false;
2908 }
2909
2910 return false;
2911}
2912
2913/// Look for opportunities to duplicate return instructions to the predecessor
2914/// to enable tail call optimizations. The case it is currently looking for is
2915/// the following one. Known intrinsics or library functions that may be tail
2916/// called are taken into account as well.
2917/// @code
2918/// bb0:
2919/// %tmp0 = tail call i32 @f0()
2920/// br label %return
2921/// bb1:
2922/// %tmp1 = tail call i32 @f1()
2923/// br label %return
2924/// bb2:
2925/// %tmp2 = tail call i32 @f2()
2926/// br label %return
2927/// return:
2928/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2929/// ret i32 %retval
2930/// @endcode
2931///
2932/// =>
2933///
2934/// @code
2935/// bb0:
2936/// %tmp0 = tail call i32 @f0()
2937/// ret i32 %tmp0
2938/// bb1:
2939/// %tmp1 = tail call i32 @f1()
2940/// ret i32 %tmp1
2941/// bb2:
2942/// %tmp2 = tail call i32 @f2()
2943/// ret i32 %tmp2
2944/// @endcode
2945bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2946 ModifyDT &ModifiedDT) {
2947 if (!BB->getTerminator())
2948 return false;
2949
2950 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2951 if (!RetI)
2952 return false;
2953
2954 assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");
2955
2956 PHINode *PN = nullptr;
2957 ExtractValueInst *EVI = nullptr;
2958 BitCastInst *BCI = nullptr;
2959 Value *V = RetI->getReturnValue();
2960 if (V) {
2961 BCI = dyn_cast<BitCastInst>(V);
2962 if (BCI)
2963 V = BCI->getOperand(0);
2964
2965 EVI = dyn_cast<ExtractValueInst>(V);
2966 if (EVI) {
2967 V = EVI->getOperand(0);
2968 if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2969 return false;
2970 }
2971
2972 PN = dyn_cast<PHINode>(V);
2973 }
2974
2975 if (PN && PN->getParent() != BB)
2976 return false;
2977
2978 auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2979 const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2980 if (BC && BC->hasOneUse())
2981 Inst = BC->user_back();
2982
2983 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2984 return II->getIntrinsicID() == Intrinsic::lifetime_end;
2985 return false;
2986 };
2987
2988 SmallVector<const IntrinsicInst *, 1> FakeUses;
2989
2990 auto isFakeUse = [&FakeUses](const Instruction *Inst) {
2991 if (auto *II = dyn_cast<IntrinsicInst>(Inst);
2992 II && II->getIntrinsicID() == Intrinsic::fake_use) {
2993 // Record the instruction so it can be preserved when the exit block is
2994 // removed. Do not preserve the fake use that uses the result of the
2995 // PHI instruction; such fake uses are not copied into the return blocks
2996 // either.
2997 // FIXME: If we do want to copy the fake use into the return blocks, we
2998 // have to figure out which of the PHI node operands to use for each
2999 // copy.
3000 if (!isa<PHINode>(II->getOperand(0))) {
3001 FakeUses.push_back(II);
3002 }
3003 return true;
3004 }
3005
3006 return false;
3007 };
3008
3009 // Make sure there are no instructions between the first instruction
3010 // and return.
3011 BasicBlock::const_iterator BI = BB->getFirstNonPHIIt();
3012 // Skip over pseudo-probes and the bitcast.
3013 while (&*BI == BCI || &*BI == EVI || isa<PseudoProbeInst>(BI) ||
3014 isLifetimeEndOrBitCastFor(&*BI) || isFakeUse(&*BI))
3015 BI = std::next(BI);
3016 if (&*BI != RetI)
3017 return false;
3018
3019 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
3020 /// call.
3021 const Function *F = BB->getParent();
3022 SmallVector<BasicBlock *, 4> TailCallBBs;
3023 // Record the call instructions so we can insert any fake uses
3024 // that need to be preserved before them.
3025 SmallVector<CallInst *, 4> CallInsts;
3026 if (PN) {
3027 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
3028 // Look through bitcasts.
3029 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
3030 CallInst *CI = dyn_cast<CallInst>(IncomingVal);
3031 BasicBlock *PredBB = PN->getIncomingBlock(I);
3032 // Make sure the phi value is indeed produced by the tail call.
3033 if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
3034 TLI->mayBeEmittedAsTailCall(CI) &&
3035 attributesPermitTailCall(F, CI, RetI, *TLI)) {
3036 TailCallBBs.push_back(PredBB);
3037 CallInsts.push_back(CI);
3038 } else {
3039 // Consider the cases in which the phi value is indirectly produced by
3040 // the tail call, for example when encountering memset(), memmove(),
3041 // strcpy(), whose return value may have been optimized out. In such
3042 // cases, the value needs to be the first function argument.
3043 //
3044 // bb0:
3045 // tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
3046 // br label %return
3047 // return:
3048 // %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
3049 if (PredBB && PredBB->getSingleSuccessor() == BB)
3050 CI = dyn_cast_or_null<CallInst>(
3051 PredBB->getTerminator()->getPrevNode());
3052
3053 if (CI && CI->use_empty() &&
3054 isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
3055 IncomingVal == CI->getArgOperand(0) &&
3056 TLI->mayBeEmittedAsTailCall(CI) &&
3057 attributesPermitTailCall(F, CI, RetI, *TLI)) {
3058 TailCallBBs.push_back(PredBB);
3059 CallInsts.push_back(CI);
3060 }
3061 }
3062 }
3063 } else {
3064 SmallPtrSet<BasicBlock *, 4> VisitedBBs;
3065 for (BasicBlock *Pred : predecessors(BB)) {
3066 if (!VisitedBBs.insert(Pred).second)
3067 continue;
3068 if (Instruction *I = Pred->rbegin()->getPrevNode()) {
3069 CallInst *CI = dyn_cast<CallInst>(I);
3070 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
3071 attributesPermitTailCall(F, CI, RetI, *TLI)) {
3072 // Either we return void or the return value must be the first
3073 // argument of a known intrinsic or library function.
3074 if (!V || isa<UndefValue>(V) ||
3075 (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
3076 V == CI->getArgOperand(0))) {
3077 TailCallBBs.push_back(Pred);
3078 CallInsts.push_back(CI);
3079 }
3080 }
3081 }
3082 }
3083 }
3084
3085 bool Changed = false;
3086 for (auto const &TailCallBB : TailCallBBs) {
3087 // Make sure the call instruction is followed by an unconditional branch to
3088 // the return block.
3089 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
3090 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
3091 continue;
3092
3093 // Duplicate the return into TailCallBB.
3094 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
3095 assert(!VerifyBFIUpdates ||
3096 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
3097 BFI->setBlockFreq(BB,
3098 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
3099 ModifiedDT = ModifyDT::ModifyBBDT;
3100 Changed = true;
3101 ++NumRetsDup;
3102 }
3103
3104 // If we eliminated all predecessors of the block, delete the block now.
3105 if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) {
3106 // Copy the fake uses found in the original return block to all blocks
3107 // that contain tail calls.
3108 for (auto *CI : CallInsts) {
3109 for (auto const *FakeUse : FakeUses) {
3110 auto *ClonedInst = FakeUse->clone();
3111 ClonedInst->insertBefore(CI->getIterator());
3112 }
3113 }
3114 BB->eraseFromParent();
3115 }
3116
3117 return Changed;
3118}
3119
3120//===----------------------------------------------------------------------===//
3121// Memory Optimization
3122//===----------------------------------------------------------------------===//
3123
3124namespace {
3125
3126/// This is an extended version of TargetLowering::AddrMode
3127/// which holds actual Value*'s for register values.
3128struct ExtAddrMode : public TargetLowering::AddrMode {
3129 Value *BaseReg = nullptr;
3130 Value *ScaledReg = nullptr;
3131 Value *OriginalValue = nullptr;
3132 bool InBounds = true;
3133
3134 enum FieldName {
3135 NoField = 0x00,
3136 BaseRegField = 0x01,
3137 BaseGVField = 0x02,
3138 BaseOffsField = 0x04,
3139 ScaledRegField = 0x08,
3140 ScaleField = 0x10,
3141 MultipleFields = 0xff
3142 };
3143
3144 ExtAddrMode() = default;
3145
3146 void print(raw_ostream &OS) const;
3147 void dump() const;
3148
3149 // Replace From in ExtAddrMode with To.
3150 // E.g., SExt insts may be promoted and deleted. We should replace them with
3151 // the promoted values.
3152 void replaceWith(Value *From, Value *To) {
3153 if (ScaledReg == From)
3154 ScaledReg = To;
3155 }
3156
3157 FieldName compare(const ExtAddrMode &other) {
3158 // First check that the types are the same on each field, as differing types
3159 // are something we can't cope with later on.
3160 if (BaseReg && other.BaseReg &&
3161 BaseReg->getType() != other.BaseReg->getType())
3162 return MultipleFields;
3163 if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
3164 return MultipleFields;
3165 if (ScaledReg && other.ScaledReg &&
3166 ScaledReg->getType() != other.ScaledReg->getType())
3167 return MultipleFields;
3168
3169 // Conservatively reject 'inbounds' mismatches.
3170 if (InBounds != other.InBounds)
3171 return MultipleFields;
3172
3173 // Check each field to see if it differs.
3174 unsigned Result = NoField;
3175 if (BaseReg != other.BaseReg)
3176 Result |= BaseRegField;
3177 if (BaseGV != other.BaseGV)
3178 Result |= BaseGVField;
3179 if (BaseOffs != other.BaseOffs)
3180 Result |= BaseOffsField;
3181 if (ScaledReg != other.ScaledReg)
3182 Result |= ScaledRegField;
3183 // Don't count 0 as being a different scale, because that actually means
3184 // unscaled (which will already be counted by having no ScaledReg).
3185 if (Scale && other.Scale && Scale != other.Scale)
3186 Result |= ScaleField;
3187
3188 if (llvm::popcount(Result) > 1)
3189 return MultipleFields;
3190 else
3191 return static_cast<FieldName>(Result);
3192 }
3193
3194 // An AddrMode is trivial if it involves no calculation i.e. it is just a base
3195 // with no offset.
3196 bool isTrivial() {
3197 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
3198 // trivial if at most one of these terms is nonzero, except that BaseGV and
3199 // BaseReg both being zero actually means a null pointer value, which we
3200 // consider to be 'non-zero' here.
3201 return !BaseOffs && !Scale && !(BaseGV && BaseReg);
3202 }
3203
3204 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
3205 switch (Field) {
3206 default:
3207 return nullptr;
3208 case BaseRegField:
3209 return BaseReg;
3210 case BaseGVField:
3211 return BaseGV;
3212 case ScaledRegField:
3213 return ScaledReg;
3214 case BaseOffsField:
3215 return ConstantInt::getSigned(IntPtrTy, BaseOffs);
3216 }
3217 }
3218
3219 void SetCombinedField(FieldName Field, Value *V,
3220 const SmallVectorImpl<ExtAddrMode> &AddrModes) {
3221 switch (Field) {
3222 default:
3223 llvm_unreachable("Unhandled fields are expected to be rejected earlier");
3224 break;
3225 case ExtAddrMode::BaseRegField:
3226 BaseReg = V;
3227 break;
3228 case ExtAddrMode::BaseGVField:
3229 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
3230 // in the BaseReg field.
3231 assert(BaseReg == nullptr);
3232 BaseReg = V;
3233 BaseGV = nullptr;
3234 break;
3235 case ExtAddrMode::ScaledRegField:
3236 ScaledReg = V;
3237 // If we have a mix of scaled and unscaled addrmodes then we want scale
3238 // to be the scale and not zero.
3239 if (!Scale)
3240 for (const ExtAddrMode &AM : AddrModes)
3241 if (AM.Scale) {
3242 Scale = AM.Scale;
3243 break;
3244 }
3245 break;
3246 case ExtAddrMode::BaseOffsField:
3247 // The offset is no longer a constant, so it goes in ScaledReg with a
3248 // scale of 1.
3249 assert(ScaledReg == nullptr);
3250 ScaledReg = V;
3251 Scale = 1;
3252 BaseOffs = 0;
3253 break;
3254 }
3255 }
3256};
3257
3258#ifndef NDEBUG
3259static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
3260 AM.print(OS);
3261 return OS;
3262}
3263#endif
3264
3265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3266void ExtAddrMode::print(raw_ostream &OS) const {
3267 bool NeedPlus = false;
3268 OS << "[";
3269 if (InBounds)
3270 OS << "inbounds ";
3271 if (BaseGV) {
3272 OS << "GV:";
3273 BaseGV->printAsOperand(OS, /*PrintType=*/false);
3274 NeedPlus = true;
3275 }
3276
3277 if (BaseOffs) {
3278 OS << (NeedPlus ? " + " : "") << BaseOffs;
3279 NeedPlus = true;
3280 }
3281
3282 if (BaseReg) {
3283 OS << (NeedPlus ? " + " : "") << "Base:";
3284 BaseReg->printAsOperand(OS, /*PrintType=*/false);
3285 NeedPlus = true;
3286 }
3287 if (Scale) {
3288 OS << (NeedPlus ? " + " : "") << Scale << "*";
3289 ScaledReg->printAsOperand(OS, /*PrintType=*/false);
3290 }
3291
3292 OS << ']';
3293}
3294
3295LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
3296 print(dbgs());
3297 dbgs() << '\n';
3298}
3299#endif
3300
3301} // end anonymous namespace
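// [Editor's note] Illustrative sketch, not part of the pass: how an address
// expression might decompose into the ExtAddrMode fields above, assuming a
// hypothetical access whose pointer is computed as base + idx*4 + 40:
//   %scaled = mul i64 %idx, 4
//   %sum    = add i64 %scaled, 40
//   %addr   = getelementptr inbounds i8, ptr %base, i64 %sum
//   %val    = load i32, ptr %addr
// The matcher below could represent %addr as
//   BaseReg   = %base
//   ScaledReg = %idx, Scale = 4
//   BaseOffs  = 40, InBounds = true
// which ExtAddrMode::print() would show as "[inbounds 40 + Base:%base + 4*%idx]".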
3302
3303namespace {
3304
3305/// This class provides transaction based operation on the IR.
3306/// Every change made through this class is recorded in the internal state and
3307/// can be undone (rollback) until commit is called.
3308/// CGP does not check if instructions could be speculatively executed when
3309/// moved. Preserving the original location would pessimize the debugging
3310/// experience, as well as negatively impact the quality of sample PGO.
3311class TypePromotionTransaction {
3312 /// This represents the common interface of the individual transaction.
3313 /// Each class implements the logic for doing one specific modification on
3314 /// the IR via the TypePromotionTransaction.
3315 class TypePromotionAction {
3316 protected:
3317 /// The Instruction modified.
3318 Instruction *Inst;
3319
3320 public:
3321 /// Constructor of the action.
3322 /// The constructor performs the related action on the IR.
3323 TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
3324
3325 virtual ~TypePromotionAction() = default;
3326
3327 /// Undo the modification done by this action.
3328 /// When this method is called, the IR must be in the same state as it was
3329 /// before this action was applied.
3330 /// \pre Undoing the action works if and only if the IR is in the exact same
3331 /// state as it was directly after this action was applied.
3332 virtual void undo() = 0;
3333
3334 /// Commit every change made by this action.
3335 /// When the results on the IR of the action are to be kept, it is important
3336 /// to call this function, otherwise hidden information may be kept forever.
3337 virtual void commit() {
3338 // Nothing to be done, this action is not doing anything.
3339 }
3340 };
3341
3342 /// Utility to remember the position of an instruction.
3343 class InsertionHandler {
3344 /// Position of an instruction.
3345 /// Either an instruction:
3346 /// - Is the first in a basic block: BB is used.
3347 /// - Has a previous instruction: PrevInst is used.
3348 struct {
3349 BasicBlock::iterator PrevInst;
3350 BasicBlock *BB;
3351 } Point;
3352 std::optional<DbgRecord::self_iterator> BeforeDbgRecord = std::nullopt;
3353
3354 /// Remember whether or not the instruction had a previous instruction.
3355 bool HasPrevInstruction;
3356
3357 public:
3358 /// Record the position of \p Inst.
3359 InsertionHandler(Instruction *Inst) {
3360 HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
3361 BasicBlock *BB = Inst->getParent();
3362
3363 // Record where we would have to re-insert the instruction in the sequence
3364 // of DbgRecords, if we ended up reinserting.
3365 BeforeDbgRecord = Inst->getDbgReinsertionPosition();
3366
3367 if (HasPrevInstruction) {
3368 Point.PrevInst = std::prev(Inst->getIterator());
3369 } else {
3370 Point.BB = BB;
3371 }
3372 }
3373
3374 /// Insert \p Inst at the recorded position.
3375 void insert(Instruction *Inst) {
3376 if (HasPrevInstruction) {
3377 if (Inst->getParent())
3378 Inst->removeFromParent();
3379 Inst->insertAfter(Point.PrevInst);
3380 } else {
3381 BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
3382 if (Inst->getParent())
3383 Inst->moveBefore(*Point.BB, Position);
3384 else
3385 Inst->insertBefore(*Point.BB, Position);
3386 }
3387
3388 Inst->getParent()->reinsertInstInDbgRecords(Inst, BeforeDbgRecord);
3389 }
3390 };
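// [Editor's note] A minimal usage sketch of InsertionHandler (hypothetical
// instruction I; not part of the pass). This mirrors how InstructionRemover
// below uses it to put an instruction back during rollback:
//   InsertionHandler Pos(I);   // remember where I currently lives
//   I->removeFromParent();     // detach it while we speculate
//   ...
//   Pos.insert(I);             // re-insert I at the recorded position and
//                              // restore its place among the DbgRecords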
3391
3392 /// Move an instruction before another.
3393 class InstructionMoveBefore : public TypePromotionAction {
3394 /// Original position of the instruction.
3395 InsertionHandler Position;
3396
3397 public:
3398 /// Move \p Inst before \p Before.
3399 InstructionMoveBefore(Instruction *Inst, BasicBlock::iterator Before)
3400 : TypePromotionAction(Inst), Position(Inst) {
3401 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
3402 << "\n");
3403 Inst->moveBefore(Before);
3404 }
3405
3406 /// Move the instruction back to its original position.
3407 void undo() override {
3408 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
3409 Position.insert(Inst);
3410 }
3411 };
3412
3413 /// Set the operand of an instruction with a new value.
3414 class OperandSetter : public TypePromotionAction {
3415 /// Original operand of the instruction.
3416 Value *Origin;
3417
3418 /// Index of the modified instruction.
3419 unsigned Idx;
3420
3421 public:
3422 /// Set \p Idx operand of \p Inst with \p NewVal.
3423 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
3424 : TypePromotionAction(Inst), Idx(Idx) {
3425 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
3426 << "for:" << *Inst << "\n"
3427 << "with:" << *NewVal << "\n");
3428 Origin = Inst->getOperand(Idx);
3429 Inst->setOperand(Idx, NewVal);
3430 }
3431
3432 /// Restore the original value of the instruction.
3433 void undo() override {
3434 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
3435 << "for: " << *Inst << "\n"
3436 << "with: " << *Origin << "\n");
3437 Inst->setOperand(Idx, Origin);
3438 }
3439 };
3440
3441 /// Hide the operands of an instruction.
3442 /// Act as if this instruction were not using any of its operands.
3443 class OperandsHider : public TypePromotionAction {
3444 /// The list of original operands.
3445 SmallVector<Value *, 4> OriginalValues;
3446
3447 public:
3448 /// Remove \p Inst from the uses of the operands of \p Inst.
3449 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
3450 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
3451 unsigned NumOpnds = Inst->getNumOperands();
3452 OriginalValues.reserve(NumOpnds);
3453 for (unsigned It = 0; It < NumOpnds; ++It) {
3454 // Save the current operand.
3455 Value *Val = Inst->getOperand(It);
3456 OriginalValues.push_back(Val);
3457 // Set a dummy one.
3458 // We could use OperandSetter here, but that would imply an overhead
3459 // that we are not willing to pay.
3460 Inst->setOperand(It, PoisonValue::get(Val->getType()));
3461 }
3462 }
3463
3464 /// Restore the original list of uses.
3465 void undo() override {
3466 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3467 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3468 Inst->setOperand(It, OriginalValues[It]);
3469 }
3470 };
3471
3472 /// Build a truncate instruction.
3473 class TruncBuilder : public TypePromotionAction {
3474 Value *Val;
3475
3476 public:
3477 /// Build a truncate instruction of \p Opnd producing a \p Ty
3478 /// result.
3479 /// trunc Opnd to Ty.
3480 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3481 IRBuilder<> Builder(Opnd);
3482 Builder.SetCurrentDebugLocation(DebugLoc());
3483 Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3484 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3485 }
3486
3487 /// Get the built value.
3488 Value *getBuiltValue() { return Val; }
3489
3490 /// Remove the built instruction.
3491 void undo() override {
3492 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3493 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3494 IVal->eraseFromParent();
3495 }
3496 };
3497
3498 /// Build a sign extension instruction.
3499 class SExtBuilder : public TypePromotionAction {
3500 Value *Val;
3501
3502 public:
3503 /// Build a sign extension instruction of \p Opnd producing a \p Ty
3504 /// result.
3505 /// sext Opnd to Ty.
3506 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3507 : TypePromotionAction(InsertPt) {
3508 IRBuilder<> Builder(InsertPt);
3509 Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3510 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3511 }
3512
3513 /// Get the built value.
3514 Value *getBuiltValue() { return Val; }
3515
3516 /// Remove the built instruction.
3517 void undo() override {
3518 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3519 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3520 IVal->eraseFromParent();
3521 }
3522 };
3523
3524 /// Build a zero extension instruction.
3525 class ZExtBuilder : public TypePromotionAction {
3526 Value *Val;
3527
3528 public:
3529 /// Build a zero extension instruction of \p Opnd producing a \p Ty
3530 /// result.
3531 /// zext Opnd to Ty.
3532 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3533 : TypePromotionAction(InsertPt) {
3534 IRBuilder<> Builder(InsertPt);
3535 Builder.SetCurrentDebugLocation(DebugLoc());
3536 Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3537 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3538 }
3539
3540 /// Get the built value.
3541 Value *getBuiltValue() { return Val; }
3542
3543 /// Remove the built instruction.
3544 void undo() override {
3545 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3546 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3547 IVal->eraseFromParent();
3548 }
3549 };
3550
3551 /// Mutate an instruction to another type.
3552 class TypeMutator : public TypePromotionAction {
3553 /// Record the original type.
3554 Type *OrigTy;
3555
3556 public:
3557 /// Mutate the type of \p Inst into \p NewTy.
3558 TypeMutator(Instruction *Inst, Type *NewTy)
3559 : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3560 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3561 << "\n");
3562 Inst->mutateType(NewTy);
3563 }
3564
3565 /// Mutate the instruction back to its original type.
3566 void undo() override {
3567 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3568 << "\n");
3569 Inst->mutateType(OrigTy);
3570 }
3571 };
3572
3573 /// Replace the uses of an instruction by another instruction.
3574 class UsesReplacer : public TypePromotionAction {
3575 /// Helper structure to keep track of the replaced uses.
3576 struct InstructionAndIdx {
3577 /// The instruction using the replaced instruction.
3578 Instruction *Inst;
3579
3580 /// The operand index at which the replaced instruction is used by Inst.
3581 unsigned Idx;
3582
3583 InstructionAndIdx(Instruction *Inst, unsigned Idx)
3584 : Inst(Inst), Idx(Idx) {}
3585 };
3586
3587 /// Keep track of the original uses (pair Instruction, Index).
3588 SmallVector<InstructionAndIdx, 4> OriginalUses;
3589 /// Keep track of the debug users.
3590 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
3591
3592 /// Keep track of the new value so that we can undo it by replacing
3593 /// instances of the new value with the original value.
3594 Value *New;
3595
3597
3598 public:
3599 /// Replace all the uses of \p Inst by \p New.
3600 UsesReplacer(Instruction *Inst, Value *New)
3601 : TypePromotionAction(Inst), New(New) {
3602 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3603 << "\n");
3604 // Record the original uses.
3605 for (Use &U : Inst->uses()) {
3606 Instruction *UserI = cast<Instruction>(U.getUser());
3607 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3608 }
3609 // Record the debug uses separately. They are not in the instruction's
3610 // use list, but they are replaced by RAUW.
3611 findDbgValues(Inst, DbgVariableRecords);
3612
3613 // Now, we can replace the uses.
3614 Inst->replaceAllUsesWith(New);
3615 }
3616
3617 /// Reassign the original uses of Inst to Inst.
3618 void undo() override {
3619 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3620 for (InstructionAndIdx &Use : OriginalUses)
3621 Use.Inst->setOperand(Use.Idx, Inst);
3622 // RAUW has replaced all original uses with references to the new value,
3623 // including the debug uses. Since we are undoing the replacements,
3624 // the original debug uses must also be reinstated to maintain the
3625 // correctness and utility of debug value records.
3626 for (DbgVariableRecord *DVR : DbgVariableRecords)
3627 DVR->replaceVariableLocationOp(New, Inst);
3628 }
3629 };
3630
3631 /// Remove an instruction from the IR.
3632 class InstructionRemover : public TypePromotionAction {
3633 /// Original position of the instruction.
3634 InsertionHandler Inserter;
3635
3636 /// Helper structure to hide all the links to the instruction. In other
3637 /// words, this helps to act as if the instruction were removed.
3638 OperandsHider Hider;
3639
3640 /// Keep track of the uses replaced, if any.
3641 UsesReplacer *Replacer = nullptr;
3642
3643 /// Keep track of instructions removed.
3644 SetOfInstrs &RemovedInsts;
3645
3646 public:
3647 /// Remove all references to \p Inst and optionally replace all its
3648 /// uses with New.
3649 /// \p RemovedInsts Keep track of the instructions removed by this Action.
3650 /// \pre If !Inst->use_empty(), then New != nullptr
3651 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3652 Value *New = nullptr)
3653 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3654 RemovedInsts(RemovedInsts) {
3655 if (New)
3656 Replacer = new UsesReplacer(Inst, New);
3657 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3658 RemovedInsts.insert(Inst);
3659 /// The instructions removed here will be freed after completing
3660 /// optimizeBlock() for all blocks as we need to keep track of the
3661 /// removed instructions during promotion.
3662 Inst->removeFromParent();
3663 }
3664
3665 ~InstructionRemover() override { delete Replacer; }
3666
3667 InstructionRemover &operator=(const InstructionRemover &other) = delete;
3668 InstructionRemover(const InstructionRemover &other) = delete;
3669
3670 /// Resurrect the instruction and reassign it to the proper uses if a
3671 /// new value was provided when building this action.
3672 void undo() override {
3673 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3674 Inserter.insert(Inst);
3675 if (Replacer)
3676 Replacer->undo();
3677 Hider.undo();
3678 RemovedInsts.erase(Inst);
3679 }
3680 };
3681
3682public:
3683 /// Restoration point.
3684 /// The restoration point is a pointer to an action instead of an iterator
3685 /// because the iterator may be invalidated but not the pointer.
3686 using ConstRestorationPt = const TypePromotionAction *;
3687
3688 TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3689 : RemovedInsts(RemovedInsts) {}
3690
3691 /// Commit every change made in this transaction. Return true if any change
3692 /// happened.
3693 bool commit();
3694
3695 /// Undo all the changes made after the given point.
3696 void rollback(ConstRestorationPt Point);
3697
3698 /// Get the current restoration point.
3699 ConstRestorationPt getRestorationPoint() const;
3700
3701 /// \name API for IR modification with state keeping to support rollback.
3702 /// @{
3703 /// Same as Instruction::setOperand.
3704 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3705
3706 /// Same as Instruction::eraseFromParent.
3707 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3708
3709 /// Same as Value::replaceAllUsesWith.
3710 void replaceAllUsesWith(Instruction *Inst, Value *New);
3711
3712 /// Same as Value::mutateType.
3713 void mutateType(Instruction *Inst, Type *NewTy);
3714
3715 /// Same as IRBuilder::createTrunc.
3716 Value *createTrunc(Instruction *Opnd, Type *Ty);
3717
3718 /// Same as IRBuilder::createSExt.
3719 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3720
3721 /// Same as IRBuilder::createZExt.
3722 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3723
3724private:
3725 /// The ordered list of actions made so far.
3726 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3727
3728 using CommitPt =
3729 SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3730
3731 SetOfInstrs &RemovedInsts;
3732};
3733
3734} // end anonymous namespace
3735
3736void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3737 Value *NewVal) {
3738 Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3739 Inst, Idx, NewVal));
3740}
3741
3742void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3743 Value *NewVal) {
3744 Actions.push_back(
3745 std::make_unique<TypePromotionTransaction::InstructionRemover>(
3746 Inst, RemovedInsts, NewVal));
3747}
3748
3749void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3750 Value *New) {
3751 Actions.push_back(
3752 std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3753}
3754
3755void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3756 Actions.push_back(
3757 std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3758}
3759
3760Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3761 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3762 Value *Val = Ptr->getBuiltValue();
3763 Actions.push_back(std::move(Ptr));
3764 return Val;
3765}
3766
3767Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3768 Type *Ty) {
3769 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3770 Value *Val = Ptr->getBuiltValue();
3771 Actions.push_back(std::move(Ptr));
3772 return Val;
3773}
3774
3775Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3776 Type *Ty) {
3777 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3778 Value *Val = Ptr->getBuiltValue();
3779 Actions.push_back(std::move(Ptr));
3780 return Val;
3781}
3782
3783TypePromotionTransaction::ConstRestorationPt
3784TypePromotionTransaction::getRestorationPoint() const {
3785 return !Actions.empty() ? Actions.back().get() : nullptr;
3786}
3787
3788bool TypePromotionTransaction::commit() {
3789 for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3790 Action->commit();
3791 bool Modified = !Actions.empty();
3792 Actions.clear();
3793 return Modified;
3794}
3795
3796void TypePromotionTransaction::rollback(
3797 TypePromotionTransaction::ConstRestorationPt Point) {
3798 while (!Actions.empty() && Point != Actions.back().get()) {
3799 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3800 Curr->undo();
3801 }
3802}
3803
3804namespace {
3805
3806/// A helper class for matching addressing modes.
3807///
3808/// This encapsulates the logic for matching the target-legal addressing modes.
3809class AddressingModeMatcher {
3810 SmallVectorImpl<Instruction *> &AddrModeInsts;
3811 const TargetLowering &TLI;
3812 const TargetRegisterInfo &TRI;
3813 const DataLayout &DL;
3814 const LoopInfo &LI;
3815 const std::function<const DominatorTree &()> getDTFn;
3816
3817 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3818 /// the memory instruction that we're computing this address for.
3819 Type *AccessTy;
3820 unsigned AddrSpace;
3821 Instruction *MemoryInst;
3822
3823 /// This is the addressing mode that we're building up. This is
3824 /// part of the return value of this addressing mode matching stuff.
3825 ExtAddrMode &AddrMode;
3826
3827 /// The instructions inserted by other CodeGenPrepare optimizations.
3828 const SetOfInstrs &InsertedInsts;
3829
3830 /// A map from the instructions to their type before promotion.
3831 InstrToOrigTy &PromotedInsts;
3832
3833 /// The ongoing transaction where every action should be registered.
3834 TypePromotionTransaction &TPT;
3835
3836 // A GEP which has too large offset to be folded into the addressing mode.
3837 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3838
3839 /// This is set to true when we should not do profitability checks.
3840 /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3841 bool IgnoreProfitability;
3842
3843 /// True if we are optimizing for size.
3844 bool OptSize = false;
3845
3846 ProfileSummaryInfo *PSI;
3847 BlockFrequencyInfo *BFI;
3848
3849 AddressingModeMatcher(
3850 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3851 const TargetRegisterInfo &TRI, const LoopInfo &LI,
3852 const std::function<const DominatorTree &()> getDTFn, Type *AT,
3853 unsigned AS, Instruction *MI, ExtAddrMode &AM,
3854 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3855 TypePromotionTransaction &TPT,
3856 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3857 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3858 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3859 DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
3860 AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3861 InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3862 LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3863 IgnoreProfitability = false;
3864 }
3865
3866public:
3867 /// Find the maximal addressing mode that a load/store of V can fold,
3868 /// given an access type of AccessTy. This returns a list of involved
3869 /// instructions in AddrModeInsts.
3870 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3871 /// optimizations.
3872 /// \p PromotedInsts maps the instructions to their type before promotion.
3873 /// \p TPT The ongoing transaction where every action should be registered.
3874 static ExtAddrMode
3875 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3876 SmallVectorImpl<Instruction *> &AddrModeInsts,
3877 const TargetLowering &TLI, const LoopInfo &LI,
3878 const std::function<const DominatorTree &()> getDTFn,
3879 const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3880 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3881 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3882 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3883 ExtAddrMode Result;
3884
3885 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3886 AccessTy, AS, MemoryInst, Result,
3887 InsertedInsts, PromotedInsts, TPT,
3888 LargeOffsetGEP, OptSize, PSI, BFI)
3889 .matchAddr(V, 0);
3890 (void)Success;
3891 assert(Success && "Couldn't select *anything*?");
3892 return Result;
3893 }
3894
3895private:
3896 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3897 bool matchAddr(Value *Addr, unsigned Depth);
3898 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3899 bool *MovedAway = nullptr);
3900 bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3901 ExtAddrMode &AMBefore,
3902 ExtAddrMode &AMAfter);
3903 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3904 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3905 Value *PromotedOperand) const;
3906};
3907
3908class PhiNodeSet;
3909
3910/// An iterator for PhiNodeSet.
3911class PhiNodeSetIterator {
3912 PhiNodeSet *const Set;
3913 size_t CurrentIndex = 0;
3914
3915public:
3916 /// The constructor. Start should point to either a valid element, or be equal
3917 /// to the size of the underlying SmallVector of the PhiNodeSet.
3918 PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3919 PHINode *operator*() const;
3920 PhiNodeSetIterator &operator++();
3921 bool operator==(const PhiNodeSetIterator &RHS) const;
3922 bool operator!=(const PhiNodeSetIterator &RHS) const;
3923};
3924
3925/// Keeps a set of PHINodes.
3926///
3927/// This is a minimal set implementation for a specific use case:
3928/// It is very fast when there are very few elements, but also provides good
3929/// performance when there are many. It is similar to SmallPtrSet, but also
3930/// provides iteration by insertion order, which is deterministic and stable
3931 /// across runs. It is also similar to SmallSetVector, but allows removing
3932 /// elements in O(1) time. This is achieved by not actually removing the element
3933 /// from the underlying vector, so it comes at the cost of using more memory, but
3934 /// that is fine, since PhiNodeSets are used as short-lived objects.
3935class PhiNodeSet {
3936 friend class PhiNodeSetIterator;
3937
3938 using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3939 using iterator = PhiNodeSetIterator;
3940
3941 /// Keeps the elements in the order of their insertion in the underlying
3942 /// vector. To achieve constant time removal, it never deletes any element.
3943 SmallVector<PHINode *, 32> NodeList;
3944
3945 /// Keeps the elements in the underlying set implementation. This (and not the
3946 /// NodeList defined above) is the source of truth on whether an element
3947 /// is actually in the collection.
3948 MapType NodeMap;
3949
3950 /// Points to the first valid (not deleted) element when the set is not empty
3951 /// and the value is not zero. Equals the size of the underlying vector
3952 /// when the set is empty. When the value is 0, as in the beginning, the
3953 /// first element may or may not be valid.
3954 size_t FirstValidElement = 0;
3955
3956public:
3957 /// Inserts a new element to the collection.
3958 /// \returns true if the element is actually added, i.e. was not in the
3959 /// collection before the operation.
3960 bool insert(PHINode *Ptr) {
3961 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3962 NodeList.push_back(Ptr);
3963 return true;
3964 }
3965 return false;
3966 }
3967
3968 /// Removes the element from the collection.
3969 /// \returns whether the element is actually removed, i.e. was in the
3970 /// collection before the operation.
3971 bool erase(PHINode *Ptr) {
3972 if (NodeMap.erase(Ptr)) {
3973 SkipRemovedElements(FirstValidElement);
3974 return true;
3975 }
3976 return false;
3977 }
3978
3979 /// Removes all elements and clears the collection.
3980 void clear() {
3981 NodeMap.clear();
3982 NodeList.clear();
3983 FirstValidElement = 0;
3984 }
3985
3986 /// \returns an iterator that will iterate the elements in the order of
3987 /// insertion.
3988 iterator begin() {
3989 if (FirstValidElement == 0)
3990 SkipRemovedElements(FirstValidElement);
3991 return PhiNodeSetIterator(this, FirstValidElement);
3992 }
3993
3994 /// \returns an iterator that points to the end of the collection.
3995 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3996
3997 /// Returns the number of elements in the collection.
3998 size_t size() const { return NodeMap.size(); }
3999
4000 /// \returns 1 if the given element is in the collection, and 0 otherwise.
4001 size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
4002
4003private:
4004 /// Updates the CurrentIndex so that it will point to a valid element.
4005 ///
4006 /// If the element of NodeList at CurrentIndex is valid, it does not
4007 /// change it. If there are no more valid elements, it updates CurrentIndex
4008 /// to point to the end of the NodeList.
4009 void SkipRemovedElements(size_t &CurrentIndex) {
4010 while (CurrentIndex < NodeList.size()) {
4011 auto it = NodeMap.find(NodeList[CurrentIndex]);
4012 // If the element has been deleted and added again later, NodeMap will
4013 // point to a different index, so CurrentIndex will still be invalid.
4014 if (it != NodeMap.end() && it->second == CurrentIndex)
4015 break;
4016 ++CurrentIndex;
4017 }
4018 }
4019};
4020
4021PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
4022 : Set(Set), CurrentIndex(Start) {}
4023
4024PHINode *PhiNodeSetIterator::operator*() const {
4025 assert(CurrentIndex < Set->NodeList.size() &&
4026 "PhiNodeSet access out of range");
4027 return Set->NodeList[CurrentIndex];
4028}
4029
4030PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
4031 assert(CurrentIndex < Set->NodeList.size() &&
4032 "PhiNodeSet access out of range");
4033 ++CurrentIndex;
4034 Set->SkipRemovedElements(CurrentIndex);
4035 return *this;
4036}
4037
4038bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
4039 return CurrentIndex == RHS.CurrentIndex;
4040}
4041
4042bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
4043 return !((*this) == RHS);
4044}
4045
4046/// Keep track of simplification of Phi nodes.
4047 /// Accepts the set of all phi nodes and erases a phi node from this set
4048 /// if it is simplified.
4049class SimplificationTracker {
4050 DenseMap<Value *, Value *> Storage;
4051 // Tracks newly created Phi nodes. The elements are iterated by insertion
4052 // order.
4053 PhiNodeSet AllPhiNodes;
4054 // Tracks newly created Select nodes.
4055 SmallPtrSet<SelectInst *, 32> AllSelectNodes;
4056
4057public:
4058 Value *Get(Value *V) {
4059 do {
4060 auto SV = Storage.find(V);
4061 if (SV == Storage.end())
4062 return V;
4063 V = SV->second;
4064 } while (true);
4065 }
4066
4067 void Put(Value *From, Value *To) { Storage.insert({From, To}); }
4068
4069 void ReplacePhi(PHINode *From, PHINode *To) {
4070 Value *OldReplacement = Get(From);
4071 while (OldReplacement != From) {
4072 From = To;
4073 To = dyn_cast<PHINode>(OldReplacement);
4074 OldReplacement = Get(From);
4075 }
4076 assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
4077 Put(From, To);
4078 From->replaceAllUsesWith(To);
4079 AllPhiNodes.erase(From);
4080 From->eraseFromParent();
4081 }
4082
4083 PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
4084
4085 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
4086
4087 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
4088
4089 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
4090
4091 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
4092
4093 void destroyNewNodes(Type *CommonType) {
4094 // For safe erasing, replace the uses with dummy value first.
4095 auto *Dummy = PoisonValue::get(CommonType);
4096 for (auto *I : AllPhiNodes) {
4097 I->replaceAllUsesWith(Dummy);
4098 I->eraseFromParent();
4099 }
4100 AllPhiNodes.clear();
4101 for (auto *I : AllSelectNodes) {
4102 I->replaceAllUsesWith(Dummy);
4103 I->eraseFromParent();
4104 }
4105 AllSelectNodes.clear();
4106 }
4107};
4108
4109/// A helper class for combining addressing modes.
4110class AddressingModeCombiner {
4111 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
4112 typedef std::pair<PHINode *, PHINode *> PHIPair;
4113
4114private:
4115 /// The addressing modes we've collected.
4116 SmallVector<ExtAddrMode, 16> AddrModes;
4117
4118 /// The field in which the AddrModes differ, when we have more than one.
4119 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
4120
4121 /// Are the AddrModes that we have all just equal to their original values?
4122 bool AllAddrModesTrivial = true;
4123
4124 /// Common Type for all different fields in addressing modes.
4125 Type *CommonType = nullptr;
4126
4127 const DataLayout &DL;
4128
4129 /// Original Address.
4130 Value *Original;
4131
4132 /// Common value among addresses
4133 Value *CommonValue = nullptr;
4134
4135public:
4136 AddressingModeCombiner(const DataLayout &DL, Value *OriginalValue)
4137 : DL(DL), Original(OriginalValue) {}
4138
4139 ~AddressingModeCombiner() { eraseCommonValueIfDead(); }
4140
4141 /// Get the combined AddrMode
4142 const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
4143
4144 /// Add a new AddrMode if it's compatible with the AddrModes we already
4145 /// have.
4146 /// \return True iff we succeeded in doing so.
4147 bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
4148 // Take note of whether we have any non-trivial AddrModes, as we need to detect
4149 // when all AddrModes are trivial as then we would introduce a phi or select
4150 // which just duplicates what's already there.
4151 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
4152
4153 // If this is the first addrmode then everything is fine.
4154 if (AddrModes.empty()) {
4155 AddrModes.emplace_back(NewAddrMode);
4156 return true;
4157 }
4158
4159 // Figure out how different this is from the other address modes, which we
4160 // can do just by comparing against the first one given that we only care
4161 // about the cumulative difference.
4162 ExtAddrMode::FieldName ThisDifferentField =
4163 AddrModes[0].compare(NewAddrMode);
4164 if (DifferentField == ExtAddrMode::NoField)
4165 DifferentField = ThisDifferentField;
4166 else if (DifferentField != ThisDifferentField)
4167 DifferentField = ExtAddrMode::MultipleFields;
4168
4169 // If NewAddrMode differs in more than one dimension we cannot handle it.
4170 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
4171
4172 // If Scale Field is different then we reject.
4173 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
4174
4175 // We also must reject the case when the base offset is different and the
4176 // scaled reg is not null: we cannot handle this case because the merge of
4177 // the different offsets would have to be used as the ScaledReg.
4178 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
4179 !NewAddrMode.ScaledReg);
4180
4181 // We also must reject the case when the GV is different and a BaseReg is
4182 // installed, because we want to use the base reg as a merge of GV values.
4183 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
4184 !NewAddrMode.HasBaseReg);
4185
4186 // Even if NewAddrMode is the same we still need to collect it because the
4187 // original value is different, and later we will need all original values
4188 // as anchors when finding the common Phi node.
4189 if (CanHandle)
4190 AddrModes.emplace_back(NewAddrMode);
4191 else
4192 AddrModes.clear();
4193
4194 return CanHandle;
4195 }
4196
4197 /// Combine the addressing modes we've collected into a single
4198 /// addressing mode.
4199 /// \return True iff we successfully combined them or we only had one so
4200 /// didn't need to combine them anyway.
4201 bool combineAddrModes() {
4202 // If we have no AddrModes then they can't be combined.
4203 if (AddrModes.size() == 0)
4204 return false;
4205
4206 // A single AddrMode can trivially be combined.
4207 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
4208 return true;
4209
4210 // If the AddrModes we collected are all just equal to the value they are
4211 // derived from then combining them wouldn't do anything useful.
4212 if (AllAddrModesTrivial)
4213 return false;
4214
4215 if (!addrModeCombiningAllowed())
4216 return false;
4217
4218 // Build a map from <original value, basic block where we saw it> to the
4219 // value of the base register.
4220 // Bail out if there is no common type.
4221 FoldAddrToValueMapping Map;
4222 if (!initializeMap(Map))
4223 return false;
4224
4225 CommonValue = findCommon(Map);
4226 if (CommonValue)
4227 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
4228 return CommonValue != nullptr;
4229 }
4230
4231private:
4232 /// `CommonValue` may be a placeholder inserted by us.
4233 /// If the placeholder is not used, we should remove this dead instruction.
4234 void eraseCommonValueIfDead() {
4235 if (CommonValue && CommonValue->use_empty())
4236 if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
4237 CommonInst->eraseFromParent();
4238 }
4239
4240 /// Initialize Map with anchor values. For each address seen,
4241 /// we set the value of the differing field seen in that address.
4242 /// At the same time we find a common type for the differing field that we
4243 /// will use to create new Phi/Select nodes. Keep it in the CommonType field.
4244 /// Return false if there is no common type found.
4245 bool initializeMap(FoldAddrToValueMapping &Map) {
4246 // Keep track of keys where the value is null. We will need to replace it
4247 // with constant null when we know the common type.
4248 SmallVector<Value *, 2> NullValue;
4249 Type *IntPtrTy = DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
4250 for (auto &AM : AddrModes) {
4251 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
4252 if (DV) {
4253 auto *Type = DV->getType();
4254 if (CommonType && CommonType != Type)
4255 return false;
4256 CommonType = Type;
4257 Map[AM.OriginalValue] = DV;
4258 } else {
4259 NullValue.push_back(AM.OriginalValue);
4260 }
4261 }
4262 assert(CommonType && "At least one non-null value must be!");
4263 for (auto *V : NullValue)
4264 Map[V] = Constant::getNullValue(CommonType);
4265 return true;
4266 }
4267
4268 /// We have a mapping from value A to value B, where B was a field in the
4269 /// addressing mode represented by A. We also have an original value C
4270 /// representing the address we start with. Traversing from C through phis and
4271 /// selects, we ended up with the A's in the map. This utility function tries
4272 /// to find a value V which is a field of the addressing mode of C such that,
4273 /// traversing through phi nodes and selects, we end up at the corresponding
4274 /// B values in the map. The utility will create new Phi/Select nodes if needed.
4275 // The simple example looks as follows:
4276 // BB1:
4277 // p1 = b1 + 40
4278 // br cond BB2, BB3
4279 // BB2:
4280 // p2 = b2 + 40
4281 // br BB3
4282 // BB3:
4283 // p = phi [p1, BB1], [p2, BB2]
4284 // v = load p
4285 // Map is
4286 // p1 -> b1
4287 // p2 -> b2
4288 // Request is
4289 // p -> ?
4290 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
4291 Value *findCommon(FoldAddrToValueMapping &Map) {
4292 // Tracks the simplification of newly created phi nodes. The reason we use
4293 // this mapping is that we will add newly created Phi nodes to AddrToBase.
4294 // Simplification of Phi nodes is recursive, so some Phi node may
4295 // be simplified after we added it to AddrToBase. In reality this
4296 // simplification is possible only if original phi/selects were not
4297 // simplified yet.
4298 // Using this mapping we can find the current value in AddrToBase.
4299 SimplificationTracker ST;
4300
4301 // First step, DFS to create PHI nodes for all intermediate blocks.
4302 // Also fill traverse order for the second step.
4303 SmallVector<Value *, 32> TraverseOrder;
4304 InsertPlaceholders(Map, TraverseOrder, ST);
4305
4306 // Second Step, fill new nodes by merged values and simplify if possible.
4307 FillPlaceholders(Map, TraverseOrder, ST);
4308
4309 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
4310 ST.destroyNewNodes(CommonType);
4311 return nullptr;
4312 }
4313
4314 // Now we'd like to match new Phi nodes to existing ones.
4315 unsigned PhiNotMatchedCount = 0;
4316 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
4317 ST.destroyNewNodes(CommonType);
4318 return nullptr;
4319 }
4320
4321 auto *Result = ST.Get(Map.find(Original)->second);
4322 if (Result) {
4323 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
4324 NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
4325 }
4326 return Result;
4327 }
4328
4329 /// Try to match PHI node to Candidate.
4330 /// Matcher tracks the matched Phi nodes.
4331 bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
4332 SmallSetVector<PHIPair, 8> &Matcher,
4333 PhiNodeSet &PhiNodesToMatch) {
4334 SmallVector<PHIPair, 8> WorkList;
4335 Matcher.insert({PHI, Candidate});
4336 SmallPtrSet<PHINode *, 8> MatchedPHIs;
4337 MatchedPHIs.insert(PHI);
4338 WorkList.push_back({PHI, Candidate});
4339 SmallSet<PHIPair, 8> Visited;
4340 while (!WorkList.empty()) {
4341 auto Item = WorkList.pop_back_val();
4342 if (!Visited.insert(Item).second)
4343 continue;
4344 // We iterate over all incoming values of the Phis to compare them.
4345 // If the values are different, both of them are Phis, the first one is a
4346 // Phi we added (subject to match), and both of them are in the same basic
4347 // block, then we can match our pair if the values match. So we state that
4348 // these values match and add them to the work list to verify that.
4349 for (auto *B : Item.first->blocks()) {
4350 Value *FirstValue = Item.first->getIncomingValueForBlock(B);
4351 Value *SecondValue = Item.second->getIncomingValueForBlock(B);
4352 if (FirstValue == SecondValue)
4353 continue;
4354
4355 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
4356 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
4357
4358 // If one of them is not a Phi, or
4359 // the first one is not a Phi node from the set we'd like to match, or
4360 // the Phi nodes are from different basic blocks, then
4361 // we will not be able to match.
4362 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
4363 FirstPhi->getParent() != SecondPhi->getParent())
4364 return false;
4365
4366 // If we already matched them then continue.
4367 if (Matcher.count({FirstPhi, SecondPhi}))
4368 continue;
4369 // So the values are different and do not match. So we need them to
4370 // match. (But we register no more than one match per PHI node, so that
4371 // we won't later try to replace them twice.)
4372 if (MatchedPHIs.insert(FirstPhi).second)
4373 Matcher.insert({FirstPhi, SecondPhi});
4374 // But we must check it.
4375 WorkList.push_back({FirstPhi, SecondPhi});
4376 }
4377 }
4378 return true;
4379 }
4380
4381 /// For the given set of PHI nodes (in the SimplificationTracker) try
4382 /// to find their equivalents.
4383 /// Returns false if this matching fails and creation of new Phi is disabled.
4384 bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
4385 unsigned &PhiNotMatchedCount) {
4386 // Matched and PhiNodesToMatch iterate their elements in a deterministic
4387 // order, so the replacements (ReplacePhi) are also done in a deterministic
4388 // order.
4389 SmallSetVector<PHIPair, 8> Matched;
4390 SmallPtrSet<PHINode *, 8> WillNotMatch;
4391 PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
4392 while (PhiNodesToMatch.size()) {
4393 PHINode *PHI = *PhiNodesToMatch.begin();
4394
4395 // Add ourselves; if no Phi node in the basic block matches, we do not match.
4396 WillNotMatch.clear();
4397 WillNotMatch.insert(PHI);
4398
4399 // Traverse all Phis until we find an equivalent or fail to do so.
4400 bool IsMatched = false;
4401 for (auto &P : PHI->getParent()->phis()) {
4402 // Skip new Phi nodes.
4403 if (PhiNodesToMatch.count(&P))
4404 continue;
4405 if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
4406 break;
4407 // If it does not match, collect all Phi nodes from the matcher.
4408 // If we end up with no match, then all these Phi nodes will not match
4409 // later.
4410 WillNotMatch.insert_range(llvm::make_first_range(Matched));
4411 Matched.clear();
4412 }
4413 if (IsMatched) {
4414 // Replace all matched values and erase them.
4415 for (auto MV : Matched)
4416 ST.ReplacePhi(MV.first, MV.second);
4417 Matched.clear();
4418 continue;
4419 }
4420 // If we are not allowed to create new nodes then bail out.
4421 if (!AllowNewPhiNodes)
4422 return false;
4423 // Just remove all seen values in matcher. They will not match anything.
4424 PhiNotMatchedCount += WillNotMatch.size();
4425 for (auto *P : WillNotMatch)
4426 PhiNodesToMatch.erase(P);
4427 }
4428 return true;
4429 }
4430 /// Fill the placeholders with values from predecessors and simplify them.
4431 void FillPlaceholders(FoldAddrToValueMapping &Map,
4432 SmallVectorImpl<Value *> &TraverseOrder,
4433 SimplificationTracker &ST) {
4434 while (!TraverseOrder.empty()) {
4435 Value *Current = TraverseOrder.pop_back_val();
4436 assert(Map.contains(Current) && "No node to fill!!!");
4437 Value *V = Map[Current];
4438
4439 if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
4440 // CurrentValue also must be Select.
4441 auto *CurrentSelect = cast<SelectInst>(Current);
4442 auto *TrueValue = CurrentSelect->getTrueValue();
4443 assert(Map.contains(TrueValue) && "No True Value!");
4444 Select->setTrueValue(ST.Get(Map[TrueValue]));
4445 auto *FalseValue = CurrentSelect->getFalseValue();
4446 assert(Map.contains(FalseValue) && "No False Value!");
4447 Select->setFalseValue(ST.Get(Map[FalseValue]));
4448 } else {
4449 // Must be a Phi node then.
4450 auto *PHI = cast<PHINode>(V);
4451 // Fill the Phi node with values from predecessors.
4452 for (auto *B : predecessors(PHI->getParent())) {
4453 Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
4454 assert(Map.contains(PV) && "No predecessor Value!");
4455 PHI->addIncoming(ST.Get(Map[PV]), B);
4456 }
4457 }
4458 }
4459 }
4460
4461 /// Starting from the original value, recursively iterates over the def-use
4462 /// chain up to known ending values represented in a map. For each traversed
4463 /// phi/select inserts a placeholder Phi or Select.
4464 /// Reports all newly created Phi/Select nodes by adding them to the set.
4465 /// Also reports the order in which the values have been traversed.
4466 void InsertPlaceholders(FoldAddrToValueMapping &Map,
4467 SmallVectorImpl<Value *> &TraverseOrder,
4468 SimplificationTracker &ST) {
4469 SmallVector<Value *, 32> Worklist;
4470 assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
4471 "Address must be a Phi or Select node");
4472 auto *Dummy = PoisonValue::get(CommonType);
4473 Worklist.push_back(Original);
4474 while (!Worklist.empty()) {
4475 Value *Current = Worklist.pop_back_val();
4476 // if it is already visited or it is an ending value then skip it.
4477 if (Map.contains(Current))
4478 continue;
4479 TraverseOrder.push_back(Current);
4480
4481 // CurrentValue must be a Phi node or select. All others must be covered
4482 // by anchors.
4483 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
4484 // Is it OK to get metadata from OrigSelect?!
4485 // Create a Select placeholder with dummy value.
4486 SelectInst *Select =
4487 SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
4488 CurrentSelect->getName(),
4489 CurrentSelect->getIterator(), CurrentSelect);
4490 Map[Current] = Select;
4491 ST.insertNewSelect(Select);
4492 // We are interested in True and False values.
4493 Worklist.push_back(CurrentSelect->getTrueValue());
4494 Worklist.push_back(CurrentSelect->getFalseValue());
4495 } else {
4496 // It must be a Phi node then.
4497 PHINode *CurrentPhi = cast<PHINode>(Current);
4498 unsigned PredCount = CurrentPhi->getNumIncomingValues();
4499 PHINode *PHI =
4500 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi->getIterator());
4501 Map[Current] = PHI;
4502 ST.insertNewPhi(PHI);
4503 append_range(Worklist, CurrentPhi->incoming_values());
4504 }
4505 }
4506 }
4507
4508 bool addrModeCombiningAllowed() {
4509 if (DisableComplexAddrModes)
4510 return false;
4511 switch (DifferentField) {
4512 default:
4513 return false;
4514 case ExtAddrMode::BaseRegField:
4515 return AddrSinkCombineBaseReg;
4516 case ExtAddrMode::BaseGVField:
4517 return AddrSinkCombineBaseGV;
4518 case ExtAddrMode::BaseOffsField:
4519 return AddrSinkCombineBaseOffs;
4520 case ExtAddrMode::ScaledRegField:
4521 return AddrSinkCombineScaledReg;
4522 }
4523 }
4524};
4525} // end anonymous namespace
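// [Editor's note] Sketch of how the combiner above is typically driven
// (hypothetical AM1/AM2 matched from two phi-incoming addresses; not part of
// the pass):
//   AddressingModeCombiner AMC(DL, OriginalAddr);
//   AMC.addNewAddrMode(AM1);          // the first mode is always accepted
//   if (AMC.addNewAddrMode(AM2) &&    // rejected if modes differ in >1 field
//       AMC.combineAddrModes()) {     // may build a common phi/select
//     const ExtAddrMode &AM = AMC.getAddrMode();
//     // ... fold AM into the memory instruction ...
//   }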
4526
4527/// Try adding ScaleReg*Scale to the current addressing mode.
4528/// Return true and update AddrMode if this addr mode is legal for the target,
4529/// false if not.
4530bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
4531 unsigned Depth) {
4532 // If Scale is 1, then this is the same as adding ScaleReg to the addressing
4533 // mode. Just process that directly.
4534 if (Scale == 1)
4535 return matchAddr(ScaleReg, Depth);
4536
4537 // If the scale is 0, it takes nothing to add this.
4538 if (Scale == 0)
4539 return true;
4540
4541 // If we already have a scale of this value, we can add to it, otherwise, we
4542 // need an available scale field.
4543 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
4544 return false;
4545
4546 ExtAddrMode TestAddrMode = AddrMode;
4547
4548 // Add scale to turn X*4+X*3 -> X*7. This could also do things like
4549 // [A+B + A*7] -> [B+A*8].
4550 TestAddrMode.Scale += Scale;
4551 TestAddrMode.ScaledReg = ScaleReg;
4552
4553 // If the new address isn't legal, bail out.
4554 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
4555 return false;
4556
4557 // It was legal, so commit it.
4558 AddrMode = TestAddrMode;
4559
4560 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
4561 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
4562 // X*Scale + C*Scale to the addr mode. If we find an available IV increment, do not
4563 // go any further: we can reuse it and cannot eliminate it.
4564 ConstantInt *CI = nullptr;
4565 Value *AddLHS = nullptr;
4566 if (isa<Instruction>(ScaleReg) && // not a constant expr.
4567 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4568 !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4569 TestAddrMode.InBounds = false;
4570 TestAddrMode.ScaledReg = AddLHS;
4571 TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4572
4573 // If this addressing mode is legal, commit it and remember that we folded
4574 // this instruction.
4575 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4576 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4577 AddrMode = TestAddrMode;
4578 return true;
4579 }
4580 // Restore status quo.
4581 TestAddrMode = AddrMode;
4582 }
4583
4584 // If this is an add recurrence with a constant step, return the increment
4585 // instruction and the canonicalized step.
4586 auto GetConstantStep =
4587 [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4588 auto *PN = dyn_cast<PHINode>(V);
4589 if (!PN)
4590 return std::nullopt;
4591 auto IVInc = getIVIncrement(PN, &LI);
4592 if (!IVInc)
4593 return std::nullopt;
4594 // TODO: The result of the intrinsics above is two's complement. However when
4595 // IV inc is expressed as add or sub, iv.next is potentially a poison value.
4596 // If it has nuw or nsw flags, we need to make sure that these flags are
4597 // inferrable at the point of the memory instruction. Otherwise we are replacing
4598 // a well-defined two's complement computation with poison. Currently, to avoid
4599 // potentially complex analysis needed to prove this, we reject such cases.
4600 if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4601 if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4602 return std::nullopt;
4603 if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4604 return std::make_pair(IVInc->first, ConstantStep->getValue());
4605 return std::nullopt;
4606 };
4607
4608 // Try to account for the following special case:
4609 // 1. ScaleReg is an induction variable;
4610 // 2. We use it with non-zero offset;
4611 // 3. IV's increment is available at the point of memory instruction.
4612 //
4613 // In this case, we may reuse the IV increment instead of the IV Phi to
4614 // achieve the following advantages:
4615 // 1. If the IV step matches the offset, we will have no need for the offset;
4616 // 2. Even if they don't match, we will reduce the overlap of the live IV
4617 // and the IV increment, which will potentially lead to better register
4618 // assignment.
4619 if (AddrMode.BaseOffs) {
4620 if (auto IVStep = GetConstantStep(ScaleReg)) {
4621 Instruction *IVInc = IVStep->first;
4622 // The following assert is important to ensure a lack of infinite loops.
4623 // This transform is (intentionally) the inverse of the one just above.
4624 // If they don't agree on the definition of an increment, we'd alternate
4625 // back and forth indefinitely.
4626 assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4627 APInt Step = IVStep->second;
4628 APInt Offset = Step * AddrMode.Scale;
4629 if (Offset.isSignedIntN(64)) {
4630 TestAddrMode.InBounds = false;
4631 TestAddrMode.ScaledReg = IVInc;
4632 TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4633 // If this addressing mode is legal, commit it.
4634 // (Note that we defer the (expensive) domtree base legality check
4635 // to the very last possible point.)
4636 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4637 getDTFn().dominates(IVInc, MemoryInst)) {
4638 AddrModeInsts.push_back(cast<Instruction>(IVInc));
4639 AddrMode = TestAddrMode;
4640 return true;
4641 }
4642 // Restore status quo.
4643 TestAddrMode = AddrMode;
4644 }
4645 }
4646 }
4647
4648 // Otherwise, just return what we have.
4649 return true;
4650}
4651
4652/// This is a little filter, which returns true if an addressing computation
4653/// involving I might be folded into a load/store accessing it.
4654/// This doesn't need to be perfect, but needs to accept at least
4655/// the set of instructions that MatchOperationAddr can.
4656 static bool MightBeFoldableInst(Instruction *I) {
4657 switch (I->getOpcode()) {
4658 case Instruction::BitCast:
4659 case Instruction::AddrSpaceCast:
4660 // Don't touch identity bitcasts.
4661 if (I->getType() == I->getOperand(0)->getType())
4662 return false;
4663 return I->getType()->isIntOrPtrTy();
4664 case Instruction::PtrToInt:
4665 // PtrToInt is always a noop, as we know that the int type is pointer sized.
4666 return true;
4667 case Instruction::IntToPtr:
4668 // We know the input is intptr_t, so this is foldable.
4669 return true;
4670 case Instruction::Add:
4671 return true;
4672 case Instruction::Mul:
4673 case Instruction::Shl:
4674 // Can only handle X*C and X << C.
4675 return isa<ConstantInt>(I->getOperand(1));
4676 case Instruction::GetElementPtr:
4677 return true;
4678 default:
4679 return false;
4680 }
4681}
4682
4683/// Check whether or not \p Val is a legal instruction for \p TLI.
4684/// \note \p Val is assumed to be the product of some type promotion.
4685/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4686/// to be legal, as the non-promoted value would have had the same state.
4687 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4688 const DataLayout &DL, Value *Val) {
4689 Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4690 if (!PromotedInst)
4691 return false;
4692 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4693 // If the ISDOpcode is undefined, it was undefined before the promotion.
4694 if (!ISDOpcode)
4695 return true;
4696 // Otherwise, check if the promoted instruction is legal or not.
4697 return TLI.isOperationLegalOrCustom(
4698 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4699}
4700
4701namespace {
4702
4703 /// Helper class to perform type promotion.
4704class TypePromotionHelper {
4705 /// Utility function to add a promoted instruction \p ExtOpnd to
4706 /// \p PromotedInsts and record the type of extension we have seen.
4707 static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4708 Instruction *ExtOpnd, bool IsSExt) {
4709 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4710 auto [It, Inserted] = PromotedInsts.try_emplace(ExtOpnd);
4711 if (!Inserted) {
4712 // If the new extension is same as original, the information in
4713 // PromotedInsts[ExtOpnd] is still correct.
4714 if (It->second.getInt() == ExtTy)
4715 return;
4716
4717 // Now the new extension is different from old extension, we make
4718 // the type information invalid by setting extension type to
4719 // BothExtension.
4720 ExtTy = BothExtension;
4721 }
4722 It->second = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4723 }
4724
4725 /// Utility function to query the original type of instruction \p Opnd
4726 /// with a matched extension type. If the extension doesn't match, we
4727 /// cannot use the information we had on the original type.
4728 /// BothExtension doesn't match any extension type.
4729 static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4730 Instruction *Opnd, bool IsSExt) {
4731 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4732 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4733 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4734 return It->second.getPointer();
4735 return nullptr;
4736 }
4737
4738 /// Utility function to check whether or not a sign or zero extension
4739 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4740 /// either using the operands of \p Inst or promoting \p Inst.
4741 /// The type of the extension is defined by \p IsSExt.
4742 /// In other words, check if:
4743 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4744 /// #1 Promotion applies:
4745 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4746 /// #2 Operand reuses:
4747 /// ext opnd1 to ConsideredExtType.
4748 /// \p PromotedInsts maps the instructions to their type before promotion.
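 /// For example, with a sign extension and an nsw add,
 ///   sext i64 (add nsw i32 %a, 1)
 /// can be rewritten as
 ///   add i64 (sext i32 %a to i64), 1
 /// so the extension gets through the add (case #1 above).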
4749 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4750 const InstrToOrigTy &PromotedInsts, bool IsSExt);
4751
4752 /// Utility function to determine if \p OpIdx should be promoted when
4753 /// promoting \p Inst.
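 /// For a select, operand 0 is the i1 condition and keeps its type; only the
 /// true/false operands are considered for promotion.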
4754 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4755 return !(isa<SelectInst>(Inst) && OpIdx == 0);
4756 }
4757
4758 /// Utility function to promote the operand of \p Ext when this
4759 /// operand is a promotable trunc or sext or zext.
4760 /// \p PromotedInsts maps the instructions to their type before promotion.
4761 /// \p CreatedInstsCost[out] contains the cost of all instructions
4762 /// created to promote the operand of Ext.
4763 /// Newly added extensions are inserted in \p Exts.
4764 /// Newly added truncates are inserted in \p Truncs.
4765 /// Should never be called directly.
4766 /// \return The promoted value which is used instead of Ext.
4767 static Value *promoteOperandForTruncAndAnyExt(
4768 Instruction *Ext, TypePromotionTransaction &TPT,
4769 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4770 SmallVectorImpl<Instruction *> *Exts,
4771 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4772
4773 /// Utility function to promote the operand of \p Ext when this
4774 /// operand is promotable and is not a supported trunc or sext.
4775 /// \p PromotedInsts maps the instructions to their type before promotion.
4776 /// \p CreatedInstsCost[out] contains the cost of all the instructions
4777 /// created to promote the operand of Ext.
4778 /// Newly added extensions are inserted in \p Exts.
4779 /// Newly added truncates are inserted in \p Truncs.
4780 /// Should never be called directly.
4781 /// \return The promoted value which is used instead of Ext.
4782 static Value *promoteOperandForOther(Instruction *Ext,
4783 TypePromotionTransaction &TPT,
4784 InstrToOrigTy &PromotedInsts,
4785 unsigned &CreatedInstsCost,
4786 SmallVectorImpl<Instruction *> *Exts,
4787 SmallVectorImpl<Instruction *> *Truncs,
4788 const TargetLowering &TLI, bool IsSExt);
4789
4790 /// \see promoteOperandForOther.
4791 static Value *signExtendOperandForOther(
4792 Instruction *Ext, TypePromotionTransaction &TPT,
4793 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4794 SmallVectorImpl<Instruction *> *Exts,
4795 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4796 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4797 Exts, Truncs, TLI, true);
4798 }
4799
4800 /// \see promoteOperandForOther.
4801 static Value *zeroExtendOperandForOther(
4802 Instruction *Ext, TypePromotionTransaction &TPT,
4803 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4804 SmallVectorImpl<Instruction *> *Exts,
4805 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4806 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4807 Exts, Truncs, TLI, false);
4808 }
4809
4810public:
4811 /// Type for the utility function that promotes the operand of Ext.
4812 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4813 InstrToOrigTy &PromotedInsts,
4814 unsigned &CreatedInstsCost,
4815 SmallVectorImpl<Instruction *> *Exts,
4816 SmallVectorImpl<Instruction *> *Truncs,
4817 const TargetLowering &TLI);
4818
4819 /// Given a sign/zero extend instruction \p Ext, return the appropriate
4820 /// action to promote the operand of \p Ext instead of using Ext.
4821 /// \return NULL if no promotable action is possible with the current
4822 /// sign extension.
4823 /// \p InsertedInsts keeps track of all the instructions inserted by the
4824 /// other CodeGenPrepare optimizations. This information is important
4825 /// because we do not want to promote these instructions as CodeGenPrepare
4826 /// will reinsert them later, creating an infinite loop: create/remove.
4827 /// \p PromotedInsts maps the instructions to their type before promotion.
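 /// For example, for sext(zext %x) the trunc/any-ext handler is returned,
 /// while for sext(add nsw %a, %b) the sign-extension handler is returned,
 /// subject to the cost checks below.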
4828 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4829 const TargetLowering &TLI,
4830 const InstrToOrigTy &PromotedInsts);
4831};
4832
4833} // end anonymous namespace
4834
4835bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4836 Type *ConsideredExtType,
4837 const InstrToOrigTy &PromotedInsts,
4838 bool IsSExt) {
4839 // The promotion helper does not know how to deal with vector types yet.
4840 // To be able to fix that, we would need to fix the places where we
4841 // statically extend, e.g., constants and such.
4842 if (Inst->getType()->isVectorTy())
4843 return false;
4844
4845 // We can always get through zext.
4846 if (isa<ZExtInst>(Inst))
4847 return true;
4848
4849 // sext(sext) is ok too.
4850 if (IsSExt && isa<SExtInst>(Inst))
4851 return true;
4852
4853 // We can get through a binary operator if moving the extension is legal,
4854 // i.e., the operator has the matching nuw (for zext) or nsw (for sext) flag.
4855 if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4856 if (isa<OverflowingBinaryOperator>(BinOp) &&
4857 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4858 (IsSExt && BinOp->hasNoSignedWrap())))
4859 return true;
4860
4861 // ext(and/or(opnd, cst)) --> and/or(ext(opnd), ext(cst))
4862 if ((Inst->getOpcode() == Instruction::And ||
4863 Inst->getOpcode() == Instruction::Or))
4864 return true;
4865
4866 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4867 if (Inst->getOpcode() == Instruction::Xor) {
4868 // Make sure it is not a NOT.
4869 if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4870 if (!Cst->getValue().isAllOnes())
4871 return true;
4872 }
4873
4874 // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
4875 // This may turn a poison value into a well-defined one, e.g.,
4876 // zext i32 (lshr i8 %val, 12) --> lshr i32 (zext i8 %val), 12
4877 // where the left side is poison (the shift amount exceeds the bit width)
4878 // and the right side is 0; refining poison to a defined value is legal.
4879 if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4880 return true;
4881
4882 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4883 // This may turn a poison value into a well-defined one, e.g.,
4884 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
4885 // where the left side is poison (the shift amount exceeds the bit width)
4886 // and the right side is well defined; refining poison is legal.
4887 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4888 const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4889 if (ExtInst->hasOneUse()) {
4890 const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4891 if (AndInst && AndInst->getOpcode() == Instruction::And) {
4892 const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4893 if (Cst &&
4894 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4895 return true;
4896 }
4897 }
4898 }
4899
4900 // Check if we can do the following simplification.
4901 // ext(trunc(opnd)) --> ext(opnd)
4902 if (!isa<TruncInst>(Inst))
4903 return false;
4904
4905 Value *OpndVal = Inst->getOperand(0);
4906 // Check if we can use this operand in the extension.
4907 // If the type is larger than the result type of the extension, we cannot.
4908 if (!OpndVal->getType()->isIntegerTy() ||
4909 OpndVal->getType()->getIntegerBitWidth() >
4910 ConsideredExtType->getIntegerBitWidth())
4911 return false;
4912
4913 // If the operand of the truncate is not an instruction, we will not have
4914 // any information on the dropped bits.
4915 // (Actually we could for constants, but it is not worth the extra logic).
4916 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4917 if (!Opnd)
4918 return false;
4919
4920 // Check that the source of the truncate is narrow enough, i.e., that the
4921 // trunc merely drops bits that were produced by the same kind of
4922 // extension.
4923 // #1 get the type of the operand and check the kind of the extended bits.
4924 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4925 if (OpndType)
4926 ;
4927 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4928 OpndType = Opnd->getOperand(0)->getType();
4929 else
4930 return false;
4931
4932 // #2 check that the truncate just drops extended bits.
4933 return Inst->getType()->getIntegerBitWidth() >=
4934 OpndType->getIntegerBitWidth();
4935}
4936
4937TypePromotionHelper::Action TypePromotionHelper::getAction(
4938 Instruction *Ext, const SetOfInstrs &InsertedInsts,
4939 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4940 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4941 "Unexpected instruction type");
4942 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4943 Type *ExtTy = Ext->getType();
4944 bool IsSExt = isa<SExtInst>(Ext);
4945 // If the operand of the extension is not an instruction, we cannot
4946 // get through.
4947 // If it is, check whether we can get through it.
4948 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4949 return nullptr;
4950
4951 // Do not promote if the operand has been added by codegenprepare.
4952 // Otherwise, it means we are undoing an optimization that is likely to be
4953 // redone, thus causing potential infinite loop.
4954 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4955 return nullptr;
4956
4957 // SExt, ZExt or Trunc instructions.
4958 // Return the related handler.
4959 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4960 isa<ZExtInst>(ExtOpnd))
4961 return promoteOperandForTruncAndAnyExt;
4962
4963 // Regular instruction.
4964 // Abort early if we will have to insert non-free instructions.
4965 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4966 return nullptr;
4967 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4968}
4969
4970Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4971 Instruction *SExt, TypePromotionTransaction &TPT,
4972 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4973 SmallVectorImpl<Instruction *> *Exts,
4974 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4975 // By construction, the operand of SExt is an instruction. Otherwise we cannot
4976 // get through it and this method should not be called.
4977 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4978 Value *ExtVal = SExt;
4979 bool HasMergedNonFreeExt = false;
4980 if (isa<ZExtInst>(SExtOpnd)) {
4981 // Replace s|zext(zext(opnd))
4982 // => zext(opnd).
4983 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4984 Value *ZExt =
4985 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4986 TPT.replaceAllUsesWith(SExt, ZExt);
4987 TPT.eraseInstruction(SExt);
4988 ExtVal = ZExt;
4989 } else {
4990 // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4991 // => z|sext(opnd).
4992 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4993 }
4994 CreatedInstsCost = 0;
4995
4996 // Remove dead code.
4997 if (SExtOpnd->use_empty())
4998 TPT.eraseInstruction(SExtOpnd);
4999
5000 // Check if the extension is still needed.
5001 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
5002 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
5003 if (ExtInst) {
5004 if (Exts)
5005 Exts->push_back(ExtInst);
5006 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
5007 }
5008 return ExtVal;
5009 }
5010
5011 // At this point we have: ext ty opnd to ty.
5012 // Reassign the uses of ExtInst to the opnd and remove ExtInst.
5013 Value *NextVal = ExtInst->getOperand(0);
5014 TPT.eraseInstruction(ExtInst, NextVal);
5015 return NextVal;
5016}
5017
5018Value *TypePromotionHelper::promoteOperandForOther(
5019 Instruction *Ext, TypePromotionTransaction &TPT,
5020 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
5021 SmallVectorImpl<Instruction *> *Exts,
5022 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
5023 bool IsSExt) {
5024 // By construction, the operand of Ext is an instruction. Otherwise we cannot
5025 // get through it and this method should not be called.
5026 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
5027 CreatedInstsCost = 0;
5028 if (!ExtOpnd->hasOneUse()) {
5029 // ExtOpnd will be promoted.
5030 // All its uses, but Ext, will need to use a truncated value of the
5031 // promoted version.
5032 // Create the truncate now.
5033 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
5034 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
5035 // Insert it just after the definition.
5036 ITrunc->moveAfter(ExtOpnd);
5037 if (Truncs)
5038 Truncs->push_back(ITrunc);
5039 }
5040
5041 TPT.replaceAllUsesWith(ExtOpnd, Trunc);
5042 // Restore the operand of Ext (which has been replaced by the previous call
5043 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
5044 TPT.setOperand(Ext, 0, ExtOpnd);
5045 }
5046
5047 // Get through the Instruction:
5048 // 1. Update its type.
5049 // 2. Replace the uses of Ext by Inst.
5050 // 3. Extend each operand that needs to be extended.
5051
5052 // Remember the original type of the instruction before promotion.
5053 // This is useful to know that the high bits are sign extended bits.
5054 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
5055 // Step #1.
5056 TPT.mutateType(ExtOpnd, Ext->getType());
5057 // Step #2.
5058 TPT.replaceAllUsesWith(Ext, ExtOpnd);
5059 // Step #3.
5060 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
5061 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
5062 ++OpIdx) {
5063 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
5064 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
5065 !shouldExtOperand(ExtOpnd, OpIdx)) {
5066 LLVM_DEBUG(dbgs() << "No need to propagate\n");
5067 continue;
5068 }
5069 // Check if we can statically extend the operand.
5070 Value *Opnd = ExtOpnd->getOperand(OpIdx);
5071 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
5072 LLVM_DEBUG(dbgs() << "Statically extend\n");
5073 unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
5074 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
5075 : Cst->getValue().zext(BitWidth);
5076 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
5077 continue;
5078 }
5079 // UndefValues are typed, so we have to statically extend them.
5080 if (isa<UndefValue>(Opnd)) {
5081 LLVM_DEBUG(dbgs() << "Statically extend\n");
5082 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
5083 continue;
5084 }
5085
5086 // Otherwise we have to explicitly sign/zero extend the operand.
5087 Value *ValForExtOpnd = IsSExt
5088 ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
5089 : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
5090 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
5091 Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
5092 if (!InstForExtOpnd)
5093 continue;
5094
5095 if (Exts)
5096 Exts->push_back(InstForExtOpnd);
5097
5098 CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
5099 }
5100 LLVM_DEBUG(dbgs() << "Extension is useless now\n");
5101 TPT.eraseInstruction(Ext);
5102 return ExtOpnd;
5103}
5104
5105/// Check whether or not promoting an instruction to a wider type is profitable.
5106/// \p NewCost gives the cost of extension instructions created by the
5107/// promotion.
5108/// \p OldCost gives the cost of extension instructions before the promotion
5109/// plus the number of instructions that have been matched in the
5110/// addressing mode thanks to the promotion.
5111/// \p PromotedOperand is the value that has been promoted.
5112/// \return True if the promotion is profitable, false otherwise.
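/// For example, if the promotion created one non-free extension (NewCost == 1)
/// while removing an extension and folding one extra instruction into the
/// addressing mode (OldCost == 2), it is considered profitable.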
5113bool AddressingModeMatcher::isPromotionProfitable(
5114 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
5115 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
5116 << '\n');
5117 // The cost of the new extensions is greater than the cost of the
5118 // old extension plus what we folded.
5119 // This is not profitable.
5120 if (NewCost > OldCost)
5121 return false;
5122 if (NewCost < OldCost)
5123 return true;
5124 // The promotion is neutral but it may help folding the sign extension in
5125 // loads for instance.
5126 // Check that we did not create an illegal instruction.
5127 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
5128}
5129
5130/// Given an instruction or constant expr, see if we can fold the operation
5131/// into the addressing mode. If so, update the addressing mode and return
5132/// true, otherwise return false without modifying AddrMode.
5133/// If \p MovedAway is not NULL, it contains the information of whether or
5134/// not AddrInst has to be folded into the addressing mode on success.
5135/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
5136/// mode because it has been moved away.
5137/// Thus AddrInst must not be added to the matched instructions.
5138/// This state can happen when AddrInst is a sext, since it may be moved away.
5139/// Therefore, AddrInst may not be valid when MovedAway is true and it must
5140/// not be referenced anymore.
5141bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
5142 unsigned Depth,
5143 bool *MovedAway) {
5144 // Avoid exponential behavior on extremely deep expression trees.
5145 if (Depth >= 5)
5146 return false;
5147
5148 // By default, all matched instructions stay in place.
5149 if (MovedAway)
5150 *MovedAway = false;
5151
5152 switch (Opcode) {
5153 case Instruction::PtrToInt:
5154 // PtrToInt is always a noop, as we know that the int type is pointer sized.
5155 return matchAddr(AddrInst->getOperand(0), Depth);
5156 case Instruction::IntToPtr: {
5157 auto AS = AddrInst->getType()->getPointerAddressSpace();
5158 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
5159 // This inttoptr is a no-op if the integer type is pointer sized.
5160 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
5161 return matchAddr(AddrInst->getOperand(0), Depth);
5162 return false;
5163 }
5164 case Instruction::BitCast:
5165 // BitCast is always a noop, and we can handle it as long as it is
5166 // int->int or pointer->pointer (we don't want int<->fp or something).
5167 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
5168 // Don't touch identity bitcasts. These were probably put here by LSR,
5169 // and we don't want to mess around with them. Assume it knows what it
5170 // is doing.
5171 AddrInst->getOperand(0)->getType() != AddrInst->getType())
5172 return matchAddr(AddrInst->getOperand(0), Depth);
5173 return false;
5174 case Instruction::AddrSpaceCast: {
5175 unsigned SrcAS =
5176 AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
5177 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
5178 if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
5179 return matchAddr(AddrInst->getOperand(0), Depth);
5180 return false;
5181 }
5182 case Instruction::Add: {
5183 // Check to see if we can merge in one operand, then the other. If so, we
5184 // win.
5185 ExtAddrMode BackupAddrMode = AddrMode;
5186 unsigned OldSize = AddrModeInsts.size();
5187 // Start a transaction at this point.
5188 // The LHS may match but not the RHS.
5189 // Therefore, we need a higher level restoration point to undo partially
5190 // matched operation.
5191 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5192 TPT.getRestorationPoint();
5193
5194 // Try to match an integer constant second to increase its chance of ending
5195 // up in `BaseOffs` and decrease its chance of ending up in `BaseReg`.
5196 int First = 0, Second = 1;
5197 if (isa<ConstantInt>(AddrInst->getOperand(First))
5198 && !isa<ConstantInt>(AddrInst->getOperand(Second)))
5199 std::swap(First, Second);
5200 AddrMode.InBounds = false;
5201 if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
5202 matchAddr(AddrInst->getOperand(Second), Depth + 1))
5203 return true;
5204
5205 // Restore the old addr mode info.
5206 AddrMode = BackupAddrMode;
5207 AddrModeInsts.resize(OldSize);
5208 TPT.rollback(LastKnownGood);
5209
5210 // Otherwise this was over-aggressive. Try merging operands in the opposite
5211 // order.
5212 if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
5213 matchAddr(AddrInst->getOperand(First), Depth + 1))
5214 return true;
5215
5216 // Otherwise we definitely can't merge the ADD in.
5217 AddrMode = BackupAddrMode;
5218 AddrModeInsts.resize(OldSize);
5219 TPT.rollback(LastKnownGood);
5220 break;
5221 }
5222 // case Instruction::Or:
5223 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
5224 // break;
5225 case Instruction::Mul:
5226 case Instruction::Shl: {
5227 // Can only handle X*C and X << C.
5228 AddrMode.InBounds = false;
5229 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
5230 if (!RHS || RHS->getBitWidth() > 64)
5231 return false;
5232 int64_t Scale = Opcode == Instruction::Shl
5233 ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
5234 : RHS->getSExtValue();
5235
5236 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
5237 }
5238 case Instruction::GetElementPtr: {
5239 // Scan the GEP. We check whether it contains constant offsets and at most
5240 // one variable offset.
5241 int VariableOperand = -1;
5242 unsigned VariableScale = 0;
5243
5244 int64_t ConstantOffset = 0;
5245 gep_type_iterator GTI = gep_type_begin(AddrInst);
5246 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
5247 if (StructType *STy = GTI.getStructTypeOrNull()) {
5248 const StructLayout *SL = DL.getStructLayout(STy);
5249 unsigned Idx =
5250 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
5251 ConstantOffset += SL->getElementOffset(Idx);
5252 } else {
5253 TypeSize TS = GTI.getSequentialElementStride(DL);
5254 if (TS.isNonZero()) {
5255 // The optimisations below currently only work for fixed offsets.
5256 if (TS.isScalable())
5257 return false;
5258 int64_t TypeSize = TS.getFixedValue();
5259 if (ConstantInt *CI =
5260 dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
5261 const APInt &CVal = CI->getValue();
5262 if (CVal.getSignificantBits() <= 64) {
5263 ConstantOffset += CVal.getSExtValue() * TypeSize;
5264 continue;
5265 }
5266 }
5267 // We only allow one variable index at the moment.
5268 if (VariableOperand != -1)
5269 return false;
5270
5271 // Remember the variable index.
5272 VariableOperand = i;
5273 VariableScale = TypeSize;
5274 }
5275 }
5276 }
5277
5278 // A common case is for the GEP to only do a constant offset. In this case,
5279 // just add it to the disp field and check validity.
5280 if (VariableOperand == -1) {
5281 AddrMode.BaseOffs += ConstantOffset;
5282 if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5283 if (!cast<GEPOperator>(AddrInst)->isInBounds())
5284 AddrMode.InBounds = false;
5285 return true;
5286 }
5287 AddrMode.BaseOffs -= ConstantOffset;
5288
5289 if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
5290 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
5291 ConstantOffset > 0) {
5292 // Record GEPs with non-zero offsets as candidates for splitting in
5293 // the event that the offset cannot fit into the r+i addressing mode.
5294 // Simple and common case that only one GEP is used in calculating the
5295 // address for the memory access.
5296 Value *Base = AddrInst->getOperand(0);
5297 auto *BaseI = dyn_cast<Instruction>(Base);
5298 auto *GEP = cast<GetElementPtrInst>(AddrInst);
5299 if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
5300 (BaseI && !isa<CastInst>(BaseI) &&
5301 !isa<GetElementPtrInst>(BaseI))) {
5302 // Make sure the parent block allows inserting non-PHI instructions
5303 // before the terminator.
5304 BasicBlock *Parent = BaseI ? BaseI->getParent()
5305 : &GEP->getFunction()->getEntryBlock();
5306 if (!Parent->getTerminator()->isEHPad())
5307 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
5308 }
5309 }
5310
5311 return false;
5312 }
5313
5314 // Save the valid addressing mode in case we can't match.
5315 ExtAddrMode BackupAddrMode = AddrMode;
5316 unsigned OldSize = AddrModeInsts.size();
5317
5318 // See if the scale and offset amount is valid for this target.
5319 AddrMode.BaseOffs += ConstantOffset;
5320 if (!cast<GEPOperator>(AddrInst)->isInBounds())
5321 AddrMode.InBounds = false;
5322
5323 // Match the base operand of the GEP.
5324 if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5325 // If it couldn't be matched, just stuff the value in a register.
5326 if (AddrMode.HasBaseReg) {
5327 AddrMode = BackupAddrMode;
5328 AddrModeInsts.resize(OldSize);
5329 return false;
5330 }
5331 AddrMode.HasBaseReg = true;
5332 AddrMode.BaseReg = AddrInst->getOperand(0);
5333 }
5334
5335 // Match the remaining variable portion of the GEP.
5336 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
5337 Depth)) {
5338 // If it couldn't be matched, try stuffing the base into a register
5339 // instead of matching it, and retrying the match of the scale.
5340 AddrMode = BackupAddrMode;
5341 AddrModeInsts.resize(OldSize);
5342 if (AddrMode.HasBaseReg)
5343 return false;
5344 AddrMode.HasBaseReg = true;
5345 AddrMode.BaseReg = AddrInst->getOperand(0);
5346 AddrMode.BaseOffs += ConstantOffset;
5347 if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
5348 VariableScale, Depth)) {
5349 // If even that didn't work, bail.
5350 AddrMode = BackupAddrMode;
5351 AddrModeInsts.resize(OldSize);
5352 return false;
5353 }
5354 }
5355
5356 return true;
5357 }
5358 case Instruction::SExt:
5359 case Instruction::ZExt: {
5360 Instruction *Ext = dyn_cast<Instruction>(AddrInst);
5361 if (!Ext)
5362 return false;
5363
5364 // Try to move this ext out of the way of the addressing mode.
5365 // Ask for a method for doing so.
5366 TypePromotionHelper::Action TPH =
5367 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
5368 if (!TPH)
5369 return false;
5370
5371 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5372 TPT.getRestorationPoint();
5373 unsigned CreatedInstsCost = 0;
5374 unsigned ExtCost = !TLI.isExtFree(Ext);
5375 Value *PromotedOperand =
5376 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
5377 // SExt has been moved away.
5378 // Thus either it will be rematched later in the recursive calls or it is
5379 // gone. Anyway, we must not fold it into the addressing mode at this point.
5380 // E.g.,
5381 // op = add opnd, 1
5382 // idx = ext op
5383 // addr = gep base, idx
5384 // is now:
5385 // promotedOpnd = ext opnd <- no match here
5386 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
5387 // addr = gep base, op <- match
5388 if (MovedAway)
5389 *MovedAway = true;
5390
5391 assert(PromotedOperand &&
5392 "TypePromotionHelper should have filtered out those cases");
5393
5394 ExtAddrMode BackupAddrMode = AddrMode;
5395 unsigned OldSize = AddrModeInsts.size();
5396
5397 if (!matchAddr(PromotedOperand, Depth) ||
5398 // The total of the new cost is equal to the cost of the created
5399 // instructions.
5400 // The total of the old cost is equal to the cost of the extension plus
5401 // what we have saved in the addressing mode.
5402 !isPromotionProfitable(CreatedInstsCost,
5403 ExtCost + (AddrModeInsts.size() - OldSize),
5404 PromotedOperand)) {
5405 AddrMode = BackupAddrMode;
5406 AddrModeInsts.resize(OldSize);
5407 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
5408 TPT.rollback(LastKnownGood);
5409 return false;
5410 }
5411
5412 // SExt has been deleted. Make sure it is not referenced by the AddrMode.
5413 AddrMode.replaceWith(Ext, PromotedOperand);
5414 return true;
5415 }
5416 case Instruction::Call:
5417 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
5418 if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
5419 GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
5420 if (TLI.addressingModeSupportsTLS(GV))
5421 return matchAddr(AddrInst->getOperand(0), Depth);
5422 }
5423 }
5424 break;
5425 }
5426 return false;
5427}
5428
5429/// If we can, try to add the value of 'Addr' into the current addressing mode.
5430/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
5431/// unmodified. This assumes that Addr is either a pointer type or intptr_t
5432/// for the target.
5433///
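/// For example, an address of the form (getelementptr i8, ptr @g, i64 40) can
/// be matched as { BaseGV = @g, BaseOffs = +40 }, provided the target reports
/// that combination as a legal addressing mode.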
5434bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
5435 // Start a transaction at this point that we will rollback if the matching
5436 // fails.
5437 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5438 TPT.getRestorationPoint();
5439 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
5440 if (CI->getValue().isSignedIntN(64)) {
5441 // Check if the addition would result in a signed overflow.
5442 int64_t Result;
5443 bool Overflow =
5444 AddOverflow(AddrMode.BaseOffs, CI->getSExtValue(), Result);
5445 if (!Overflow) {
5446 // Fold in immediates if legal for the target.
5447 AddrMode.BaseOffs = Result;
5448 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5449 return true;
5450 AddrMode.BaseOffs -= CI->getSExtValue();
5451 }
5452 }
5453 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
5454 // If this is a global variable, try to fold it into the addressing mode.
5455 if (!AddrMode.BaseGV) {
5456 AddrMode.BaseGV = GV;
5457 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5458 return true;
5459 AddrMode.BaseGV = nullptr;
5460 }
5461 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
5462 ExtAddrMode BackupAddrMode = AddrMode;
5463 unsigned OldSize = AddrModeInsts.size();
5464
5465 // Check to see if it is possible to fold this operation.
5466 bool MovedAway = false;
5467 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
5468 // This instruction may have been moved away. If so, there is nothing
5469 // to check here.
5470 if (MovedAway)
5471 return true;
5472 // Okay, it's possible to fold this. Check to see if it is actually
5473 // *profitable* to do so. We use a simple cost model to avoid increasing
5474 // register pressure too much.
5475 if (I->hasOneUse() ||
5476 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
5477 AddrModeInsts.push_back(I);
5478 return true;
5479 }
5480
5481 // It isn't profitable to do this, roll back.
5482 AddrMode = BackupAddrMode;
5483 AddrModeInsts.resize(OldSize);
5484 TPT.rollback(LastKnownGood);
5485 }
5486 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
5487 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
5488 return true;
5489 TPT.rollback(LastKnownGood);
5490 } else if (isa<ConstantPointerNull>(Addr)) {
5491 // Null pointer gets folded without affecting the addressing mode.
5492 return true;
5493 }
5494
5495 // Worst case, the target should support [reg] addressing modes. :)
5496 if (!AddrMode.HasBaseReg) {
5497 AddrMode.HasBaseReg = true;
5498 AddrMode.BaseReg = Addr;
5499 // Still check for legality in case the target supports [imm] but not [i+r].
5500 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5501 return true;
5502 AddrMode.HasBaseReg = false;
5503 AddrMode.BaseReg = nullptr;
5504 }
5505
5506 // If the base register is already taken, see if we can do [r+r].
5507 if (AddrMode.Scale == 0) {
5508 AddrMode.Scale = 1;
5509 AddrMode.ScaledReg = Addr;
5510 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5511 return true;
5512 AddrMode.Scale = 0;
5513 AddrMode.ScaledReg = nullptr;
5514 }
5515 // Couldn't match.
5516 TPT.rollback(LastKnownGood);
5517 return false;
5518}
5519
5520/// Check to see if all uses of OpVal by the specified inline asm call are due
5521/// to memory operands. If so, return true, otherwise return false.
5522static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
5523 const TargetLowering &TLI,
5524 const TargetRegisterInfo &TRI) {
5525 const Function *F = CI->getFunction();
5526 TargetLowering::AsmOperandInfoVector TargetConstraints =
5527 TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);
5528
5529 for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5530 // Compute the constraint code and ConstraintType to use.
5531 TLI.ComputeConstraintToUse(OpInfo, SDValue());
5532
5533 // If this asm operand is our Value*, and if it isn't an indirect memory
5534 // operand, we can't fold it! TODO: Also handle C_Address?
5535 if (OpInfo.CallOperandVal == OpVal &&
5536 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
5537 !OpInfo.isIndirect))
5538 return false;
5539 }
5540
5541 return true;
5542}
5543
5544/// Recursively walk all the uses of I until we find a memory use.
5545/// If we find an obviously non-foldable instruction, return true.
5546/// Add accessed addresses and types to MemoryUses.
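/// Loads, stores, atomicrmw/cmpxchg (through their pointer operand), target
/// intrinsics with addressing-mode operands, and inline-asm memory operands
/// count as memory uses; other foldable instructions are recursed into, and
/// anything else (apart from cold call sites) makes the walk give up.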
5547static bool FindAllMemoryUses(
5548 Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5549 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
5550 const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
5551 BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
5552 // If we already considered this instruction, we're done.
5553 if (!ConsideredInsts.insert(I).second)
5554 return false;
5555
5556 // If this is an obviously unfoldable instruction, bail out.
5557 if (!MightBeFoldableInst(I))
5558 return true;
5559
5560 // Loop over all the uses, recursively processing them.
5561 for (Use &U : I->uses()) {
5562 // Conservatively return true if we're seeing a large number or a deep chain
5563 // of users. This avoids excessive compilation times in pathological cases.
5564 if (SeenInsts++ >= MaxAddressUsersToScan)
5565 return true;
5566
5567 Instruction *UserI = cast<Instruction>(U.getUser());
5568 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
5569 MemoryUses.push_back({&U, LI->getType()});
5570 continue;
5571 }
5572
5573 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5574 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5575 return true; // Storing addr, not into addr.
5576 MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
5577 continue;
5578 }
5579
5580 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5581 if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5582 return true; // Storing addr, not into addr.
5583 MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
5584 continue;
5585 }
5586
5587 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5588 if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5589 return true; // Storing addr, not into addr.
5590 MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
5591 continue;
5592 }
5593
5594 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI)) {
5595 SmallVector<Value *, 2> PtrOps;
5596 Type *AccessTy;
5597 if (!TLI.getAddrModeArguments(II, PtrOps, AccessTy))
5598 return true;
5599
5600 if (!find(PtrOps, U.get()))
5601 return true;
5602
5603 MemoryUses.push_back({&U, AccessTy});
5604 continue;
5605 }
5606
5607 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5608 if (CI->hasFnAttr(Attribute::Cold)) {
5609 // If this is a cold call, we can sink the addressing calculation into
5610 // the cold path. See optimizeCallInst
5611 if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
5612 continue;
5613 }
5614
5615 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5616 if (!IA)
5617 return true;
5618
5619 // If this is a memory operand, we're cool, otherwise bail out.
5620 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5621 return true;
5622 continue;
5623 }
5624
5625 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5626 PSI, BFI, SeenInsts))
5627 return true;
5628 }
5629
5630 return false;
5631}
5632
5632
5633static bool FindAllMemoryUses(
5634 Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5635 const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
5636 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
5637 unsigned SeenInsts = 0;
5638 SmallPtrSet<Instruction *, 16> ConsideredInsts;
5639 return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5640 PSI, BFI, SeenInsts);
5641}
5642
5643
5644/// Return true if Val is already known to be live at the use site that we're
5645/// folding it into. If so, there is no cost to include it in the addressing
5646/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5647/// instruction already.
5648bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5649 Value *KnownLive1,
5650 Value *KnownLive2) {
5651 // If Val is either of the known-live values, we know it is live!
5652 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5653 return true;
5654
5655 // All values other than instructions and arguments (e.g. constants) are live.
5656 if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5657 return true;
5658
5659 // If Val is a constant sized alloca in the entry block, it is live; this
5660 // is because it is just a reference to the stack/frame pointer, which is
5661 // live for the whole function.
5662 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5663 if (AI->isStaticAlloca())
5664 return true;
5665
5666 // Check to see if this value is already used in the memory instruction's
5667 // block. If so, it's already live into the block at the very least, so we
5668 // can reasonably fold it.
5669 return Val->isUsedInBasicBlock(MemoryInst->getParent());
5670}
5671
5672/// It is possible for the addressing mode of the machine to fold the specified
5673/// instruction into a load or store that ultimately uses it.
5674/// However, the specified instruction has multiple uses.
5675/// Given this, it may actually increase register pressure to fold it
5676/// into the load. For example, consider this code:
5677///
5678/// X = ...
5679/// Y = X+1
5680/// use(Y) -> nonload/store
5681/// Z = Y+1
5682/// load Z
5683///
5684/// In this case, Y has multiple uses, and can be folded into the load of Z
5685/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
5686/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
5687/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
5688/// number of computations either.
5689///
5690/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
5691/// X was live across 'load Z' for other reasons, we actually *would* want to
5692/// fold the addressing mode in the Z case. This would make Y die earlier.
5693bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5694 Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5695 if (IgnoreProfitability)
5696 return true;
5697
5698 // AMBefore is the addressing mode before this instruction was folded into it,
5699 // and AMAfter is the addressing mode after the instruction was folded. Get
5700 // the set of registers referenced by AMAfter and subtract out those
5701 // referenced by AMBefore: this is the set of values which folding in this
5702 // address extends the lifetime of.
5703 //
5704 // Note that there are only two potential values being referenced here,
5705 // BaseReg and ScaleReg (global addresses are always available, as are any
5706 // folded immediates).
5707 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5708
5709 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5710 // lifetime wasn't extended by adding this instruction.
5711 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5712 BaseReg = nullptr;
5713 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5714 ScaledReg = nullptr;
5715
5716 // If folding this instruction (and its subexprs) didn't extend any live
5717 // ranges, we're ok with it.
5718 if (!BaseReg && !ScaledReg)
5719 return true;
5720
5721 // If all uses of this instruction can have the address mode sunk into them,
5722 // we can remove the addressing mode and effectively trade one live register
5723 // for another (at worst.) In this context, folding an addressing mode into
5724 // the use is just a particularly nice way of sinking it.
5725 SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
5726 if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
5727 return false; // Has a non-memory, non-foldable use!
5728
5729 // Now that we know that all uses of this instruction are part of a chain of
5730 // computation involving only operations that could theoretically be folded
5731 // into a memory use, loop over each of these memory operation uses and see
5732 // if they could *actually* fold the instruction. The assumption is that
5733 // addressing modes are cheap and that duplicating the computation involved
5734 // many times is worthwhile, even on a fastpath. For sinking candidates
5735 // (i.e. cold call sites), this serves as a way to prevent excessive code
5736 // growth since most architectures have some reasonable small and fast way to
5737 // compute an effective address. (e.g., LEA on x86)
5738 SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5739 for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
5740 Value *Address = Pair.first->get();
5741 Instruction *UserI = cast<Instruction>(Pair.first->getUser());
5742 Type *AddressAccessTy = Pair.second;
5743 unsigned AS = Address->getType()->getPointerAddressSpace();
5744
5745 // Do a match against the root of this address, ignoring profitability. This
5746 // will tell us if the addressing mode for the memory operation will
5747 // *actually* cover the shared instruction.
5748 ExtAddrMode Result;
5749 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5750 0);
5751 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5752 TPT.getRestorationPoint();
5753 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5754 AddressAccessTy, AS, UserI, Result,
5755 InsertedInsts, PromotedInsts, TPT,
5756 LargeOffsetGEP, OptSize, PSI, BFI);
5757 Matcher.IgnoreProfitability = true;
5758 bool Success = Matcher.matchAddr(Address, 0);
5759 (void)Success;
5760 assert(Success && "Couldn't select *anything*?");
5761
5762 // The match was to check the profitability, the changes made are not
5763 // part of the original matcher. Therefore, they should be dropped
5764 // otherwise the original matcher will not present the right state.
5765 TPT.rollback(LastKnownGood);
5766
5767 // If the match didn't cover I, then it won't be shared by it.
5768 if (!is_contained(MatchedAddrModeInsts, I))
5769 return false;
5770
5771 MatchedAddrModeInsts.clear();
5772 }
5773
5774 return true;
5775}
5776
5777/// Return true if the specified values are defined in a
5778/// different basic block than BB.
5779static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5780 if (Instruction *I = dyn_cast<Instruction>(V))
5781 return I->getParent() != BB;
5782 return false;
5783}
5784
5785// Find an insert position for the address computation of Addr used by
5786// MemoryInst. We can't guarantee that MemoryInst is the first instruction
5787// that uses Addr, so we need to find the first user of Addr in the current BB.
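// For example, if a cast of Addr appears earlier in this block than
// MemoryInst, the sunk address must be materialized before that earlier user
// rather than directly in front of MemoryInst.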
5788static BasicBlock::iterator findInsertPos(Value *Addr, Instruction *MemoryInst,
5789 Value *SunkAddr) {
5790 if (Addr->hasOneUse())
5791 return MemoryInst->getIterator();
5792
5793 // We already have a SunkAddr in current BB, but we may need to insert cast
5794 // instruction after it.
5795 if (SunkAddr) {
5796 if (Instruction *AddrInst = dyn_cast<Instruction>(SunkAddr))
5797 return std::next(AddrInst->getIterator());
5798 }
5799
5800 // Find the first user of Addr in current BB.
5801 Instruction *Earliest = MemoryInst;
5802 for (User *U : Addr->users()) {
5803 Instruction *UserInst = dyn_cast<Instruction>(U);
5804 if (UserInst && UserInst->getParent() == MemoryInst->getParent()) {
5805 if (isa<PHINode>(UserInst) || UserInst->isDebugOrPseudoInst())
5806 continue;
5807 if (UserInst->comesBefore(Earliest))
5808 Earliest = UserInst;
5809 }
5810 }
5811 return Earliest->getIterator();
5812}
5813
5814/// Sink addressing mode computation immediately before MemoryInst if doing so
5815/// can be done without increasing register pressure. The need for the
5816/// register pressure constraint means this can end up being an all or nothing
5817/// decision for all uses of the same addressing computation.
5818///
5819/// Load and Store Instructions often have addressing modes that can do
5820/// significant amounts of computation. As such, instruction selection will try
5821/// to get the load or store to do as much computation as possible for the
5822/// program. The problem is that isel can only see within a single block. As
5823/// such, we sink as much legal addressing mode work into the block as possible.
5824///
5825/// This method is used to optimize both load/store and inline asms with memory
5826/// operands. It's also used to sink addressing computations feeding into cold
5827/// call sites into their (cold) basic block.
5828///
5829/// The motivation for handling sinking into cold blocks is that doing so can
5830/// both enable other address mode sinking (by satisfying the register pressure
5831/// constraint above), and reduce register pressure globally (by removing the
5832/// addressing mode computation from the fast path entirely).
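/// For example, an address computed in a predecessor block:
///   bb1: %addr = getelementptr inbounds i32, ptr %base, i64 %idx
///   bb2: %v = load i32, ptr %addr
/// is rematerialized next to the load as a "sunkaddr" GEP (or a
/// ptrtoint/add/inttoptr sequence), so instruction selection can fold the
/// computation into the load's addressing mode.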
5833bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5834 Type *AccessTy, unsigned AddrSpace) {
5835 Value *Repl = Addr;
5836
5837 // Try to collapse single-value PHI nodes. This is necessary to undo
5838 // unprofitable PRE transformations.
5839 SmallVector<Value *, 8> worklist;
5840 SmallPtrSet<Value *, 16> Visited;
5841 worklist.push_back(Addr);
5842
5843 // Use a worklist to iteratively look through PHI and select nodes, and
5844 // ensure that the addressing mode obtained from the non-PHI/select roots of
5845 // the graph are compatible.
5846 bool PhiOrSelectSeen = false;
5847 SmallVector<Instruction *, 16> AddrModeInsts;
5848 AddressingModeCombiner AddrModes(*DL, Addr);
5849 TypePromotionTransaction TPT(RemovedInsts);
5850 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5851 TPT.getRestorationPoint();
5852 while (!worklist.empty()) {
5853 Value *V = worklist.pop_back_val();
5854
5855 // We allow traversing cyclic Phi nodes.
5856 // In case of success after this loop we ensure that traversing through
5857 // Phi nodes ends up with all cases to compute address of the form
5858 // BaseGV + Base + Scale * Index + Offset
5859 // where Scale and Offset are constants and BaseGV, Base and Index
5860 // are exactly the same Values in all cases.
5861 // It means that BaseGV, Scale and Offset dominate our memory instruction
5862 // and have the same value as they had in address computation represented
5863 // as Phi. So we can safely sink address computation to memory instruction.
5864 if (!Visited.insert(V).second)
5865 continue;
5866
5867 // For a PHI node, push all of its incoming values.
5868 if (PHINode *P = dyn_cast<PHINode>(V)) {
5869 append_range(worklist, P->incoming_values());
5870 PhiOrSelectSeen = true;
5871 continue;
5872 }
5873 // Similar for select.
5874 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5875 worklist.push_back(SI->getFalseValue());
5876 worklist.push_back(SI->getTrueValue());
5877 PhiOrSelectSeen = true;
5878 continue;
5879 }
5880
5881 // For non-PHIs, determine the addressing mode being computed. Note that
5882 // the result may differ depending on what other uses our candidate
5883 // addressing instructions might have.
5884 AddrModeInsts.clear();
5885 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5886 0);
5887 // Defer the query (and possible computation of) the dom tree to point of
5888 // actual use. It's expected that most address matches don't actually need
5889 // the domtree.
5890 auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5891 Function *F = MemoryInst->getParent()->getParent();
5892 return this->getDT(*F);
5893 };
5894 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5895 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5896 *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5897 BFI.get());
5898
5899 GetElementPtrInst *GEP = LargeOffsetGEP.first;
5900 if (GEP && !NewGEPBases.count(GEP)) {
5901 // If splitting the underlying data structure can reduce the offset of a
5902 // GEP, collect the GEP. Skip the GEPs that are the new bases of
5903 // previously split data structures.
5904 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5905 LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5906 }
5907
5908 NewAddrMode.OriginalValue = V;
5909 if (!AddrModes.addNewAddrMode(NewAddrMode))
5910 break;
5911 }
5912
5913 // Try to combine the AddrModes we've collected. If we couldn't collect any,
5914 // or we have multiple but either couldn't combine them or combining them
5915 // wouldn't do anything useful, bail out now.
5916 if (!AddrModes.combineAddrModes()) {
5917 TPT.rollback(LastKnownGood);
5918 return false;
5919 }
5920 bool Modified = TPT.commit();
5921
5922 // Get the combined AddrMode (or the only AddrMode, if we only had one).
5923 ExtAddrMode AddrMode = AddrModes.getAddrMode();
5924
5925 // If all the instructions matched are already in this BB, don't do anything.
5926 // If we saw a Phi node then it is definitely not local, and if we saw a
5927 // select then we want to push the address calculation past it even if it's
5928 // already in this BB.
5929 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5930 return IsNonLocalValue(V, MemoryInst->getParent());
5931 })) {
5932 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5933 << "\n");
5934 return Modified;
5935 }
5936
5937 // Now that we have determined the addressing expression we want to use and
5938 // know we must sink it into this block, check to see if we have already
5939 // done this for some other load/store instr in this block. If so, reuse
5940 // the computation. Before attempting reuse, check if the address is valid
5941 // as it may have been erased.
5942
5943 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5944
5945 Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5946 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5947
5948 // The current BB may be optimized multiple times, we can't guarantee the
5949 // reuse of Addr happens later; call findInsertPos to find an appropriate
5950 // insert position.
5951 auto InsertPos = findInsertPos(Addr, MemoryInst, SunkAddr);
5952
5953 // TODO: Adjust insert point considering (Base|Scaled)Reg if possible.
5954 if (!SunkAddr) {
5955 auto &DT = getDT(*MemoryInst->getFunction());
5956 if ((AddrMode.BaseReg && !DT.dominates(AddrMode.BaseReg, &*InsertPos)) ||
5957 (AddrMode.ScaledReg && !DT.dominates(AddrMode.ScaledReg, &*InsertPos)))
5958 return Modified;
5959 }
5960
5961 IRBuilder<> Builder(MemoryInst->getParent(), InsertPos);
5962
5963 if (SunkAddr) {
5964 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5965 << " for " << *MemoryInst << "\n");
5966 if (SunkAddr->getType() != Addr->getType()) {
5967 if (SunkAddr->getType()->getPointerAddressSpace() !=
5968 Addr->getType()->getPointerAddressSpace() &&
5969 !DL->isNonIntegralPointerType(Addr->getType())) {
5970 // There are two reasons the address spaces might not match: a no-op
5971 // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5972 // ptrtoint/inttoptr pair to ensure we match the original semantics.
5973 // TODO: allow bitcast between different address space pointers with the
5974 // same size.
5975 SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5976 SunkAddr =
5977 Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5978 } else
5979 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5980 }
5981 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5982 SubtargetInfo->addrSinkUsingGEPs())) {
5983 // By default, we use the GEP-based method when AA is used later. This
5984 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5985 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5986 << " for " << *MemoryInst << "\n");
5987 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5988
5989 // First, find the pointer.
5990 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5991 ResultPtr = AddrMode.BaseReg;
5992 AddrMode.BaseReg = nullptr;
5993 }
5994
5995 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5996 // We can't add more than one pointer together, nor can we scale a
5997 // pointer (both of which seem meaningless).
5998 if (ResultPtr || AddrMode.Scale != 1)
5999 return Modified;
6000
6001 ResultPtr = AddrMode.ScaledReg;
6002 AddrMode.Scale = 0;
6003 }
6004
6005 // It is only safe to sign extend the BaseReg if we know that the math
6006 // required to create it did not overflow before we extend it. Since
6007 // the original IR value was tossed in favor of a constant back when
6008 // the AddrMode was created we need to bail out gracefully if widths
6009 // do not match instead of extending it.
6010 //
6011 // (See below for code to add the scale.)
6012 if (AddrMode.Scale) {
6013 Type *ScaledRegTy = AddrMode.ScaledReg->getType();
6014 if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
6015 cast<IntegerType>(ScaledRegTy)->getBitWidth())
6016 return Modified;
6017 }
6018
6019 GlobalValue *BaseGV = AddrMode.BaseGV;
6020 if (BaseGV != nullptr) {
6021 if (ResultPtr)
6022 return Modified;
6023
6024 if (BaseGV->isThreadLocal()) {
6025 ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
6026 } else {
6027 ResultPtr = BaseGV;
6028 }
6029 }
6030
6031 // If the real base value actually came from an inttoptr, then the matcher
6032 // will look through it and provide only the integer value. In that case,
6033 // use it here.
6034 if (!DL->isNonIntegralPointerType(Addr->getType())) {
6035 if (!ResultPtr && AddrMode.BaseReg) {
6036 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
6037 "sunkaddr");
6038 AddrMode.BaseReg = nullptr;
6039 } else if (!ResultPtr && AddrMode.Scale == 1) {
6040 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
6041 "sunkaddr");
6042 AddrMode.Scale = 0;
6043 }
6044 }
6045
6046 if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
6047 !AddrMode.BaseOffs) {
6048 SunkAddr = Constant::getNullValue(Addr->getType());
6049 } else if (!ResultPtr) {
6050 return Modified;
6051 } else {
6052 Type *I8PtrTy =
6053 Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
6054
6055 // Start with the base register. Do this first so that subsequent address
6056 // matching finds it last, which will prevent it from trying to match it
6057 // as the scaled value in case it happens to be a mul. That would be
6058 // problematic if we've sunk a different mul for the scale, because then
6059 // we'd end up sinking both muls.
6060 if (AddrMode.BaseReg) {
6061 Value *V = AddrMode.BaseReg;
6062 if (V->getType() != IntPtrTy)
6063 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
6064
6065 ResultIndex = V;
6066 }
6067
6068 // Add the scale value.
6069 if (AddrMode.Scale) {
6070 Value *V = AddrMode.ScaledReg;
6071 if (V->getType() == IntPtrTy) {
6072 // done.
6073 } else {
6074 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
6075 cast<IntegerType>(V->getType())->getBitWidth() &&
6076 "We can't transform if ScaledReg is too narrow");
6077 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
6078 }
6079
6080 if (AddrMode.Scale != 1)
6081 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
6082 "sunkaddr");
6083 if (ResultIndex)
6084 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
6085 else
6086 ResultIndex = V;
6087 }
6088
6089 // Add in the Base Offset if present.
6090 if (AddrMode.BaseOffs) {
6091 Value *V = ConstantInt::getSigned(IntPtrTy, AddrMode.BaseOffs);
6092 if (ResultIndex) {
6093 // We need to add this separately from the scale above to help with
6094 // SDAG consecutive load/store merging.
6095 if (ResultPtr->getType() != I8PtrTy)
6096 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
6097 ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
6098 AddrMode.InBounds);
6099 }
6100
6101 ResultIndex = V;
6102 }
6103
6104 if (!ResultIndex) {
6105 auto PtrInst = dyn_cast<Instruction>(ResultPtr);
6106 // We know that we have a pointer without any offsets. If this pointer
6107 // originates from a different basic block than the current one, we
6108 // must be able to recreate it in the current basic block.
6109 // We do not support the recreation of any instructions yet.
6110 if (PtrInst && PtrInst->getParent() != MemoryInst->getParent())
6111 return Modified;
6112 SunkAddr = ResultPtr;
6113 } else {
6114 if (ResultPtr->getType() != I8PtrTy)
6115 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
6116 SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
6117 AddrMode.InBounds);
6118 }
6119
6120 if (SunkAddr->getType() != Addr->getType()) {
6121 if (SunkAddr->getType()->getPointerAddressSpace() !=
6122 Addr->getType()->getPointerAddressSpace() &&
6123 !DL->isNonIntegralPointerType(Addr->getType())) {
6124 // There are two reasons the address spaces might not match: a no-op
6125 // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
6126 // ptrtoint/inttoptr pair to ensure we match the original semantics.
6127 // TODO: allow bitcast between different address space pointers with
6128 // the same size.
6129 SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
6130 SunkAddr =
6131 Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
6132 } else
6133 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
6134 }
6135 }
6136 } else {
6137 // We'd require a ptrtoint/inttoptr down the line, which we can't do for
6138 // non-integral pointers, so in that case bail out now.
6139 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
6140 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
6141 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
6142 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
6143 if (DL->isNonIntegralPointerType(Addr->getType()) ||
6144 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
6145 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
6146 (AddrMode.BaseGV &&
6147 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
6148 return Modified;
6149
6150 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
6151 << " for " << *MemoryInst << "\n");
6152 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
6153 Value *Result = nullptr;
6154
6155 // Start with the base register. Do this first so that subsequent address
6156 // matching finds it last, which will prevent it from trying to match it
6157 // as the scaled value in case it happens to be a mul. That would be
6158 // problematic if we've sunk a different mul for the scale, because then
6159 // we'd end up sinking both muls.
6160 if (AddrMode.BaseReg) {
6161 Value *V = AddrMode.BaseReg;
6162 if (V->getType()->isPointerTy())
6163 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6164 if (V->getType() != IntPtrTy)
6165 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
6166 Result = V;
6167 }
6168
6169 // Add the scale value.
6170 if (AddrMode.Scale) {
6171 Value *V = AddrMode.ScaledReg;
6172 if (V->getType() == IntPtrTy) {
6173 // done.
6174 } else if (V->getType()->isPointerTy()) {
6175 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6176 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
6177 cast<IntegerType>(V->getType())->getBitWidth()) {
6178 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
6179 } else {
6180 // It is only safe to sign extend the BaseReg if we know that the math
6181 // required to create it did not overflow before we extend it. Since
6182 // the original IR value was tossed in favor of a constant back when
6183 // the AddrMode was created we need to bail out gracefully if widths
6184 // do not match instead of extending it.
6185 Instruction *I = dyn_cast_or_null<Instruction>(Result);
6186 if (I && (Result != AddrMode.BaseReg))
6187 I->eraseFromParent();
6188 return Modified;
6189 }
6190 if (AddrMode.Scale != 1)
6191 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
6192 "sunkaddr");
6193 if (Result)
6194 Result = Builder.CreateAdd(Result, V, "sunkaddr");
6195 else
6196 Result = V;
6197 }
6198
6199 // Add in the BaseGV if present.
6200 GlobalValue *BaseGV = AddrMode.BaseGV;
6201 if (BaseGV != nullptr) {
6202 Value *BaseGVPtr;
6203 if (BaseGV->isThreadLocal()) {
6204 BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
6205 } else {
6206 BaseGVPtr = BaseGV;
6207 }
6208 Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
6209 if (Result)
6210 Result = Builder.CreateAdd(Result, V, "sunkaddr");
6211 else
6212 Result = V;
6213 }
6214
6215 // Add in the Base Offset if present.
6216 if (AddrMode.BaseOffs) {
6217 Value *V = ConstantInt::getSigned(IntPtrTy, AddrMode.BaseOffs);
6218 if (Result)
6219 Result = Builder.CreateAdd(Result, V, "sunkaddr");
6220 else
6221 Result = V;
6222 }
6223
6224 if (!Result)
6225 SunkAddr = Constant::getNullValue(Addr->getType());
6226 else
6227 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
6228 }
6229
6230 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
6231 // Store the newly computed address into the cache. In the case we reused a
6232 // value, this should be idempotent.
6233 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
6234
6235 // If we have no uses, recursively delete the value and all dead instructions
6236 // using it.
6237 if (Repl->use_empty()) {
6238 resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
6239 RecursivelyDeleteTriviallyDeadInstructions(
6240 Repl, TLInfo, nullptr,
6241 [&](Value *V) { removeAllAssertingVHReferences(V); });
6242 });
6243 }
6244 ++NumMemoryInsts;
6245 return true;
6246}
6247
6248/// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
6249/// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
6250/// only handle a 2 operand GEP in the same basic block or a splat constant
6251/// vector. The 2 operands to the GEP must have a scalar pointer and a vector
6252/// index.
6253///
6254/// If the existing GEP has a vector base pointer that is splat, we can look
6255/// through the splat to find the scalar pointer. If we can't find a scalar
6256/// pointer there's nothing we can do.
6257///
6258/// If we have a GEP with more than 2 indices where the middle indices are all
6259/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
6260///
6261/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
6262/// followed by a GEP with an all zeroes vector index. This will enable
6263/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
6264/// zero index.
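///
/// Illustrative sketch (editorial, not from a test): a gather whose GEP has a
/// zero middle index,
/// \code
///   %gep = getelementptr [256 x i32], ptr %base, i64 0, <4 x i64> %idx
///   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %gep, ...)
/// \endcode
/// is rewritten into a scalar GEP followed by a 2-operand vector GEP,
/// \code
///   %scalar = getelementptr [256 x i32], ptr %base, i64 0, i64 0
///   %newgep = getelementptr i32, ptr %scalar, <4 x i64> %idx
/// \endcode
/// giving SelectionDAGBuilder a uniform base (%scalar) and a vector index.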
6265bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
6266 Value *Ptr) {
6267 Value *NewAddr;
6268
6269 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
6270 // Don't optimize GEPs that don't have indices.
6271 if (!GEP->hasIndices())
6272 return false;
6273
6274 // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
6275 // FIXME: We should support this by sinking the GEP.
6276 if (MemoryInst->getParent() != GEP->getParent())
6277 return false;
6278
6279 SmallVector<Value *, 2> Ops(GEP->operands());
6280
6281 bool RewriteGEP = false;
6282
6283 if (Ops[0]->getType()->isVectorTy()) {
6284 Ops[0] = getSplatValue(Ops[0]);
6285 if (!Ops[0])
6286 return false;
6287 RewriteGEP = true;
6288 }
6289
6290 unsigned FinalIndex = Ops.size() - 1;
6291
6292 // Ensure all indices but the last are 0.
6293 // FIXME: This isn't strictly required. All that's required is that they are
6294 // all scalars or splats.
6295 for (unsigned i = 1; i < FinalIndex; ++i) {
6296 auto *C = dyn_cast<Constant>(Ops[i]);
6297 if (!C)
6298 return false;
6299 if (isa<VectorType>(C->getType()))
6300 C = C->getSplatValue();
6301 auto *CI = dyn_cast_or_null<ConstantInt>(C);
6302 if (!CI || !CI->isZero())
6303 return false;
6304 // Scalarize the index if needed.
6305 Ops[i] = CI;
6306 }
6307
6308 // Try to scalarize the final index.
6309 if (Ops[FinalIndex]->getType()->isVectorTy()) {
6310 if (Value *V = getSplatValue(Ops[FinalIndex])) {
6311 auto *C = dyn_cast<ConstantInt>(V);
6312 // Don't scalarize all zeros vector.
6313 if (!C || !C->isZero()) {
6314 Ops[FinalIndex] = V;
6315 RewriteGEP = true;
6316 }
6317 }
6318 }
6319
6320 // If we made any changes or we have extra operands, we need to generate
6321 // new instructions.
6322 if (!RewriteGEP && Ops.size() == 2)
6323 return false;
6324
6325 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6326
6327 IRBuilder<> Builder(MemoryInst);
6328
6329 Type *SourceTy = GEP->getSourceElementType();
6330 Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
6331
6332 // If the final index isn't a vector, emit a scalar GEP containing all ops
6333 // and a vector GEP with all zeroes final index.
6334 if (!Ops[FinalIndex]->getType()->isVectorTy()) {
6335 NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
6336 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6337 auto *SecondTy = GetElementPtrInst::getIndexedType(
6338 SourceTy, ArrayRef(Ops).drop_front());
6339 NewAddr =
6340 Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
6341 } else {
6342 Value *Base = Ops[0];
6343 Value *Index = Ops[FinalIndex];
6344
6345 // Create a scalar GEP if there are more than 2 operands.
6346 if (Ops.size() != 2) {
6347 // Replace the last index with 0.
6348 Ops[FinalIndex] =
6349 Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
6350 Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
6351 SourceTy = GetElementPtrInst::getIndexedType(
6352 SourceTy, ArrayRef(Ops).drop_front());
6353 }
6354
6355 // Now create the GEP with scalar pointer and vector index.
6356 NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
6357 }
6358 } else if (!isa<Constant>(Ptr)) {
6359 // Not a GEP, maybe it's a splat and we can create a GEP to enable
6360 // SelectionDAGBuilder to use it as a uniform base.
6361 Value *V = getSplatValue(Ptr);
6362 if (!V)
6363 return false;
6364
6365 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6366
6367 IRBuilder<> Builder(MemoryInst);
6368
6369 // Emit a vector GEP with a scalar pointer and all 0s vector index.
6370 Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
6371 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6372 Type *ScalarTy;
6373 if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6374 Intrinsic::masked_gather) {
6375 ScalarTy = MemoryInst->getType()->getScalarType();
6376 } else {
6377 assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6378 Intrinsic::masked_scatter);
6379 ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
6380 }
6381 NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
6382 } else {
6383 // Constant, SelectionDAGBuilder knows to check if it's a splat.
6384 return false;
6385 }
6386
6387 MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
6388
6389 // If we have no uses, recursively delete the value and all dead instructions
6390 // using it.
6391 if (Ptr->use_empty())
6392 RecursivelyDeleteTriviallyDeadInstructions(
6393 Ptr, TLInfo, nullptr,
6394 [&](Value *V) { removeAllAssertingVHReferences(V); });
6395
6396 return true;
6397}
6398
6399// This is a helper for CodeGenPrepare::optimizeMulWithOverflow.
6400 // Check for the pattern we are interested in, where the intrinsic has at most
6401 // 2 uses, both of which are extractvalue instructions.
6402 static bool matchOverflowPattern(Instruction *I, ExtractValueInst *&MulExtract,
6403 ExtractValueInst *&OverflowExtract) {
6404 // Bail out if there are more than 2 users:
6405 if (I->hasNUsesOrMore(3))
6406 return false;
6407
6408 for (User *U : I->users()) {
6409 auto *Extract = dyn_cast<ExtractValueInst>(U);
6410 if (!Extract || Extract->getNumIndices() != 1)
6411 return false;
6412
6413 unsigned Index = Extract->getIndices()[0];
6414 if (Index == 0)
6415 MulExtract = Extract;
6416 else if (Index == 1)
6417 OverflowExtract = Extract;
6418 else
6419 return false;
6420 }
6421 return true;
6422}
6423
6424 // Rewrite the mul_with_overflow intrinsic by checking whether both operands'
6425 // value ranges fit within the legal (half-width) type. If so, we can use a
6426 // cheaper multiplication algorithm. This rewrite conceptually belongs in type
6427 // legalization, but since it requires reconstructing IR, which is not possible
6428 // there, we do it here.
6429// The IR after the optimization will look like:
6430// entry:
6431// if signed:
6432// ( (lhs_lo>>BW-1) ^ lhs_hi) || ( (rhs_lo>>BW-1) ^ rhs_hi) ? overflow,
6433// overflow_no
6434// else:
6435// (lhs_hi != 0) || (rhs_hi != 0) ? overflow, overflow_no
6436// overflow_no:
6437// overflow:
6438// overflow.res:
6439// \returns true if optimization was applied
6440// TODO: This optimization can be further improved to optimize branching on
6441// overflow where the 'overflow_no' BB can branch directly to the false
6442// successor of overflow, but that would add additional complexity so we leave
6443// it for future work.
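//
// Illustrative sketch (editorial, not from a test; the %c1/%c2 names are made
// up) of the check emitted for an unsigned i128 multiply with a 64-bit legal
// type:
//   %lo.lhs  = trunc i128 %a to i64
//   %lhs.lsr = lshr i128 %a, 64
//   %hi.lhs  = trunc i128 %lhs.lsr to i64
//   %lo.rhs  = trunc i128 %b to i64
//   %rhs.lsr = lshr i128 %b, 64
//   %hi.rhs  = trunc i128 %rhs.lsr to i64
//   %c1 = icmp ne i64 %hi.lhs, 0
//   %c2 = icmp ne i64 %hi.rhs, 0
//   %or.lhs.rhs = or i1 %c1, %c2
//   br i1 %or.lhs.rhs, label %overflow, label %overflow.no
// In overflow.no the product is a plain multiply of the zero-extended low
// halves (no overflow is possible); in overflow the original intrinsic is
// kept. The two results are merged with PHIs in overflow.res.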
6444bool CodeGenPrepare::optimizeMulWithOverflow(Instruction *I, bool IsSigned,
6445 ModifyDT &ModifiedDT) {
6446 // Check if target supports this optimization.
6448 I->getContext(),
6449 TLI->getValueType(*DL, I->getType()->getContainedType(0))))
6450 return false;
6451
6452 ExtractValueInst *MulExtract = nullptr, *OverflowExtract = nullptr;
6453 if (!matchOverflowPattern(I, MulExtract, OverflowExtract))
6454 return false;
6455
6457 // Keep track of the instruction so we do not optimize it again.
6457 InsertedInsts.insert(I);
6458
6459 Value *LHS = I->getOperand(0);
6460 Value *RHS = I->getOperand(1);
6461 Type *Ty = LHS->getType();
6462 unsigned VTHalfBitWidth = Ty->getScalarSizeInBits() / 2;
6463 Type *LegalTy = Ty->getWithNewBitWidth(VTHalfBitWidth);
6464
6465 // New BBs:
6466 BasicBlock *OverflowEntryBB =
6467 I->getParent()->splitBasicBlock(I, "", /*Before*/ true);
6468 OverflowEntryBB->takeName(I->getParent());
6469 // Keep the 'br' instruction that is generated as a result of the split to be
6470 // erased/replaced later.
6471 Instruction *OldTerminator = OverflowEntryBB->getTerminator();
6472 BasicBlock *NoOverflowBB =
6473 BasicBlock::Create(I->getContext(), "overflow.no", I->getFunction());
6474 NoOverflowBB->moveAfter(OverflowEntryBB);
6475 BasicBlock *OverflowBB =
6476 BasicBlock::Create(I->getContext(), "overflow", I->getFunction());
6477 OverflowBB->moveAfter(NoOverflowBB);
6478
6479 // BB overflow.entry:
6480 IRBuilder<> Builder(OverflowEntryBB);
6481 // Extract low and high halves of LHS:
6482 Value *LoLHS = Builder.CreateTrunc(LHS, LegalTy, "lo.lhs");
6483 Value *HiLHS = Builder.CreateLShr(LHS, VTHalfBitWidth, "lhs.lsr");
6484 HiLHS = Builder.CreateTrunc(HiLHS, LegalTy, "hi.lhs");
6485
6486 // Extract low and high halves of RHS:
6487 Value *LoRHS = Builder.CreateTrunc(RHS, LegalTy, "lo.rhs");
6488 Value *HiRHS = Builder.CreateLShr(RHS, VTHalfBitWidth, "rhs.lsr");
6489 HiRHS = Builder.CreateTrunc(HiRHS, LegalTy, "hi.rhs");
6490
6491 Value *IsAnyBitTrue;
6492 if (IsSigned) {
6493 Value *SignLoLHS =
6494 Builder.CreateAShr(LoLHS, VTHalfBitWidth - 1, "sign.lo.lhs");
6495 Value *SignLoRHS =
6496 Builder.CreateAShr(LoRHS, VTHalfBitWidth - 1, "sign.lo.rhs");
6497 Value *XorLHS = Builder.CreateXor(HiLHS, SignLoLHS);
6498 Value *XorRHS = Builder.CreateXor(HiRHS, SignLoRHS);
6499 Value *Or = Builder.CreateOr(XorLHS, XorRHS, "or.lhs.rhs");
6500 IsAnyBitTrue = Builder.CreateCmp(ICmpInst::ICMP_NE, Or,
6501 ConstantInt::getNullValue(Or->getType()));
6502 } else {
6503 Value *CmpLHS = Builder.CreateCmp(ICmpInst::ICMP_NE, HiLHS,
6504 ConstantInt::getNullValue(LegalTy));
6505 Value *CmpRHS = Builder.CreateCmp(ICmpInst::ICMP_NE, HiRHS,
6506 ConstantInt::getNullValue(LegalTy));
6507 IsAnyBitTrue = Builder.CreateOr(CmpLHS, CmpRHS, "or.lhs.rhs");
6508 }
6509 Builder.CreateCondBr(IsAnyBitTrue, OverflowBB, NoOverflowBB);
6510
6511 // BB overflow.no:
6512 Builder.SetInsertPoint(NoOverflowBB);
6513 Value *ExtLoLHS, *ExtLoRHS;
6514 if (IsSigned) {
6515 ExtLoLHS = Builder.CreateSExt(LoLHS, Ty, "lo.lhs.ext");
6516 ExtLoRHS = Builder.CreateSExt(LoRHS, Ty, "lo.rhs.ext");
6517 } else {
6518 ExtLoLHS = Builder.CreateZExt(LoLHS, Ty, "lo.lhs.ext");
6519 ExtLoRHS = Builder.CreateZExt(LoRHS, Ty, "lo.rhs.ext");
6520 }
6521
6522 Value *Mul = Builder.CreateMul(ExtLoLHS, ExtLoRHS, "mul.overflow.no");
6523
6524 // Create the 'overflow.res' BB to merge the results of
6525 // the two paths:
6526 BasicBlock *OverflowResBB = I->getParent();
6527 OverflowResBB->setName("overflow.res");
6528
6529 // BB overflow.no: jump to overflow.res BB
6530 Builder.CreateBr(OverflowResBB);
6531 // Now we don't need the old terminator in the overflow.entry BB, so erase it:
6532 OldTerminator->eraseFromParent();
6533
6534 // BB overflow.res:
6535 Builder.SetInsertPoint(OverflowResBB, OverflowResBB->getFirstInsertionPt());
6536 // Create PHI nodes to merge results from the overflow.no BB and overflow BB
6537 // to replace the extract instructions.
6538 PHINode *OverflowResPHI = Builder.CreatePHI(Ty, 2),
6539 *OverflowFlagPHI =
6540 Builder.CreatePHI(IntegerType::getInt1Ty(I->getContext()), 2);
6541
6542 // Add the incoming values from the overflow.no BB and later from the overflow BB.
6543 OverflowResPHI->addIncoming(Mul, NoOverflowBB);
6544 OverflowFlagPHI->addIncoming(ConstantInt::getFalse(I->getContext()),
6545 NoOverflowBB);
6546
6547 // Replace all users of MulExtract and OverflowExtract to use the PHI nodes.
6548 if (MulExtract) {
6549 MulExtract->replaceAllUsesWith(OverflowResPHI);
6550 MulExtract->eraseFromParent();
6551 }
6552 if (OverflowExtract) {
6553 OverflowExtract->replaceAllUsesWith(OverflowFlagPHI);
6554 OverflowExtract->eraseFromParent();
6555 }
6556
6557 // Remove the intrinsic from its parent (the overflow.res BB) as it will become
6558 // part of the overflow BB.
6559 I->removeFromParent();
6560 // BB overflow:
6561 I->insertInto(OverflowBB, OverflowBB->end());
6562 Builder.SetInsertPoint(OverflowBB, OverflowBB->end());
6563 Value *MulOverflow = Builder.CreateExtractValue(I, {0}, "mul.overflow");
6564 Value *OverflowFlag = Builder.CreateExtractValue(I, {1}, "overflow.flag");
6565 Builder.CreateBr(OverflowResBB);
6566
6567 // Add the extracted values to the PHI nodes in the overflow.res BB.
6568 OverflowResPHI->addIncoming(MulOverflow, OverflowBB);
6569 OverflowFlagPHI->addIncoming(OverflowFlag, OverflowBB);
6570
6571 ModifiedDT = ModifyDT::ModifyBBDT;
6572 return true;
6573}
6574
6575/// If there are any memory operands, use OptimizeMemoryInst to sink their
6576/// address computing into the block when possible / profitable.
6577bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
6578 bool MadeChange = false;
6579
6580 const TargetRegisterInfo *TRI =
6581 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
6582 TargetLowering::AsmOperandInfoVector TargetConstraints =
6583 TLI->ParseConstraints(*DL, TRI, *CS);
6584 unsigned ArgNo = 0;
6585 for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
6586 // Compute the constraint code and ConstraintType to use.
6587 TLI->ComputeConstraintToUse(OpInfo, SDValue());
6588
6589 // TODO: Also handle C_Address?
6590 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6591 OpInfo.isIndirect) {
6592 Value *OpVal = CS->getArgOperand(ArgNo++);
6593 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
6594 } else if (OpInfo.Type == InlineAsm::isInput)
6595 ArgNo++;
6596 }
6597
6598 return MadeChange;
6599}
6600
6601/// Check if all the uses of \p Val are equivalent (or free) zero or
6602/// sign extensions.
6603static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
6604 assert(!Val->use_empty() && "Input must have at least one use");
6605 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
6606 bool IsSExt = isa<SExtInst>(FirstUser);
6607 Type *ExtTy = FirstUser->getType();
6608 for (const User *U : Val->users()) {
6609 const Instruction *UI = cast<Instruction>(U);
6610 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
6611 return false;
6612 Type *CurTy = UI->getType();
6613 // Same input and output types: Same instruction after CSE.
6614 if (CurTy == ExtTy)
6615 continue;
6616
6617 // If IsSExt is true, we are in this situation:
6618 // a = Val
6619 // b = sext ty1 a to ty2
6620 // c = sext ty1 a to ty3
6621 // Assuming ty2 is shorter than ty3, this could be turned into:
6622 // a = Val
6623 // b = sext ty1 a to ty2
6624 // c = sext ty2 b to ty3
6625 // However, the last sext is not free.
6626 if (IsSExt)
6627 return false;
6628
6629 // This is a ZExt, maybe this is free to extend from one type to another.
6630 // In that case, we would not account for a different use.
6631 Type *NarrowTy;
6632 Type *LargeTy;
6633 if (ExtTy->getScalarType()->getIntegerBitWidth() >
6634 CurTy->getScalarType()->getIntegerBitWidth()) {
6635 NarrowTy = CurTy;
6636 LargeTy = ExtTy;
6637 } else {
6638 NarrowTy = ExtTy;
6639 LargeTy = CurTy;
6640 }
6641
6642 if (!TLI.isZExtFree(NarrowTy, LargeTy))
6643 return false;
6644 }
6645 // All uses are the same or can be derived from one another for free.
6646 return true;
6647}
6648
6649/// Try to speculatively promote extensions in \p Exts and continue
6650/// promoting through newly promoted operands recursively as far as doing so is
6651/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
6652/// When some promotion happened, \p TPT contains the proper state to revert
6653/// them.
6654///
6655/// \return true if some promotion happened, false otherwise.
6656bool CodeGenPrepare::tryToPromoteExts(
6657 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
6658 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
6659 unsigned CreatedInstsCost) {
6660 bool Promoted = false;
6661
6662 // Iterate over all the extensions to try to promote them.
6663 for (auto *I : Exts) {
6664 // Early check if we directly have ext(load).
6665 if (isa<LoadInst>(I->getOperand(0))) {
6666 ProfitablyMovedExts.push_back(I);
6667 continue;
6668 }
6669
6670 // Check whether or not we want to do any promotion. The reason we have
6671 // this check inside the for loop is to catch the case where an extension
6672 // is directly fed by a load because in such case the extension can be moved
6673 // up without any promotion on its operands.
6674 if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
6675 return false;
6676
6677 // Get the action to perform the promotion.
6678 TypePromotionHelper::Action TPH =
6679 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
6680 // Check if we can promote.
6681 if (!TPH) {
6682 // Save the current extension as we cannot move up through its operand.
6683 ProfitablyMovedExts.push_back(I);
6684 continue;
6685 }
6686
6687 // Save the current state.
6688 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6689 TPT.getRestorationPoint();
6690 SmallVector<Instruction *, 4> NewExts;
6691 unsigned NewCreatedInstsCost = 0;
6692 unsigned ExtCost = !TLI->isExtFree(I);
6693 // Promote.
6694 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
6695 &NewExts, nullptr, *TLI);
6696 assert(PromotedVal &&
6697 "TypePromotionHelper should have filtered out those cases");
6698
6699 // Only one extension can be merged into a load.
6700 // Therefore, if we have more than 1 new extension we heuristically
6701 // cut this search path, because it means we degrade the code quality.
6702 // With exactly 2, the transformation is neutral, because we will merge
6703 // one extension but leave one. However, we optimistically keep going,
6704 // because the new extension may be removed too. Also avoid replacing a
6705 // single free extension with multiple extensions, as this increases the
6706 // number of IR instructions while not providing any savings.
6707 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
6708 // FIXME: It would be possible to propagate a negative value instead of
6709 // conservatively ceiling it to 0.
6710 TotalCreatedInstsCost =
6711 std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
6712 if (!StressExtLdPromotion &&
6713 (TotalCreatedInstsCost > 1 ||
6714 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
6715 (ExtCost == 0 && NewExts.size() > 1))) {
6716 // This promotion is not profitable, rollback to the previous state, and
6717 // save the current extension in ProfitablyMovedExts as the latest
6718 // speculative promotion turned out to be unprofitable.
6719 TPT.rollback(LastKnownGood);
6720 ProfitablyMovedExts.push_back(I);
6721 continue;
6722 }
6723 // Continue promoting NewExts as far as doing so is profitable.
6724 SmallVector<Instruction *, 2> NewlyMovedExts;
6725 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
6726 bool NewPromoted = false;
6727 for (auto *ExtInst : NewlyMovedExts) {
6728 Instruction *MovedExt = cast<Instruction>(ExtInst);
6729 Value *ExtOperand = MovedExt->getOperand(0);
6730 // If we have reached a load, we need this extra profitability check
6731 // as it could potentially be merged into an ext(load).
6732 if (isa<LoadInst>(ExtOperand) &&
6733 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
6734 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
6735 continue;
6736
6737 ProfitablyMovedExts.push_back(MovedExt);
6738 NewPromoted = true;
6739 }
6740
6741 // If none of speculative promotions for NewExts is profitable, rollback
6742 // and save the current extension (I) as the last profitable extension.
6743 if (!NewPromoted) {
6744 TPT.rollback(LastKnownGood);
6745 ProfitablyMovedExts.push_back(I);
6746 continue;
6747 }
6748 // The promotion is profitable.
6749 Promoted = true;
6750 }
6751 return Promoted;
6752}
6753
6754 /// Merge redundant sexts when one dominates the other.
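/// For example (illustrative): if
/// \code
///   %s1 = sext i32 %x to i64   ; dominates %s2
///   ...
///   %s2 = sext i32 %x to i64
/// \endcode
/// then the uses of %s2 are rewritten to use %s1 and %s2 is removed.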
6755bool CodeGenPrepare::mergeSExts(Function &F) {
6756 bool Changed = false;
6757 for (auto &Entry : ValToSExtendedUses) {
6758 SExts &Insts = Entry.second;
6759 SExts CurPts;
6760 for (Instruction *Inst : Insts) {
6761 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
6762 Inst->getOperand(0) != Entry.first)
6763 continue;
6764 bool inserted = false;
6765 for (auto &Pt : CurPts) {
6766 if (getDT(F).dominates(Inst, Pt)) {
6767 replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
6768 RemovedInsts.insert(Pt);
6769 Pt->removeFromParent();
6770 Pt = Inst;
6771 inserted = true;
6772 Changed = true;
6773 break;
6774 }
6775 if (!getDT(F).dominates(Pt, Inst))
6776 // Give up if we need to merge in a common dominator as the
6777 // experiments show it is not profitable.
6778 continue;
6779 replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
6780 RemovedInsts.insert(Inst);
6781 Inst->removeFromParent();
6782 inserted = true;
6783 Changed = true;
6784 break;
6785 }
6786 if (!inserted)
6787 CurPts.push_back(Inst);
6788 }
6789 }
6790 return Changed;
6791}
6792
6793 // Split large data structures so that the GEPs accessing them can have
6794 // smaller offsets, allowing them to be sunk to the same blocks as their users.
6795// For example, a large struct starting from %base is split into two parts
6796// where the second part starts from %new_base.
6797//
6798// Before:
6799// BB0:
6800// %base =
6801//
6802// BB1:
6803// %gep0 = gep %base, off0
6804// %gep1 = gep %base, off1
6805// %gep2 = gep %base, off2
6806//
6807// BB2:
6808// %load1 = load %gep0
6809// %load2 = load %gep1
6810// %load3 = load %gep2
6811//
6812// After:
6813// BB0:
6814// %base =
6815// %new_base = gep %base, off0
6816//
6817// BB1:
6818// %new_gep0 = %new_base
6819// %new_gep1 = gep %new_base, off1 - off0
6820// %new_gep2 = gep %new_base, off2 - off0
6821//
6822// BB2:
6823// %load1 = load i32, i32* %new_gep0
6824// %load2 = load i32, i32* %new_gep1
6825// %load3 = load i32, i32* %new_gep2
6826//
6827// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
6828 // their offsets are small enough to fit into the addressing mode.
6829bool CodeGenPrepare::splitLargeGEPOffsets() {
6830 bool Changed = false;
6831 for (auto &Entry : LargeOffsetGEPMap) {
6832 Value *OldBase = Entry.first;
6833 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6834 &LargeOffsetGEPs = Entry.second;
6835 auto compareGEPOffset =
6836 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6837 const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6838 if (LHS.first == RHS.first)
6839 return false;
6840 if (LHS.second != RHS.second)
6841 return LHS.second < RHS.second;
6842 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6843 };
6844 // Sorting all the GEPs of the same data structures based on the offsets.
6845 llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6846 LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
6847 // Skip if all the GEPs have the same offsets.
6848 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6849 continue;
6850 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6851 int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6852 Value *NewBaseGEP = nullptr;
6853
6854 auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
6855 GetElementPtrInst *GEP) {
6856 LLVMContext &Ctx = GEP->getContext();
6857 Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6858 Type *I8PtrTy =
6859 PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());
6860
6861 BasicBlock::iterator NewBaseInsertPt;
6862 BasicBlock *NewBaseInsertBB;
6863 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6864 // If the base of the struct is an instruction, the new base will be
6865 // inserted close to it.
6866 NewBaseInsertBB = BaseI->getParent();
6867 if (isa<PHINode>(BaseI))
6868 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6869 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6870 NewBaseInsertBB =
6871 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
6872 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6873 } else
6874 NewBaseInsertPt = std::next(BaseI->getIterator());
6875 } else {
6876 // If the current base is an argument or global value, the new base
6877 // will be inserted into the entry block.
6878 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6879 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6880 }
6881 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6882 // Create a new base.
6883 Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
6884 NewBaseGEP = OldBase;
6885 if (NewBaseGEP->getType() != I8PtrTy)
6886 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6887 NewBaseGEP =
6888 NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
6889 NewGEPBases.insert(NewBaseGEP);
6890 return;
6891 };
6892
6893 // Check whether all the offsets can be encoded with the preferred common base.
6894 if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
6895 LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
6896 BaseOffset = PreferBase;
6897 // Create a new base if the offset of the BaseGEP can be decoded with one
6898 // instruction.
6899 createNewBase(BaseOffset, OldBase, BaseGEP);
6900 }
6901
6902 auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6903 while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6904 GetElementPtrInst *GEP = LargeOffsetGEP->first;
6905 int64_t Offset = LargeOffsetGEP->second;
6906 if (Offset != BaseOffset) {
6907 TargetLowering::AddrMode AddrMode;
6908 AddrMode.HasBaseReg = true;
6909 AddrMode.BaseOffs = Offset - BaseOffset;
6910 // The result type of the GEP might not be the type of the memory
6911 // access.
6912 if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6913 GEP->getResultElementType(),
6914 GEP->getAddressSpace())) {
6915 // We need to create a new base if the offset to the current base is
6916 // too large to fit into the addressing mode. So, a very large struct
6917 // may be split into several parts.
6918 BaseGEP = GEP;
6919 BaseOffset = Offset;
6920 NewBaseGEP = nullptr;
6921 }
6922 }
6923
6924 // Generate a new GEP to replace the current one.
6925 Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6926
6927 if (!NewBaseGEP) {
6928 // Create a new base if we don't have one yet. Find the insertion
6929 // point for the new base first.
6930 createNewBase(BaseOffset, OldBase, GEP);
6931 }
6932
6933 IRBuilder<> Builder(GEP);
6934 Value *NewGEP = NewBaseGEP;
6935 if (Offset != BaseOffset) {
6936 // Calculate the new offset for the new GEP.
6937 Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
6938 NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
6939 }
6940 replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6941 LargeOffsetGEPID.erase(GEP);
6942 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6943 GEP->eraseFromParent();
6944 Changed = true;
6945 }
6946 }
6947 return Changed;
6948}
6949
6950bool CodeGenPrepare::optimizePhiType(
6951 PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6952 SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6953 // We are looking for a collection of interconnected phi nodes that together
6954 // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6955 // are of the same type. Convert the whole set of nodes to the type of the
6956 // bitcast.
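// Illustrative sketch (editorial, names are made up): an i32 phi that only
// carries float data,
//   %ld  = load i32, ptr %p
//   %phi = phi i32 [ %ld, %bb0 ], [ %other, %bb1 ]
//   %bc  = bitcast i32 %phi to float
// can be rewritten so the phi has type float, with a bitcast inserted after
// the load and the user bitcast removed (subject to
// TLI->shouldConvertPhiType).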
6957 Type *PhiTy = I->getType();
6958 Type *ConvertTy = nullptr;
6959 if (Visited.count(I) ||
6960 (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6961 return false;
6962
6963 SmallVector<Instruction *, 4> Worklist;
6964 Worklist.push_back(cast<Instruction>(I));
6965 SmallPtrSet<PHINode *, 4> PhiNodes;
6966 SmallPtrSet<ConstantData *, 4> Constants;
6967 PhiNodes.insert(I);
6968 Visited.insert(I);
6969 SmallPtrSet<Instruction *, 4> Defs;
6970 SmallPtrSet<Instruction *, 4> Uses;
6971 // This works by adding extra bitcasts between loads/stores and removing
6972 // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
6973 // we can get into the situation where we remove a bitcast in one iteration
6974 // just to add it again in the next. We need to ensure that at least one of the
6975 // bitcasts we remove is anchored to something that will not change back.
6976 bool AnyAnchored = false;
6977
6978 while (!Worklist.empty()) {
6979 Instruction *II = Worklist.pop_back_val();
6980
6981 if (auto *Phi = dyn_cast<PHINode>(II)) {
6982 // Handle Defs, which might also be PHIs.
6983 for (Value *V : Phi->incoming_values()) {
6984 if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6985 if (!PhiNodes.count(OpPhi)) {
6986 if (!Visited.insert(OpPhi).second)
6987 return false;
6988 PhiNodes.insert(OpPhi);
6989 Worklist.push_back(OpPhi);
6990 }
6991 } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6992 if (!OpLoad->isSimple())
6993 return false;
6994 if (Defs.insert(OpLoad).second)
6995 Worklist.push_back(OpLoad);
6996 } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6997 if (Defs.insert(OpEx).second)
6998 Worklist.push_back(OpEx);
6999 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
7000 if (!ConvertTy)
7001 ConvertTy = OpBC->getOperand(0)->getType();
7002 if (OpBC->getOperand(0)->getType() != ConvertTy)
7003 return false;
7004 if (Defs.insert(OpBC).second) {
7005 Worklist.push_back(OpBC);
7006 AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
7007 !isa<ExtractElementInst>(OpBC->getOperand(0));
7008 }
7009 } else if (auto *OpC = dyn_cast<ConstantData>(V))
7010 Constants.insert(OpC);
7011 else
7012 return false;
7013 }
7014 }
7015
7016 // Handle uses which might also be PHIs.
7017 for (User *V : II->users()) {
7018 if (auto *OpPhi = dyn_cast<PHINode>(V)) {
7019 if (!PhiNodes.count(OpPhi)) {
7020 if (Visited.count(OpPhi))
7021 return false;
7022 PhiNodes.insert(OpPhi);
7023 Visited.insert(OpPhi);
7024 Worklist.push_back(OpPhi);
7025 }
7026 } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
7027 if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
7028 return false;
7029 Uses.insert(OpStore);
7030 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
7031 if (!ConvertTy)
7032 ConvertTy = OpBC->getType();
7033 if (OpBC->getType() != ConvertTy)
7034 return false;
7035 Uses.insert(OpBC);
7036 AnyAnchored |=
7037 any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
7038 } else {
7039 return false;
7040 }
7041 }
7042 }
7043
7044 if (!ConvertTy || !AnyAnchored ||
7045 !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
7046 return false;
7047
7048 LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
7049 << *ConvertTy << "\n");
7050
7051 // Create all the new phi nodes of the new type, and bitcast any loads to the
7052 // correct type.
7053 ValueToValueMap ValMap;
7054 for (ConstantData *C : Constants)
7055 ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
7056 for (Instruction *D : Defs) {
7057 if (isa<BitCastInst>(D)) {
7058 ValMap[D] = D->getOperand(0);
7059 DeletedInstrs.insert(D);
7060 } else {
7061 BasicBlock::iterator insertPt = std::next(D->getIterator());
7062 ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
7063 }
7064 }
7065 for (PHINode *Phi : PhiNodes)
7066 ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
7067 Phi->getName() + ".tc", Phi->getIterator());
7068 // Pipe together all the PhiNodes.
7069 for (PHINode *Phi : PhiNodes) {
7070 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
7071 for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
7072 NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
7073 Phi->getIncomingBlock(i));
7074 Visited.insert(NewPhi);
7075 }
7076 // And finally pipe up the stores and bitcasts
7077 for (Instruction *U : Uses) {
7078 if (isa<BitCastInst>(U)) {
7079 DeletedInstrs.insert(U);
7080 replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
7081 } else {
7082 U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
7083 U->getIterator()));
7084 }
7085 }
7086
7087 // Save the removed phis to be deleted later.
7088 DeletedInstrs.insert_range(PhiNodes);
7089 return true;
7090}
7091
7092bool CodeGenPrepare::optimizePhiTypes(Function &F) {
7093 if (!OptimizePhiTypes)
7094 return false;
7095
7096 bool Changed = false;
7097 SmallPtrSet<PHINode *, 4> Visited;
7098 SmallPtrSet<Instruction *, 4> DeletedInstrs;
7099
7100 // Attempt to optimize all the phis in the function to the correct type.
7101 for (auto &BB : F)
7102 for (auto &Phi : BB.phis())
7103 Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
7104
7105 // Remove any old phis that have been converted.
7106 for (auto *I : DeletedInstrs) {
7107 replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
7108 I->eraseFromParent();
7109 }
7110
7111 return Changed;
7112}
7113
7114/// Return true, if an ext(load) can be formed from an extension in
7115/// \p MovedExts.
7116bool CodeGenPrepare::canFormExtLd(
7117 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
7118 Instruction *&Inst, bool HasPromoted) {
7119 for (auto *MovedExtInst : MovedExts) {
7120 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
7121 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
7122 Inst = MovedExtInst;
7123 break;
7124 }
7125 }
7126 if (!LI)
7127 return false;
7128
7129 // If they're already in the same block, there's nothing to do.
7130 // Make the cheap checks first if we did not promote.
7131 // If we promoted, we need to check if it is indeed profitable.
7132 if (!HasPromoted && LI->getParent() == Inst->getParent())
7133 return false;
7134
7135 return TLI->isExtLoad(LI, Inst, *DL);
7136}
7137
7138/// Move a zext or sext fed by a load into the same basic block as the load,
7139/// unless conditions are unfavorable. This allows SelectionDAG to fold the
7140/// extend into the load.
7141///
7142/// E.g.,
7143/// \code
7144/// %ld = load i32* %addr
7145/// %add = add nuw i32 %ld, 4
7146/// %zext = zext i32 %add to i64
7147 /// \endcode
7148/// =>
7149/// \code
7150/// %ld = load i32* %addr
7151/// %zext = zext i32 %ld to i64
7152/// %add = add nuw i64 %zext, 4
7153 /// \endcode
7154 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
7155 /// allows us to match zext(load i32*) to i64.
7156///
7157/// Also, try to promote the computations used to obtain a sign extended
7158 /// value used in memory accesses.
7159/// E.g.,
7160/// \code
7161/// a = add nsw i32 b, 3
7162/// d = sext i32 a to i64
7163/// e = getelementptr ..., i64 d
7164/// \endcode
7165/// =>
7166/// \code
7167/// f = sext i32 b to i64
7168/// a = add nsw i64 f, 3
7169/// e = getelementptr ..., i64 a
7170/// \endcode
7171///
7172/// \p Inst[in/out] the extension may be modified during the process if some
7173/// promotions apply.
7174bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
7175 bool AllowPromotionWithoutCommonHeader = false;
7176 /// See if it is an interesting sext operation for the address type
7177 /// promotion before trying to promote it, e.g., the ones with the right
7178 /// type and used in memory accesses.
7179 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
7180 *Inst, AllowPromotionWithoutCommonHeader);
7181 TypePromotionTransaction TPT(RemovedInsts);
7182 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
7183 TPT.getRestorationPoint();
7184 SmallVector<Instruction *, 1> Exts;
7185 SmallVector<Instruction *, 2> SpeculativelyMovedExts;
7186 Exts.push_back(Inst);
7187
7188 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
7189
7190 // Look for a load being extended.
7191 LoadInst *LI = nullptr;
7192 Instruction *ExtFedByLoad;
7193
7194 // Try to promote a chain of computation if it allows to form an extended
7195 // load.
7196 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
7197 assert(LI && ExtFedByLoad && "Expect a valid load and extension");
7198 TPT.commit();
7199 // Move the extend into the same block as the load.
7200 ExtFedByLoad->moveAfter(LI);
7201 ++NumExtsMoved;
7202 Inst = ExtFedByLoad;
7203 return true;
7204 }
7205
7206 // Continue promoting SExts if the target considers it worthwhile.
7207 if (ATPConsiderable &&
7208 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
7209 HasPromoted, TPT, SpeculativelyMovedExts))
7210 return true;
7211
7212 TPT.rollback(LastKnownGood);
7213 return false;
7214}
7215
7216// Perform address type promotion if doing so is profitable.
7217// If AllowPromotionWithoutCommonHeader == false, we should find other sext
7218// instructions that sign extended the same initial value. However, if
7219 // AllowPromotionWithoutCommonHeader == true, we assume promoting the
7220 // extension is profitable on its own.
7221bool CodeGenPrepare::performAddressTypePromotion(
7222 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
7223 bool HasPromoted, TypePromotionTransaction &TPT,
7224 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
7225 bool Promoted = false;
7226 SmallPtrSet<Instruction *, 1> UnhandledExts;
7227 bool AllSeenFirst = true;
7228 for (auto *I : SpeculativelyMovedExts) {
7229 Value *HeadOfChain = I->getOperand(0);
7230 DenseMap<Value *, Instruction *>::iterator AlreadySeen =
7231 SeenChainsForSExt.find(HeadOfChain);
7232 // If there is an unhandled SExt which has the same header, try to promote
7233 // it as well.
7234 if (AlreadySeen != SeenChainsForSExt.end()) {
7235 if (AlreadySeen->second != nullptr)
7236 UnhandledExts.insert(AlreadySeen->second);
7237 AllSeenFirst = false;
7238 }
7239 }
7240
7241 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
7242 SpeculativelyMovedExts.size() == 1)) {
7243 TPT.commit();
7244 if (HasPromoted)
7245 Promoted = true;
7246 for (auto *I : SpeculativelyMovedExts) {
7247 Value *HeadOfChain = I->getOperand(0);
7248 SeenChainsForSExt[HeadOfChain] = nullptr;
7249 ValToSExtendedUses[HeadOfChain].push_back(I);
7250 }
7251 // Update Inst as the promotion happened.
7252 Inst = SpeculativelyMovedExts.pop_back_val();
7253 } else {
7254 // This is the first chain visited from the header; keep the current chain
7255 // as unhandled. Defer promoting it until we encounter another SExt
7256 // chain derived from the same header.
7257 for (auto *I : SpeculativelyMovedExts) {
7258 Value *HeadOfChain = I->getOperand(0);
7259 SeenChainsForSExt[HeadOfChain] = Inst;
7260 }
7261 return false;
7262 }
7263
7264 if (!AllSeenFirst && !UnhandledExts.empty())
7265 for (auto *VisitedSExt : UnhandledExts) {
7266 if (RemovedInsts.count(VisitedSExt))
7267 continue;
7268 TypePromotionTransaction TPT(RemovedInsts);
7269 SmallVector<Instruction *, 1> Exts;
7270 SmallVector<Instruction *, 2> Chains;
7271 Exts.push_back(VisitedSExt);
7272 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
7273 TPT.commit();
7274 if (HasPromoted)
7275 Promoted = true;
7276 for (auto *I : Chains) {
7277 Value *HeadOfChain = I->getOperand(0);
7278 // Mark this as handled.
7279 SeenChainsForSExt[HeadOfChain] = nullptr;
7280 ValToSExtendedUses[HeadOfChain].push_back(I);
7281 }
7282 }
7283 return Promoted;
7284}
7285
7286bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
7287 BasicBlock *DefBB = I->getParent();
7288
7289 // If the result of a {s|z}ext and its source are both live out, rewrite all
7290 // other uses of the source with the result of the extension.
7291 Value *Src = I->getOperand(0);
7292 if (Src->hasOneUse())
7293 return false;
7294
7295 // Only do this xform if truncating is free.
7296 if (!TLI->isTruncateFree(I->getType(), Src->getType()))
7297 return false;
7298
7299 // Only safe to perform the optimization if the source is also defined in
7300 // this block.
7301 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
7302 return false;
7303
7304 bool DefIsLiveOut = false;
7305 for (User *U : I->users()) {
7306 Instruction *UI = cast<Instruction>(U);
7307
7308 // Figure out which BB this ext is used in.
7309 BasicBlock *UserBB = UI->getParent();
7310 if (UserBB == DefBB)
7311 continue;
7312 DefIsLiveOut = true;
7313 break;
7314 }
7315 if (!DefIsLiveOut)
7316 return false;
7317
7318 // Make sure none of the uses are PHI nodes.
7319 for (User *U : Src->users()) {
7320 Instruction *UI = cast<Instruction>(U);
7321 BasicBlock *UserBB = UI->getParent();
7322 if (UserBB == DefBB)
7323 continue;
7324 // Be conservative. We don't want this xform to end up introducing
7325 // reloads just before load / store instructions.
7326 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
7327 return false;
7328 }
7329
7330 // InsertedTruncs - Only insert one trunc in each block.
7331 DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
7332
7333 bool MadeChange = false;
7334 for (Use &U : Src->uses()) {
7335 Instruction *User = cast<Instruction>(U.getUser());
7336
7337 // Figure out which BB this ext is used in.
7338 BasicBlock *UserBB = User->getParent();
7339 if (UserBB == DefBB)
7340 continue;
7341
7342 // Both src and def are live in this block. Rewrite the use.
7343 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
7344
7345 if (!InsertedTrunc) {
7346 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
7347 assert(InsertPt != UserBB->end());
7348 InsertedTrunc = new TruncInst(I, Src->getType(), "");
7349 InsertedTrunc->insertBefore(*UserBB, InsertPt);
7350 InsertedInsts.insert(InsertedTrunc);
7351 }
7352
7353 // Replace a use of the {s|z}ext source with a use of the result.
7354 U = InsertedTrunc;
7355 ++NumExtUses;
7356 MadeChange = true;
7357 }
7358
7359 return MadeChange;
7360}
7361
7362// Find loads whose uses only use some of the loaded value's bits. Add an "and"
7363// just after the load if the target can fold this into one extload instruction,
7364// with the hope of eliminating some of the other later "and" instructions using
7365// the loaded value. "and"s that are made trivially redundant by the insertion
7366// of the new "and" are removed by this function, while others (e.g. those whose
7367// path from the load goes through a phi) are left for isel to potentially
7368// remove.
7369//
7370// For example:
7371//
7372// b0:
7373// x = load i32
7374// ...
7375// b1:
7376// y = and x, 0xff
7377// z = use y
7378//
7379// becomes:
7380//
7381// b0:
7382// x = load i32
7383// x' = and x, 0xff
7384// ...
7385// b1:
7386// z = use x'
7387//
7388// whereas:
7389//
7390// b0:
7391// x1 = load i32
7392// ...
7393// b1:
7394// x2 = load i32
7395// ...
7396// b2:
7397// x = phi x1, x2
7398// y = and x, 0xff
7399//
7400// becomes (after a call to optimizeLoadExt for each load):
7401//
7402// b0:
7403// x1 = load i32
7404// x1' = and x1, 0xff
7405// ...
7406// b1:
7407// x2 = load i32
7408// x2' = and x2, 0xff
7409// ...
7410// b2:
7411// x = phi x1', x2'
7412// y = and x, 0xff
7413bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
7414 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
7415 return false;
7416
7417 // Skip loads we've already transformed.
7418 if (Load->hasOneUse() &&
7419 InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
7420 return false;
7421
7422 // Look at all uses of Load, looking through phis, to determine how many bits
7423 // of the loaded value are needed.
7424 SmallVector<Instruction *, 8> WorkList;
7425 SmallPtrSet<Instruction *, 16> Visited;
7426 SmallVector<Instruction *, 8> AndsToMaybeRemove;
7427 SmallVector<Instruction *, 8> DropFlags;
7428 for (auto *U : Load->users())
7429 WorkList.push_back(cast<Instruction>(U));
7430
7431 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
7432 unsigned BitWidth = LoadResultVT.getSizeInBits();
7433 // If the BitWidth is 0, do not try to optimize the type
7434 if (BitWidth == 0)
7435 return false;
7436
7437 APInt DemandBits(BitWidth, 0);
7438 APInt WidestAndBits(BitWidth, 0);
7439
7440 while (!WorkList.empty()) {
7441 Instruction *I = WorkList.pop_back_val();
7442
7443 // Break use-def graph loops.
7444 if (!Visited.insert(I).second)
7445 continue;
7446
7447 // For a PHI node, push all of its users.
7448 if (auto *Phi = dyn_cast<PHINode>(I)) {
7449 for (auto *U : Phi->users())
7450 WorkList.push_back(cast<Instruction>(U));
7451 continue;
7452 }
7453
7454 switch (I->getOpcode()) {
7455 case Instruction::And: {
7456 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
7457 if (!AndC)
7458 return false;
7459 APInt AndBits = AndC->getValue();
7460 DemandBits |= AndBits;
7461 // Keep track of the widest and mask we see.
7462 if (AndBits.ugt(WidestAndBits))
7463 WidestAndBits = AndBits;
7464 if (AndBits == WidestAndBits && I->getOperand(0) == Load)
7465 AndsToMaybeRemove.push_back(I);
7466 break;
7467 }
7468
7469 case Instruction::Shl: {
7470 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
7471 if (!ShlC)
7472 return false;
7473 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
7474 DemandBits.setLowBits(BitWidth - ShiftAmt);
7475 DropFlags.push_back(I);
7476 break;
7477 }
7478
7479 case Instruction::Trunc: {
7480 EVT TruncVT = TLI->getValueType(*DL, I->getType());
7481 unsigned TruncBitWidth = TruncVT.getSizeInBits();
7482 DemandBits.setLowBits(TruncBitWidth);
7483 DropFlags.push_back(I);
7484 break;
7485 }
7486
7487 default:
7488 return false;
7489 }
7490 }
7491
7492 uint32_t ActiveBits = DemandBits.getActiveBits();
7493 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
7494 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
7495 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
7496 // (and (load x) 1) is not matched as a single instruction, rather as a LDR
7497 // followed by an AND.
7498 // TODO: Look into removing this restriction by fixing backends to either
7499 // return false for isLoadExtLegal for i1 or have them select this pattern to
7500 // a single instruction.
7501 //
7502 // Also avoid hoisting if we didn't see any ands with the exact DemandBits
7503 // mask, since these are the only ands that will be removed by isel.
7504 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
7505 WidestAndBits != DemandBits)
7506 return false;
7507
7508 LLVMContext &Ctx = Load->getType()->getContext();
7509 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
7510 EVT TruncVT = TLI->getValueType(*DL, TruncTy);
7511
7512 // Reject cases that won't be matched as extloads.
7513 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
7514 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
7515 return false;
7516
7517 IRBuilder<> Builder(Load->getNextNode());
7518 auto *NewAnd = cast<Instruction>(
7519 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
7520 // Mark this instruction as "inserted by CGP", so that other
7521 // optimizations don't touch it.
7522 InsertedInsts.insert(NewAnd);
7523
7524 // Replace all uses of load with new and (except for the use of load in the
7525 // new and itself).
7526 replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
7527 NewAnd->setOperand(0, Load);
7528
7529 // Remove any and instructions that are now redundant.
7530 for (auto *And : AndsToMaybeRemove)
7531 // Check that the and mask is the same as the one we decided to put on the
7532 // new and.
7533 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
7534 replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
7535 if (&*CurInstIterator == And)
7536 CurInstIterator = std::next(And->getIterator());
7537 And->eraseFromParent();
7538 ++NumAndUses;
7539 }
7540
7541 // NSW flags may no longer hold.
7542 for (auto *Inst : DropFlags)
7543 Inst->setHasNoSignedWrap(false);
7544
7545 ++NumAndsAdded;
7546 return true;
7547}
7548
7549/// Check if V (an operand of a select instruction) is an expensive instruction
7550/// that is only used once.
7551 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
7552 auto *I = dyn_cast<Instruction>(V);
7553 // If it's safe to speculatively execute, then it should not have side
7554 // effects; therefore, it's safe to sink and possibly *not* execute.
7555 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
7556 TTI->isExpensiveToSpeculativelyExecute(I);
7557}
7558
7559/// Returns true if a SelectInst should be turned into an explicit branch.
7560 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
7561 const TargetLowering *TLI,
7562 SelectInst *SI) {
7563 // If even a predictable select is cheap, then a branch can't be cheaper.
7564 if (!TLI->isPredictableSelectExpensive())
7565 return false;
7566
7567 // FIXME: This should use the same heuristics as IfConversion to determine
7568 // whether a select is better represented as a branch.
7569
7570 // If metadata tells us that the select condition is obviously predictable,
7571 // then we want to replace the select with a branch.
7572 uint64_t TrueWeight, FalseWeight;
7573 if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
7574 uint64_t Max = std::max(TrueWeight, FalseWeight);
7575 uint64_t Sum = TrueWeight + FalseWeight;
7576 if (Sum != 0) {
7577 auto Probability = BranchProbability::getBranchProbability(Max, Sum);
7578 if (Probability > TTI->getPredictableBranchThreshold())
7579 return true;
7580 }
7581 }
7582
7583 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
7584
7585 // If a branch is predictable, an out-of-order CPU can avoid blocking on its
7586 // comparison condition. If the compare has more than one use, there's
7587 // probably another cmov or setcc around, so it's not worth emitting a branch.
7588 if (!Cmp || !Cmp->hasOneUse())
7589 return false;
7590
7591 // If either operand of the select is expensive and only needed on one side
7592 // of the select, we should form a branch.
7593 if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
7594 sinkSelectOperand(TTI, SI->getFalseValue()))
7595 return true;
7596
7597 return false;
7598}
7599
7600/// If \p isTrue is true, return the true value of \p SI, otherwise return
7601/// false value of \p SI. If the true/false value of \p SI is defined by any
7602/// select instructions in \p Selects, look through the defining select
7603/// instruction until the true/false value is not defined in \p Selects.
7604static Value *
7605 getTrueOrFalseValue(SelectInst *SI, bool isTrue,
7606 const SmallPtrSet<const Instruction *, 2> &Selects) {
7607 Value *V = nullptr;
7608
7609 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
7610 DefSI = dyn_cast<SelectInst>(V)) {
7611 assert(DefSI->getCondition() == SI->getCondition() &&
7612 "The condition of DefSI does not match with SI");
7613 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
7614 }
7615
7616 assert(V && "Failed to get select true/false value");
7617 return V;
7618}
7619
7620bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
7621 assert(Shift->isShift() && "Expected a shift");
7622
7623 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
7624 // general vector shifts, and (3) the shift amount is a select-of-splatted
7625 // values, hoist the shifts before the select:
7626 // shift Op0, (select Cond, TVal, FVal) -->
7627 // select Cond, (shift Op0, TVal), (shift Op0, FVal)
7628 //
7629 // This is inverting a generic IR transform when we know that the cost of a
7630 // general vector shift is more than the cost of 2 shift-by-scalars.
7631 // We can't do this effectively in SDAG because we may not be able to
7632 // determine if the select operands are splats from within a basic block.
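// Illustrative sketch (editorial, not from a test):
//   %amt = select i1 %c, <4 x i32> splat (i32 2), <4 x i32> splat (i32 4)
//   %r   = shl <4 x i32> %x, %amt
// becomes
//   %t2  = shl <4 x i32> %x, splat (i32 2)
//   %t4  = shl <4 x i32> %x, splat (i32 4)
//   %r   = select i1 %c, <4 x i32> %t2, <4 x i32> %t4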
7633 Type *Ty = Shift->getType();
7634 if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7635 return false;
7636 Value *Cond, *TVal, *FVal;
7637 if (!match(Shift->getOperand(1),
7638 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7639 return false;
7640 if (!isSplatValue(TVal) || !isSplatValue(FVal))
7641 return false;
7642
7643 IRBuilder<> Builder(Shift);
7644 BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
7645 Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
7646 Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
7647 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7648 replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
7649 Shift->eraseFromParent();
7650 return true;
7651}
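// Illustrative IR sketch of the rewrite above, assuming a target where
// TTI->isVectorShiftByScalarCheap() returns true for <4 x i32>:
//   %amt = select i1 %cond, <4 x i32> splat (i32 2), <4 x i32> splat (i32 3)
//   %r = shl <4 x i32> %x, %amt
// becomes
//   %t = shl <4 x i32> %x, splat (i32 2)
//   %f = shl <4 x i32> %x, splat (i32 3)
//   %r = select i1 %cond, <4 x i32> %t, <4 x i32> %f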
7652
7653bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
7654 Intrinsic::ID Opcode = Fsh->getIntrinsicID();
7655 assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
7656 "Expected a funnel shift");
7657
7658 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
7659 // than general vector shifts, and (3) the shift amount is select-of-splatted
7660 // values, hoist the funnel shifts before the select:
7661 // fsh Op0, Op1, (select Cond, TVal, FVal) -->
7662 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
7663 //
7664 // This is inverting a generic IR transform when we know that the cost of a
7665 // general vector shift is more than the cost of 2 shift-by-scalars.
7666 // We can't do this effectively in SDAG because we may not be able to
7667 // determine if the select operands are splats from within a basic block.
7668 Type *Ty = Fsh->getType();
7669 if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7670 return false;
7671 Value *Cond, *TVal, *FVal;
7672 if (!match(Fsh->getOperand(2),
7673 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7674 return false;
7675 if (!isSplatValue(TVal) || !isSplatValue(FVal))
7676 return false;
7677
7678 IRBuilder<> Builder(Fsh);
7679 Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
7680 Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
7681 Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
7682 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7683 replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
7684 Fsh->eraseFromParent();
7685 return true;
7686}
7687
7688/// If we have a SelectInst that will likely profit from branch prediction,
7689/// turn it into a branch.
7690bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
7691 if (DisableSelectToBranch)
7692 return false;
7693
7694 // If the SelectOptimize pass is enabled, selects have already been optimized.
7695 if (!getCGPassBuilderOption().DisableSelectOptimize)
7696 return false;
7697
7698 // Find all consecutive select instructions that share the same condition.
7699 SmallVector<SelectInst *, 2> ASI;
7700 ASI.push_back(SI);
7701 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
7702 It != SI->getParent()->end(); ++It) {
7703 SelectInst *I = dyn_cast<SelectInst>(&*It);
7704 if (I && SI->getCondition() == I->getCondition()) {
7705 ASI.push_back(I);
7706 } else {
7707 break;
7708 }
7709 }
7710
7711 SelectInst *LastSI = ASI.back();
7712 // Increment the current iterator to skip all the rest of select instructions
7713 // because they will be either "not lowered" or "all lowered" to branch.
7714 CurInstIterator = std::next(LastSI->getIterator());
7715 // Examine debug-info attached to the consecutive select instructions. They
7716 // won't be individually optimised by optimizeInst, so we need to perform
7717 // DbgVariableRecord maintenance here instead.
7718 for (SelectInst *SI : ArrayRef(ASI).drop_front())
7719 fixupDbgVariableRecordsOnInst(*SI);
7720
7721 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
7722
7723 // Can we convert the 'select' to CF ?
7724 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7725 return false;
7726
7727 TargetLowering::SelectSupportKind SelectKind;
7728 if (SI->getType()->isVectorTy())
7729 SelectKind = TargetLowering::ScalarCondVectorVal;
7730 else
7731 SelectKind = TargetLowering::ScalarValSelect;
7732
7733 if (TLI->isSelectSupported(SelectKind) &&
7734 (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
7735 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7736 return false;
7737
7738 // The DominatorTree needs to be rebuilt by any consumers after this
7739 // transformation. We simply reset here rather than setting the ModifiedDT
7740 // flag to avoid restarting the function walk in runOnFunction for each
7741 // select optimized.
7742 DT.reset();
7743
7744 // Transform a sequence like this:
7745 // start:
7746 // %cmp = cmp uge i32 %a, %b
7747 // %sel = select i1 %cmp, i32 %c, i32 %d
7748 //
7749 // Into:
7750 // start:
7751 // %cmp = cmp uge i32 %a, %b
7752 // %cmp.frozen = freeze %cmp
7753 // br i1 %cmp.frozen, label %select.true, label %select.false
7754 // select.true:
7755 // br label %select.end
7756 // select.false:
7757 // br label %select.end
7758 // select.end:
7759 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
7760 //
7761 // %cmp should be frozen, otherwise it may introduce undefined behavior.
7762 // In addition, we may sink instructions that produce %c or %d from
7763 // the entry block into the destination(s) of the new branch.
7764 // If the true or false blocks do not contain a sunken instruction, that
7765 // block and its branch may be optimized away. In that case, one side of the
7766 // first branch will point directly to select.end, and the corresponding PHI
7767 // predecessor block will be the start block.
7768
7769 // Collect values that go on the true side and the values that go on the false
7770 // side.
7771 SmallVector<Instruction *> TrueInstrs, FalseInstrs;
7772 for (SelectInst *SI : ASI) {
7773 if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
7774 TrueInstrs.push_back(cast<Instruction>(V));
7775 if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
7776 FalseInstrs.push_back(cast<Instruction>(V));
7777 }
7778
7779 // Split the select block, according to how many (if any) values go on each
7780 // side.
7781 BasicBlock *StartBlock = SI->getParent();
7782 BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7783 // We should split before any debug-info.
7784 SplitPt.setHeadBit(true);
7785
7786 IRBuilder<> IB(SI);
7787 auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7788
7789 BasicBlock *TrueBlock = nullptr;
7790 BasicBlock *FalseBlock = nullptr;
7791 BasicBlock *EndBlock = nullptr;
7792 BranchInst *TrueBranch = nullptr;
7793 BranchInst *FalseBranch = nullptr;
7794 if (TrueInstrs.size() == 0) {
7795 FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
7796 CondFr, SplitPt, false, nullptr, nullptr, LI));
7797 FalseBlock = FalseBranch->getParent();
7798 EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
7799 } else if (FalseInstrs.size() == 0) {
7800 TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
7801 CondFr, SplitPt, false, nullptr, nullptr, LI));
7802 TrueBlock = TrueBranch->getParent();
7803 EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7804 } else {
7805 Instruction *ThenTerm = nullptr;
7806 Instruction *ElseTerm = nullptr;
7807 SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
7808 nullptr, nullptr, LI);
7809 TrueBranch = cast<BranchInst>(ThenTerm);
7810 FalseBranch = cast<BranchInst>(ElseTerm);
7811 TrueBlock = TrueBranch->getParent();
7812 FalseBlock = FalseBranch->getParent();
7813 EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7814 }
7815
7816 EndBlock->setName("select.end");
7817 if (TrueBlock)
7818 TrueBlock->setName("select.true.sink");
7819 if (FalseBlock)
7820 FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
7821 : "select.false.sink");
7822
7823 if (IsHugeFunc) {
7824 if (TrueBlock)
7825 FreshBBs.insert(TrueBlock);
7826 if (FalseBlock)
7827 FreshBBs.insert(FalseBlock);
7828 FreshBBs.insert(EndBlock);
7829 }
7830
7831 BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7832
7833 static const unsigned MD[] = {
7834 LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
7835 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
7836 StartBlock->getTerminator()->copyMetadata(*SI, MD);
7837
7838 // Sink expensive instructions into the conditional blocks to avoid executing
7839 // them speculatively.
7840 for (Instruction *I : TrueInstrs)
7841 I->moveBefore(TrueBranch->getIterator());
7842 for (Instruction *I : FalseInstrs)
7843 I->moveBefore(FalseBranch->getIterator());
7844
7845 // If we did not create a new block for one of the 'true' or 'false' paths
7846 // of the condition, it means that side of the branch goes to the end block
7847 // directly and the path originates from the start block from the point of
7848 // view of the new PHI.
7849 if (TrueBlock == nullptr)
7850 TrueBlock = StartBlock;
7851 else if (FalseBlock == nullptr)
7852 FalseBlock = StartBlock;
7853
7854 SmallPtrSet<const Instruction *, 2> INS(llvm::from_range, ASI);
7855 // Use reverse iterator because later select may use the value of the
7856 // earlier select, and we need to propagate value through earlier select
7857 // to get the PHI operand.
7858 for (SelectInst *SI : llvm::reverse(ASI)) {
7859 // The select itself is replaced with a PHI Node.
7860 PHINode *PN = PHINode::Create(SI->getType(), 2, "");
7861 PN->insertBefore(EndBlock->begin());
7862 PN->takeName(SI);
7863 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7864 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7865 PN->setDebugLoc(SI->getDebugLoc());
7866
7867 replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7868 SI->eraseFromParent();
7869 INS.erase(SI);
7870 ++NumSelectsExpanded;
7871 }
7872
7873 // Instruct OptimizeBlock to skip to the next block.
7874 CurInstIterator = StartBlock->end();
7875 return true;
7876}
7877
7878/// Some targets only accept certain types for splat inputs. For example a VDUP
7879/// in MVE takes a GPR (integer) register, and the instructions that incorporate
7880/// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
7881bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7882 // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7883 if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7884 m_Undef(), m_ZeroMask())))
7885 return false;
7886 Type *NewType = TLI->shouldConvertSplatType(SVI);
7887 if (!NewType)
7888 return false;
7889
7890 auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7891 assert(!NewType->isVectorTy() && "Expected a scalar type!");
7892 assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7893 "Expected a type of the same size!");
7894 auto *NewVecType =
7895 FixedVectorType::get(NewType, SVIVecType->getNumElements());
7896
7897 // Create a bitcast (shuffle (insert (bitcast(..))))
7898 IRBuilder<> Builder(SVI->getContext());
7899 Builder.SetInsertPoint(SVI);
7900 Value *BC1 = Builder.CreateBitCast(
7901 cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7902 Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7903 Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7904
7905 replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7906 RecursivelyDeleteTriviallyDeadInstructions(
7907 SVI, TLInfo, nullptr,
7908 [&](Value *V) { removeAllAssertingVHReferences(V); });
7909
7910 // Also hoist the bitcast up to its operand if they are not in the same
7911 // block.
7912 if (auto *BCI = dyn_cast<Instruction>(BC1))
7913 if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7914 if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7915 !Op->isTerminator() && !Op->isEHPad())
7916 BCI->moveAfter(Op);
7917
7918 return true;
7919}
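// Illustrative sketch, assuming a target (e.g. MVE) whose
// shouldConvertSplatType() returns i32 for a <4 x float> splat:
//   %i = insertelement <4 x float> poison, float %f, i64 0
//   %s = shufflevector <4 x float> %i, <4 x float> poison, <4 x i32> zeroinitializer
// is rewritten to
//   %b = bitcast float %f to i32
//   %i2 = insertelement <4 x i32> poison, i32 %b, i64 0
//   %s2 = shufflevector <4 x i32> %i2, <4 x i32> poison, <4 x i32> zeroinitializer
//   %r = bitcast <4 x i32> %s2 to <4 x float>
// so the splatted value lives in a GPR and can feed a VDUP directly.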
7920
7921bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7922 // If the operands of I can be folded into a target instruction together with
7923 // I, duplicate and sink them.
7924 SmallVector<Use *, 4> OpsToSink;
7925 if (!TTI->isProfitableToSinkOperands(I, OpsToSink))
7926 return false;
7927
7928 // OpsToSink can contain multiple uses in a use chain (e.g.
7929 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7930 // uses must come first, so we process the ops in reverse order so as to not
7931 // create invalid IR.
7932 BasicBlock *TargetBB = I->getParent();
7933 bool Changed = false;
7934 SmallVector<Use *, 4> ToReplace;
7935 Instruction *InsertPoint = I;
7936 DenseMap<const Instruction *, unsigned long> InstOrdering;
7937 unsigned long InstNumber = 0;
7938 for (const auto &I : *TargetBB)
7939 InstOrdering[&I] = InstNumber++;
7940
7941 for (Use *U : reverse(OpsToSink)) {
7942 auto *UI = cast<Instruction>(U->get());
7943 if (isa<PHINode>(UI))
7944 continue;
7945 if (UI->getParent() == TargetBB) {
7946 if (InstOrdering[UI] < InstOrdering[InsertPoint])
7947 InsertPoint = UI;
7948 continue;
7949 }
7950 ToReplace.push_back(U);
7951 }
7952
7953 SetVector<Instruction *> MaybeDead;
7954 DenseMap<Instruction *, Instruction *> NewInstructions;
7955 for (Use *U : ToReplace) {
7956 auto *UI = cast<Instruction>(U->get());
7957 Instruction *NI = UI->clone();
7958
7959 if (IsHugeFunc) {
7960 // Since we have cloned an instruction, its operands' defs may now sink to
7961 // this BB. So we put the BBs of the operands' defs into FreshBBs for optimization.
7962 for (Value *Op : NI->operands())
7963 if (auto *OpDef = dyn_cast<Instruction>(Op))
7964 FreshBBs.insert(OpDef->getParent());
7965 }
7966
7967 NewInstructions[UI] = NI;
7968 MaybeDead.insert(UI);
7969 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7970 NI->insertBefore(InsertPoint->getIterator());
7971 InsertPoint = NI;
7972 InsertedInsts.insert(NI);
7973
7974 // Update the use for the new instruction, making sure that we update the
7975 // sunk instruction uses, if it is part of a chain that has already been
7976 // sunk.
7977 Instruction *OldI = cast<Instruction>(U->getUser());
7978 if (auto It = NewInstructions.find(OldI); It != NewInstructions.end())
7979 It->second->setOperand(U->getOperandNo(), NI);
7980 else
7981 U->set(NI);
7982 Changed = true;
7983 }
7984
7985 // Remove instructions that are dead after sinking.
7986 for (auto *I : MaybeDead) {
7987 if (!I->hasNUsesOrMore(1)) {
7988 LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7989 I->eraseFromParent();
7990 }
7991 }
7992
7993 return Changed;
7994}
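// Illustrative sketch of the sinking above; whether it triggers is decided by
// TTI->isProfitableToSinkOperands(), so the exact pattern is target-specific:
//   bb0:
//     %i = insertelement <4 x i32> poison, i32 %a, i64 0
//     %s = shufflevector <4 x i32> %i, <4 x i32> poison, <4 x i32> zeroinitializer
//     br label %bb1
//   bb1:
//     %m = mul <4 x i32> %x, %s
// The splat %s (and %i) is cloned next to %m in bb1 so instruction selection
// can fold it into a by-lane multiply; the originals are erased if they become
// dead.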
7995
7996bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7997 Value *Cond = SI->getCondition();
7998 Type *OldType = Cond->getType();
7999 LLVMContext &Context = Cond->getContext();
8000 EVT OldVT = TLI->getValueType(*DL, OldType);
8001 MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
8002 unsigned RegWidth = RegType.getSizeInBits();
8003
8004 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
8005 return false;
8006
8007 // If the register width is greater than the type width, expand the condition
8008 // of the switch instruction and each case constant to the width of the
8009 // register. By widening the type of the switch condition, subsequent
8010 // comparisons (for case comparisons) will not need to be extended to the
8011 // preferred register width, so we will potentially eliminate N-1 extends,
8012 // where N is the number of cases in the switch.
8013 auto *NewType = Type::getIntNTy(Context, RegWidth);
8014
8015 // Extend the switch condition and case constants using the target preferred
8016 // extend unless the switch condition is a function argument with an extend
8017 // attribute. In that case, we can avoid an unnecessary mask/extension by
8018 // matching the argument extension instead.
8019 Instruction::CastOps ExtType = Instruction::ZExt;
8020 // Some targets prefer SExt over ZExt.
8021 if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
8022 ExtType = Instruction::SExt;
8023
8024 if (auto *Arg = dyn_cast<Argument>(Cond)) {
8025 if (Arg->hasSExtAttr())
8026 ExtType = Instruction::SExt;
8027 if (Arg->hasZExtAttr())
8028 ExtType = Instruction::ZExt;
8029 }
8030
8031 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
8032 ExtInst->insertBefore(SI->getIterator());
8033 ExtInst->setDebugLoc(SI->getDebugLoc());
8034 SI->setCondition(ExtInst);
8035 for (auto Case : SI->cases()) {
8036 const APInt &NarrowConst = Case.getCaseValue()->getValue();
8037 APInt WideConst = (ExtType == Instruction::ZExt)
8038 ? NarrowConst.zext(RegWidth)
8039 : NarrowConst.sext(RegWidth);
8040 Case.setValue(ConstantInt::get(Context, WideConst));
8041 }
8042
8043 return true;
8044}
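// Illustrative sketch, assuming a target whose preferred switch condition
// width is 32 bits:
//   switch i8 %c, label %def [ i8 1, label %a
//                              i8 2, label %b ]
// is widened to
//   %c.ext = zext i8 %c to i32
//   switch i32 %c.ext, label %def [ i32 1, label %a
//                                   i32 2, label %b ]
// (SExt is used instead when the target prefers it or when the condition is a
// signext argument.)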
8045
8046bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
8047 // The SCCP optimization tends to produce code like this:
8048 // switch(x) { case 42: phi(42, ...) }
8049 // Materializing the constant for the phi-argument needs instructions; so we
8050 // change the code to:
8051 // switch(x) { case 42: phi(x, ...) }
8052
8053 Value *Condition = SI->getCondition();
8054 // Avoid endless loop in degenerate case.
8055 if (isa<ConstantInt>(*Condition))
8056 return false;
8057
8058 bool Changed = false;
8059 BasicBlock *SwitchBB = SI->getParent();
8060 Type *ConditionType = Condition->getType();
8061
8062 for (const SwitchInst::CaseHandle &Case : SI->cases()) {
8063 ConstantInt *CaseValue = Case.getCaseValue();
8064 BasicBlock *CaseBB = Case.getCaseSuccessor();
8065 // Set to true if we previously checked that `CaseBB` is only reached by
8066 // a single case from this switch.
8067 bool CheckedForSinglePred = false;
8068 for (PHINode &PHI : CaseBB->phis()) {
8069 Type *PHIType = PHI.getType();
8070 // If ZExt is free then we can also catch patterns like this:
8071 // switch((i32)x) { case 42: phi((i64)42, ...); }
8072 // and replace `(i64)42` with `zext i32 %x to i64`.
8073 bool TryZExt =
8074 PHIType->isIntegerTy() &&
8075 PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
8076 TLI->isZExtFree(ConditionType, PHIType);
8077 if (PHIType == ConditionType || TryZExt) {
8078 // Set to true to skip this case because of multiple preds.
8079 bool SkipCase = false;
8080 Value *Replacement = nullptr;
8081 for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
8082 Value *PHIValue = PHI.getIncomingValue(I);
8083 if (PHIValue != CaseValue) {
8084 if (!TryZExt)
8085 continue;
8086 ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
8087 if (!PHIValueInt ||
8088 PHIValueInt->getValue() !=
8089 CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
8090 continue;
8091 }
8092 if (PHI.getIncomingBlock(I) != SwitchBB)
8093 continue;
8094 // We cannot optimize if there are multiple case labels jumping to
8095 // this block. This check may get expensive when there are many
8096 // case labels so we test for it last.
8097 if (!CheckedForSinglePred) {
8098 CheckedForSinglePred = true;
8099 if (SI->findCaseDest(CaseBB) == nullptr) {
8100 SkipCase = true;
8101 break;
8102 }
8103 }
8104
8105 if (Replacement == nullptr) {
8106 if (PHIValue == CaseValue) {
8107 Replacement = Condition;
8108 } else {
8109 IRBuilder<> Builder(SI);
8110 Replacement = Builder.CreateZExt(Condition, PHIType);
8111 }
8112 }
8113 PHI.setIncomingValue(I, Replacement);
8114 Changed = true;
8115 }
8116 if (SkipCase)
8117 break;
8118 }
8119 }
8120 }
8121 return Changed;
8122}
8123
8124bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
8125 bool Changed = optimizeSwitchType(SI);
8126 Changed |= optimizeSwitchPhiConstants(SI);
8127 return Changed;
8128}
8129
8130namespace {
8131
8132/// Helper class to promote a scalar operation to a vector one.
8133/// This class is used to move an extractelement transition downward.
8134/// E.g.,
8135/// a = vector_op <2 x i32>
8136/// b = extractelement <2 x i32> a, i32 0
8137/// c = scalar_op b
8138/// store c
8139///
8140/// =>
8141/// a = vector_op <2 x i32>
8142/// c = vector_op a (equivalent to scalar_op on the related lane)
8143/// * d = extractelement <2 x i32> c, i32 0
8144/// * store d
8145/// Assuming both extractelement and store can be combined, we get rid of the
8146/// transition.
8147class VectorPromoteHelper {
8148 /// DataLayout associated with the current module.
8149 const DataLayout &DL;
8150
8151 /// Used to perform some checks on the legality of vector operations.
8152 const TargetLowering &TLI;
8153
8154 /// Used to estimate the cost of the promoted chain.
8155 const TargetTransformInfo &TTI;
8156
8157 /// The transition being moved downwards.
8158 Instruction *Transition;
8159
8160 /// The sequence of instructions to be promoted.
8161 SmallVector<Instruction *, 4> InstsToBePromoted;
8162
8163 /// Cost of combining a store and an extract.
8164 unsigned StoreExtractCombineCost;
8165
8166 /// Instruction that will be combined with the transition.
8167 Instruction *CombineInst = nullptr;
8168
8169 /// The instruction that represents the current end of the transition.
8170 /// Since we are faking the promotion until we reach the end of the chain
8171 /// of computation, we need a way to get the current end of the transition.
8172 Instruction *getEndOfTransition() const {
8173 if (InstsToBePromoted.empty())
8174 return Transition;
8175 return InstsToBePromoted.back();
8176 }
8177
8178 /// Return the index of the original value in the transition.
8179 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
8180 /// c, is at index 0.
8181 unsigned getTransitionOriginalValueIdx() const {
8182 assert(isa<ExtractElementInst>(Transition) &&
8183 "Other kind of transitions are not supported yet");
8184 return 0;
8185 }
8186
8187 /// Return the index of the index in the transition.
8188 /// E.g., for "extractelement <2 x i32> c, i32 0" the index
8189 /// is at index 1.
8190 unsigned getTransitionIdx() const {
8191 assert(isa<ExtractElementInst>(Transition) &&
8192 "Other kind of transitions are not supported yet");
8193 return 1;
8194 }
8195
8196 /// Get the type of the transition.
8197 /// This is the type of the original value.
8198 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
8199 /// transition is <2 x i32>.
8200 Type *getTransitionType() const {
8201 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
8202 }
8203
8204 /// Promote \p ToBePromoted by moving \p Def downward through.
8205 /// I.e., we have the following sequence:
8206 /// Def = Transition <ty1> a to <ty2>
8207 /// b = ToBePromoted <ty2> Def, ...
8208 /// =>
8209 /// b = ToBePromoted <ty1> a, ...
8210 /// Def = Transition <ty1> ToBePromoted to <ty2>
8211 void promoteImpl(Instruction *ToBePromoted);
8212
8213 /// Check whether or not it is profitable to promote all the
8214 /// instructions enqueued to be promoted.
8215 bool isProfitableToPromote() {
8216 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
8217 unsigned Index = isa<ConstantInt>(ValIdx)
8218 ? cast<ConstantInt>(ValIdx)->getZExtValue()
8219 : -1;
8220 Type *PromotedType = getTransitionType();
8221
8222 StoreInst *ST = cast<StoreInst>(CombineInst);
8223 unsigned AS = ST->getPointerAddressSpace();
8224 // Check if this store is supported.
8225 if (!TLI.allowsMisalignedMemoryAccesses(
8226 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
8227 ST->getAlign())) {
8228 // If this is not supported, there is no way we can combine
8229 // the extract with the store.
8230 return false;
8231 }
8232
8233 // The scalar chain of computation has to pay for the transition
8234 // scalar to vector.
8235 // The vector chain has to account for the combining cost.
8236 enum TargetTransformInfo::TargetCostKind CostKind =
8237 TargetTransformInfo::TCK_RecipThroughput;
8238 InstructionCost ScalarCost =
8239 TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
8240 InstructionCost VectorCost = StoreExtractCombineCost;
8241 for (const auto &Inst : InstsToBePromoted) {
8242 // Compute the cost.
8243 // By construction, all instructions being promoted are arithmetic ones.
8244 // Moreover, one argument is a constant that can be viewed as a splat
8245 // constant.
8246 Value *Arg0 = Inst->getOperand(0);
8247 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
8248 isa<ConstantFP>(Arg0);
8249 TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
8250 if (IsArg0Constant)
8251 Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
8252 else
8253 Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
8254
8255 ScalarCost += TTI.getArithmeticInstrCost(
8256 Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
8257 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
8258 CostKind, Arg0Info, Arg1Info);
8259 }
8260 LLVM_DEBUG(
8261 dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
8262 << ScalarCost << "\nVector: " << VectorCost << '\n');
8263 return ScalarCost > VectorCost;
8264 }
8265
8266 /// Generate a constant vector with \p Val with the same
8267 /// number of elements as the transition.
8268 /// \p UseSplat defines whether or not \p Val should be replicated
8269 /// across the whole vector.
8270 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
8271 /// otherwise we generate a vector with as many poison elements as possible:
8272 /// <poison, ..., poison, Val, poison, ..., poison> where \p Val is only
8273 /// used at the index of the extract.
8274 Value *getConstantVector(Constant *Val, bool UseSplat) const {
8275 unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
8276 if (!UseSplat) {
8277 // If we cannot determine where the constant must be, we have to
8278 // use a splat constant.
8279 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
8280 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
8281 ExtractIdx = CstVal->getSExtValue();
8282 else
8283 UseSplat = true;
8284 }
8285
8286 ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
8287 if (UseSplat)
8288 return ConstantVector::getSplat(EC, Val);
8289
8290 if (!EC.isScalable()) {
8291 SmallVector<Constant *, 4> ConstVec;
8292 PoisonValue *PoisonVal = PoisonValue::get(Val->getType());
8293 for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
8294 if (Idx == ExtractIdx)
8295 ConstVec.push_back(Val);
8296 else
8297 ConstVec.push_back(PoisonVal);
8298 }
8299 return ConstantVector::get(ConstVec);
8300 } else
8301 llvm_unreachable(
8302 "Generate scalable vector for non-splat is unimplemented");
8303 }
8304
8305 /// Check if promoting to a vector type an operand at \p OperandIdx
8306 /// in \p Use can trigger undefined behavior.
8307 static bool canCauseUndefinedBehavior(const Instruction *Use,
8308 unsigned OperandIdx) {
8309 // This is not safe to introduce undef when the operand is on
8310 // the right hand side of a division-like instruction.
8311 if (OperandIdx != 1)
8312 return false;
8313 switch (Use->getOpcode()) {
8314 default:
8315 return false;
8316 case Instruction::SDiv:
8317 case Instruction::UDiv:
8318 case Instruction::SRem:
8319 case Instruction::URem:
8320 return true;
8321 case Instruction::FDiv:
8322 case Instruction::FRem:
8323 return !Use->hasNoNaNs();
8324 }
8325 llvm_unreachable(nullptr);
8326 }
8327
8328public:
8329 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
8330 const TargetTransformInfo &TTI, Instruction *Transition,
8331 unsigned CombineCost)
8332 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
8333 StoreExtractCombineCost(CombineCost) {
8334 assert(Transition && "Do not know how to promote null");
8335 }
8336
8337 /// Check if we can promote \p ToBePromoted to \p Type.
8338 bool canPromote(const Instruction *ToBePromoted) const {
8339 // We could support CastInst too.
8340 return isa<BinaryOperator>(ToBePromoted);
8341 }
8342
8343 /// Check if it is profitable to promote \p ToBePromoted
8344 /// by moving downward the transition through.
8345 bool shouldPromote(const Instruction *ToBePromoted) const {
8346 // Promote only if all the operands can be statically expanded.
8347 // Indeed, we do not want to introduce any new kind of transitions.
8348 for (const Use &U : ToBePromoted->operands()) {
8349 const Value *Val = U.get();
8350 if (Val == getEndOfTransition()) {
8351 // If the use is a division and the transition is on the rhs,
8352 // we cannot promote the operation, otherwise we may create a
8353 // division by zero.
8354 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
8355 return false;
8356 continue;
8357 }
8358 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
8359 !isa<ConstantFP>(Val))
8360 return false;
8361 }
8362 // Check that the resulting operation is legal.
8363 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
8364 if (!ISDOpcode)
8365 return false;
8366 return StressStoreExtract ||
8367 TLI.isOperationLegalOrCustom(
8368 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
8369 }
8370
8371 /// Check whether or not \p Use can be combined
8372 /// with the transition.
8373 /// I.e., is it possible to do Use(Transition) => AnotherUse?
8374 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
8375
8376 /// Record \p ToBePromoted as part of the chain to be promoted.
8377 void enqueueForPromotion(Instruction *ToBePromoted) {
8378 InstsToBePromoted.push_back(ToBePromoted);
8379 }
8380
8381 /// Set the instruction that will be combined with the transition.
8382 void recordCombineInstruction(Instruction *ToBeCombined) {
8383 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
8384 CombineInst = ToBeCombined;
8385 }
8386
8387 /// Promote all the instructions enqueued for promotion if it is
8388 /// profitable.
8389 /// \return True if the promotion happened, false otherwise.
8390 bool promote() {
8391 // Check if there is something to promote.
8392 // Right now, if we do not have anything to combine with,
8393 // we assume the promotion is not profitable.
8394 if (InstsToBePromoted.empty() || !CombineInst)
8395 return false;
8396
8397 // Check cost.
8398 if (!StressStoreExtract && !isProfitableToPromote())
8399 return false;
8400
8401 // Promote.
8402 for (auto &ToBePromoted : InstsToBePromoted)
8403 promoteImpl(ToBePromoted);
8404 InstsToBePromoted.clear();
8405 return true;
8406 }
8407};
8408
8409} // end anonymous namespace
8410
8411void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
8412 // At this point, we know that all the operands of ToBePromoted but Def
8413 // can be statically promoted.
8414 // For Def, we need to use its parameter in ToBePromoted:
8415 // b = ToBePromoted ty1 a
8416 // Def = Transition ty1 b to ty2
8417 // Move the transition down.
8418 // 1. Replace all uses of the promoted operation by the transition.
8419 // = ... b => = ... Def.
8420 assert(ToBePromoted->getType() == Transition->getType() &&
8421 "The type of the result of the transition does not match "
8422 "the final type");
8423 ToBePromoted->replaceAllUsesWith(Transition);
8424 // 2. Update the type of the uses.
8425 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
8426 Type *TransitionTy = getTransitionType();
8427 ToBePromoted->mutateType(TransitionTy);
8428 // 3. Update all the operands of the promoted operation with promoted
8429 // operands.
8430 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
8431 for (Use &U : ToBePromoted->operands()) {
8432 Value *Val = U.get();
8433 Value *NewVal = nullptr;
8434 if (Val == Transition)
8435 NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
8436 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
8437 isa<ConstantFP>(Val)) {
8438 // Use a splat constant if it is not safe to use undef.
8439 NewVal = getConstantVector(
8440 cast<Constant>(Val),
8441 isa<UndefValue>(Val) ||
8442 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
8443 } else
8444 llvm_unreachable("Did you modify shouldPromote and forget to update "
8445 "this?");
8446 ToBePromoted->setOperand(U.getOperandNo(), NewVal);
8447 }
8448 Transition->moveAfter(ToBePromoted);
8449 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
8450}
8451
8452/// Some targets can do store(extractelement) with one instruction.
8453/// Try to push the extractelement towards the stores when the target
8454/// has this feature and this is profitable.
8455bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
8456 unsigned CombineCost = std::numeric_limits<unsigned>::max();
8457 if (DisableStoreExtract ||
8458 (!StressStoreExtract &&
8459 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
8460 Inst->getOperand(1), CombineCost)))
8461 return false;
8462
8463 // At this point we know that Inst is a vector to scalar transition.
8464 // Try to move it down the def-use chain, until:
8465 // - We can combine the transition with its single use
8466 // => we got rid of the transition.
8467 // - We escape the current basic block
8468 // => we would need to check that we are moving it at a cheaper place and
8469 // we do not do that for now.
8470 BasicBlock *Parent = Inst->getParent();
8471 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
8472 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
8473 // If the transition has more than one use, assume this is not going to be
8474 // beneficial.
8475 while (Inst->hasOneUse()) {
8476 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
8477 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
8478
8479 if (ToBePromoted->getParent() != Parent) {
8480 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
8481 << ToBePromoted->getParent()->getName()
8482 << ") than the transition (" << Parent->getName()
8483 << ").\n");
8484 return false;
8485 }
8486
8487 if (VPH.canCombine(ToBePromoted)) {
8488 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
8489 << "will be combined with: " << *ToBePromoted << '\n');
8490 VPH.recordCombineInstruction(ToBePromoted);
8491 bool Changed = VPH.promote();
8492 NumStoreExtractExposed += Changed;
8493 return Changed;
8494 }
8495
8496 LLVM_DEBUG(dbgs() << "Try promoting.\n");
8497 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
8498 return false;
8499
8500 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
8501
8502 VPH.enqueueForPromotion(ToBePromoted);
8503 Inst = ToBePromoted;
8504 }
8505 return false;
8506}
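// Illustrative sketch of the promotion above (profitability is cost-model and
// target dependent):
//   %e = extractelement <2 x i32> %v, i32 1
//   %a = add i32 %e, 7
//   store i32 %a, ptr %p
// may become, when the target combines store+extract cheaply:
//   %va = add <2 x i32> %v, <i32 poison, i32 7>
//   %e2 = extractelement <2 x i32> %va, i32 1
//   store i32 %e2, ptr %p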
8507
8508/// For the instruction sequence of store below, F and I values
8509/// are bundled together as an i64 value before being stored into memory.
8510/// Sometimes it is more efficient to generate separate stores for F and I,
8511/// which can remove the bitwise instructions or sink them to colder places.
8512///
8513/// (store (or (zext (bitcast F to i32) to i64),
8514/// (shl (zext I to i64), 32)), addr) -->
8515/// (store F, addr) and (store I, addr+4)
8516///
8517/// Similarly, splitting for other merged store can also be beneficial, like:
8518/// For pair of {i32, i32}, i64 store --> two i32 stores.
8519/// For pair of {i32, i16}, i64 store --> two i32 stores.
8520/// For pair of {i16, i16}, i32 store --> two i16 stores.
8521/// For pair of {i16, i8}, i32 store --> two i16 stores.
8522/// For pair of {i8, i8}, i16 store --> two i8 stores.
8523///
8524/// We allow each target to determine specifically which kind of splitting is
8525/// supported.
8526///
8527/// The store patterns are commonly seen from the simple code snippet below
8528/// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
8529/// void goo(const std::pair<int, float> &);
8530/// hoo() {
8531/// ...
8532/// goo(std::make_pair(tmp, ftmp));
8533/// ...
8534/// }
8535///
8536/// Although we already have similar splitting in DAG Combine, we duplicate
8537/// it in CodeGenPrepare to catch the case in which the pattern spans
8538/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
8539/// during code expansion.
8540static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
8541 const TargetLowering &TLI) {
8542 // Handle simple but common cases only.
8543 Type *StoreType = SI.getValueOperand()->getType();
8544
8545 // The code below assumes shifting a value by <number of bits>,
8546 // whereas scalable vectors would have to be shifted by
8547 // <2log(vscale) + number of bits> in order to store the
8548 // low/high parts. Bailing out for now.
8549 if (StoreType->isScalableTy())
8550 return false;
8551
8552 if (!DL.typeSizeEqualsStoreSize(StoreType) ||
8553 DL.getTypeSizeInBits(StoreType) == 0)
8554 return false;
8555
8556 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
8557 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
8558 if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
8559 return false;
8560
8561 // Don't split the store if it is volatile.
8562 if (SI.isVolatile())
8563 return false;
8564
8565 // Match the following patterns:
8566 // (store (or (zext LValue to i64),
8567 // (shl (zext HValue to i64), 32)), HalfValBitSize)
8568 // or
8569 // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize)
8570 // (zext LValue to i64),
8571 // Expect both operands of OR and the first operand of SHL have only
8572 // one use.
8573 Value *LValue, *HValue;
8574 if (!match(SI.getValueOperand(),
8575 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
8576 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
8577 m_SpecificInt(HalfValBitSize))))))
8578 return false;
8579
8580 // Check LValue and HValue are int with size less or equal than 32.
8581 if (!LValue->getType()->isIntegerTy() ||
8582 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
8583 !HValue->getType()->isIntegerTy() ||
8584 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
8585 return false;
8586
8587 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
8588 // as the input of target query.
8589 auto *LBC = dyn_cast<BitCastInst>(LValue);
8590 auto *HBC = dyn_cast<BitCastInst>(HValue);
8591 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
8592 : EVT::getEVT(LValue->getType());
8593 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
8594 : EVT::getEVT(HValue->getType());
8595 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
8596 return false;
8597
8598 // Start to split store.
8599 IRBuilder<> Builder(SI.getContext());
8600 Builder.SetInsertPoint(&SI);
8601
8602 // If LValue/HValue is a bitcast in another BB, create a new one in current
8603 // BB so it may be merged with the split stores by the DAG combiner.
8604 if (LBC && LBC->getParent() != SI.getParent())
8605 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
8606 if (HBC && HBC->getParent() != SI.getParent())
8607 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
8608
8609 bool IsLE = SI.getDataLayout().isLittleEndian();
8610 auto CreateSplitStore = [&](Value *V, bool Upper) {
8611 V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
8612 Value *Addr = SI.getPointerOperand();
8613 Align Alignment = SI.getAlign();
8614 const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
8615 if (IsOffsetStore) {
8616 Addr = Builder.CreateGEP(
8617 SplitStoreType, Addr,
8618 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
8619
8620 // When splitting the store in half, naturally one half will retain the
8621 // alignment of the original wider store, regardless of whether it was
8622 // over-aligned or not, while the other will require adjustment.
8623 Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
8624 }
8625 Builder.CreateAlignedStore(V, Addr, Alignment);
8626 };
8627
8628 CreateSplitStore(LValue, false);
8629 CreateSplitStore(HValue, true);
8630
8631 // Delete the old store.
8632 SI.eraseFromParent();
8633 return true;
8634}
8635
8636// Return true if the GEP has two operands, the first operand is of a sequential
8637// type, and the second operand is a constant.
8638static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
8639 gep_type_iterator I = gep_type_begin(*GEP);
8640 return GEP->getNumOperands() == 2 && I.isSequential() &&
8641 isa<ConstantInt>(GEP->getOperand(1));
8642}
8643
8644// Try unmerging GEPs to reduce liveness interference (register pressure) across
8645// IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
8646// reducing liveness interference across those edges benefits global register
8647// allocation. Currently handles only certain cases.
8648//
8649// For example, unmerge %GEPI and %UGEPI as below.
8650//
8651// ---------- BEFORE ----------
8652// SrcBlock:
8653// ...
8654// %GEPIOp = ...
8655// ...
8656// %GEPI = gep %GEPIOp, Idx
8657// ...
8658// indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
8659// (* %GEPI is alive on the indirectbr edges due to other uses ahead)
8660// (* %GEPIOp is alive on the indirectbr edges only because it's used by
8661// %UGEPI)
8662//
8663// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
8664// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
8665// ...
8666//
8667// DstBi:
8668// ...
8669// %UGEPI = gep %GEPIOp, UIdx
8670// ...
8671// ---------------------------
8672//
8673// ---------- AFTER ----------
8674// SrcBlock:
8675// ... (same as above)
8676// (* %GEPI is still alive on the indirectbr edges)
8677// (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
8678// unmerging)
8679// ...
8680//
8681// DstBi:
8682// ...
8683// %UGEPI = gep %GEPI, (UIdx-Idx)
8684// ...
8685// ---------------------------
8686//
8687// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
8688// no longer alive on them.
8689//
8690// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
8691// of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
8692// not to disable further simplifications and optimizations as a result of GEP
8693// merging.
8694//
8695// Note this unmerging may increase the length of the data flow critical path
8696// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
8697// between the register pressure and the length of data-flow critical
8698// path. Restricting this to the uncommon IndirectBr case would minimize the
8699// impact of a potentially longer critical path, if any, and the impact on compile
8700// time.
8701static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
8702 const TargetTransformInfo *TTI) {
8703 BasicBlock *SrcBlock = GEPI->getParent();
8704 // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
8705 // (non-IndirectBr) cases exit early here.
8706 if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
8707 return false;
8708 // Check that GEPI is a simple gep with a single constant index.
8709 if (!GEPSequentialConstIndexed(GEPI))
8710 return false;
8711 ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
8712 // Check that GEPI is a cheap one.
8713 if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
8714 TargetTransformInfo::TCK_SizeAndLatency) >
8715 TargetTransformInfo::TCC_Basic)
8716 return false;
8717 Value *GEPIOp = GEPI->getOperand(0);
8718 // Check that GEPIOp is an instruction that's also defined in SrcBlock.
8719 if (!isa<Instruction>(GEPIOp))
8720 return false;
8721 auto *GEPIOpI = cast<Instruction>(GEPIOp);
8722 if (GEPIOpI->getParent() != SrcBlock)
8723 return false;
8724 // Check that GEP is used outside the block, meaning it's alive on the
8725 // IndirectBr edge(s).
8726 if (llvm::none_of(GEPI->users(), [&](User *Usr) {
8727 if (auto *I = dyn_cast<Instruction>(Usr)) {
8728 if (I->getParent() != SrcBlock) {
8729 return true;
8730 }
8731 }
8732 return false;
8733 }))
8734 return false;
8735 // The second elements of the GEP chains to be unmerged.
8736 std::vector<GetElementPtrInst *> UGEPIs;
8737 // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
8738 // on IndirectBr edges.
8739 for (User *Usr : GEPIOp->users()) {
8740 if (Usr == GEPI)
8741 continue;
8742 // Check if Usr is an Instruction. If not, give up.
8743 if (!isa<Instruction>(Usr))
8744 return false;
8745 auto *UI = cast<Instruction>(Usr);
8746 // If Usr is in the same block as GEPIOp, that is fine; skip it.
8747 if (UI->getParent() == SrcBlock)
8748 continue;
8749 // Check if Usr is a GEP. If not, give up.
8750 if (!isa<GetElementPtrInst>(Usr))
8751 return false;
8752 auto *UGEPI = cast<GetElementPtrInst>(Usr);
8753 // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
8754 // the pointer operand to it. If so, record it in the vector. If not, give
8755 // up.
8756 if (!GEPSequentialConstIndexed(UGEPI))
8757 return false;
8758 if (UGEPI->getOperand(0) != GEPIOp)
8759 return false;
8760 if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
8761 return false;
8762 if (GEPIIdx->getType() !=
8763 cast<ConstantInt>(UGEPI->getOperand(1))->getType())
8764 return false;
8765 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8766 if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
8767 TargetTransformInfo::TCK_SizeAndLatency) >
8768 TargetTransformInfo::TCC_Basic)
8769 return false;
8770 UGEPIs.push_back(UGEPI);
8771 }
8772 if (UGEPIs.size() == 0)
8773 return false;
8774 // Check the materializing cost of (Uidx-Idx).
8775 for (GetElementPtrInst *UGEPI : UGEPIs) {
8776 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8777 APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
8778 InstructionCost ImmCost = TTI->getIntImmCost(
8779 NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
8780 if (ImmCost > TargetTransformInfo::TCC_Basic)
8781 return false;
8782 }
8783 // Now unmerge between GEPI and UGEPIs.
8784 for (GetElementPtrInst *UGEPI : UGEPIs) {
8785 UGEPI->setOperand(0, GEPI);
8786 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8787 Constant *NewUGEPIIdx = ConstantInt::get(
8788 GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
8789 UGEPI->setOperand(1, NewUGEPIIdx);
8790 // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
8791 // inbounds to avoid UB.
8792 if (!GEPI->isInBounds()) {
8793 UGEPI->setIsInBounds(false);
8794 }
8795 }
8796 // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
8797 // alive on IndirectBr edges).
8798 assert(llvm::none_of(GEPIOp->users(),
8799 [&](User *Usr) {
8800 return cast<Instruction>(Usr)->getParent() != SrcBlock;
8801 }) &&
8802 "GEPIOp is used outside SrcBlock");
8803 return true;
8804}
8805
8806static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
8807 SmallSet<BasicBlock *, 32> &FreshBBs,
8808 bool IsHugeFunc) {
8809 // Try and convert
8810 // %c = icmp ult %x, 8
8811 // br %c, bla, blb
8812 // %tc = lshr %x, 3
8813 // to
8814 // %tc = lshr %x, 3
8815 // %c = icmp eq %tc, 0
8816 // br %c, bla, blb
8817 // Creating the cmp to zero can be better for the backend, especially if the
8818 // lshr produces flags that can be used automatically.
8819 if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
8820 return false;
8821
8822 ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
8823 if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
8824 return false;
8825
8826 Value *X = Cmp->getOperand(0);
8827 if (!X->hasUseList())
8828 return false;
8829
8830 APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
8831
8832 for (auto *U : X->users()) {
8833 Instruction *UI = dyn_cast<Instruction>(U);
8834 // A quick dominance check
8835 if (!UI ||
8836 (UI->getParent() != Branch->getParent() &&
8837 UI->getParent() != Branch->getSuccessor(0) &&
8838 UI->getParent() != Branch->getSuccessor(1)) ||
8839 (UI->getParent() != Branch->getParent() &&
8840 !UI->getParent()->getSinglePredecessor()))
8841 continue;
8842
8843 if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
8844 match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
8845 IRBuilder<> Builder(Branch);
8846 if (UI->getParent() != Branch->getParent())
8847 UI->moveBefore(Branch->getIterator());
8849 Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
8850 ConstantInt::get(UI->getType(), 0));
8851 LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8852 LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8853 replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8854 return true;
8855 }
8856 if (Cmp->isEquality() &&
8857 (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
8858 match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))) ||
8859 match(UI, m_Xor(m_Specific(X), m_SpecificInt(CmpC))))) {
8860 IRBuilder<> Builder(Branch);
8861 if (UI->getParent() != Branch->getParent())
8862 UI->moveBefore(Branch->getIterator());
8864 Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
8865 ConstantInt::get(UI->getType(), 0));
8866 LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8867 LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8868 replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8869 return true;
8870 }
8871 }
8872 return false;
8873}
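// Worked example of the power-of-two case above: for %c = icmp ult i32 %x, 8
// with a user %tc = lshr i32 %x, 3, the values with %x < 8 are exactly those
// with %tc == 0, so the branch can test "icmp eq i32 %tc, 0" instead and reuse
// the shift result (and, on some targets, its flags).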
8874
8875bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
8876 bool AnyChange = false;
8877 AnyChange = fixupDbgVariableRecordsOnInst(*I);
8878
8879 // Bail out if we inserted the instruction to prevent optimizations from
8880 // stepping on each other's toes.
8881 if (InsertedInsts.count(I))
8882 return AnyChange;
8883
8884 // TODO: Move into the switch on opcode below here.
8885 if (PHINode *P = dyn_cast<PHINode>(I)) {
8886 // It is possible for very late stage optimizations (such as SimplifyCFG)
8887 // to introduce PHI nodes too late to be cleaned up. If we detect such a
8888 // trivial PHI, go ahead and zap it here.
8889 if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
8890 LargeOffsetGEPMap.erase(P);
8891 replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
8892 P->eraseFromParent();
8893 ++NumPHIsElim;
8894 return true;
8895 }
8896 return AnyChange;
8897 }
8898
8899 if (CastInst *CI = dyn_cast<CastInst>(I)) {
8900 // If the source of the cast is a constant, then this should have
8901 // already been constant folded. The only reason NOT to constant fold
8902 // it is if something (e.g. LSR) was careful to place the constant
8903 // evaluation in a block other than the one that uses it (e.g. to hoist
8904 // the address of globals out of a loop). If this is the case, we don't
8905 // want to forward-subst the cast.
8906 if (isa<Constant>(CI->getOperand(0)))
8907 return AnyChange;
8908
8909 if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
8910 return true;
8911
8913 isa<TruncInst>(I)) &&
8915 I, LI->getLoopFor(I->getParent()), *TTI))
8916 return true;
8917
8918 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8919 /// Sink a zext or sext into its user blocks if the target type doesn't
8920 /// fit in one register
8921 if (TLI->getTypeAction(CI->getContext(),
8922 TLI->getValueType(*DL, CI->getType())) ==
8923 TargetLowering::TypeExpandInteger) {
8924 return SinkCast(CI);
8925 } else {
8926 if (TLI->optimizeExtendOrTruncateConversion(
8927 I, LI->getLoopFor(I->getParent()), *TTI))
8928 return true;
8929
8930 bool MadeChange = optimizeExt(I);
8931 return MadeChange | optimizeExtUses(I);
8932 }
8933 }
8934 return AnyChange;
8935 }
8936
8937 if (auto *Cmp = dyn_cast<CmpInst>(I))
8938 if (optimizeCmp(Cmp, ModifiedDT))
8939 return true;
8940
8941 if (match(I, m_URem(m_Value(), m_Value())))
8942 if (optimizeURem(I))
8943 return true;
8944
8945 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8946 LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8947 bool Modified = optimizeLoadExt(LI);
8948 unsigned AS = LI->getPointerAddressSpace();
8949 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
8950 return Modified;
8951 }
8952
8953 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
8954 if (splitMergedValStore(*SI, *DL, *TLI))
8955 return true;
8956 SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8957 unsigned AS = SI->getPointerAddressSpace();
8958 return optimizeMemoryInst(I, SI->getOperand(1),
8959 SI->getOperand(0)->getType(), AS);
8960 }
8961
8962 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
8963 unsigned AS = RMW->getPointerAddressSpace();
8964 return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
8965 }
8966
8967 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
8968 unsigned AS = CmpX->getPointerAddressSpace();
8969 return optimizeMemoryInst(I, CmpX->getPointerOperand(),
8970 CmpX->getCompareOperand()->getType(), AS);
8971 }
8972
8973 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
8974
8975 if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
8976 sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
8977 return true;
8978
8979 // TODO: Move this into the switch on opcode - it handles shifts already.
8980 if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
8981 BinOp->getOpcode() == Instruction::LShr)) {
8982 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
8983 if (CI && TLI->hasExtractBitsInsn())
8984 if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
8985 return true;
8986 }
8987
8988 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
8989 if (GEPI->hasAllZeroIndices()) {
8990 /// The GEP operand must be a pointer, so must its result -> BitCast
8991 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
8992 GEPI->getName(), GEPI->getIterator());
8993 NC->setDebugLoc(GEPI->getDebugLoc());
8994 replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
8995 RecursivelyDeleteTriviallyDeadInstructions(
8996 GEPI, TLInfo, nullptr,
8997 [&](Value *V) { removeAllAssertingVHReferences(V); });
8998 ++NumGEPsElim;
8999 optimizeInst(NC, ModifiedDT);
9000 return true;
9001 }
9002 if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
9003 return true;
9004 }
9005 }
9006
9007 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
9008 // freeze(icmp a, const)) -> icmp (freeze a), const
9009 // This helps generate efficient conditional jumps.
9010 Instruction *CmpI = nullptr;
9011 if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
9012 CmpI = II;
9013 else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
9014 CmpI = F->getFastMathFlags().none() ? F : nullptr;
9015
9016 if (CmpI && CmpI->hasOneUse()) {
9017 auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
9018 bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
9019 isa<ConstantPointerNull>(Op0);
9020 bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
9021 isa<ConstantPointerNull>(Op1);
9022 if (Const0 || Const1) {
9023 if (!Const0 || !Const1) {
9024 auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
9025 F->takeName(FI);
9026 CmpI->setOperand(Const0 ? 1 : 0, F);
9027 }
9028 replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
9029 FI->eraseFromParent();
9030 return true;
9031 }
9032 }
9033 return AnyChange;
9034 }
9035
9036 if (tryToSinkFreeOperands(I))
9037 return true;
9038
9039 switch (I->getOpcode()) {
9040 case Instruction::Shl:
9041 case Instruction::LShr:
9042 case Instruction::AShr:
9043 return optimizeShiftInst(cast<BinaryOperator>(I));
9044 case Instruction::Call:
9045 return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
9046 case Instruction::Select:
9047 return optimizeSelectInst(cast<SelectInst>(I));
9048 case Instruction::ShuffleVector:
9049 return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
9050 case Instruction::Switch:
9051 return optimizeSwitchInst(cast<SwitchInst>(I));
9052 case Instruction::ExtractElement:
9053 return optimizeExtractElementInst(cast<ExtractElementInst>(I));
9054 case Instruction::Br:
9055 return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
9056 }
9057
9058 return AnyChange;
9059}
9060
9061/// Given an OR instruction, check to see if this is a bitreverse
9062/// idiom. If so, insert the new intrinsic and return true.
9063bool CodeGenPrepare::makeBitReverse(Instruction &I) {
9064 if (!I.getType()->isIntegerTy() ||
9065 !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
9066 TLI->getValueType(*DL, I.getType(), true)))
9067 return false;
9068
9069 SmallVector<Instruction *, 4> Insts;
9070 if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
9071 return false;
9072 Instruction *LastInst = Insts.back();
9073 replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
9074 RecursivelyDeleteTriviallyDeadInstructions(
9075 &I, TLInfo, nullptr,
9076 [&](Value *V) { removeAllAssertingVHReferences(V); });
9077 return true;
9078}
9079
9080// In this pass we look for GEP and cast instructions that are used
9081// across basic blocks and rewrite them to improve basic-block-at-a-time
9082// selection.
9083bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
9084 SunkAddrs.clear();
9085 bool MadeChange = false;
9086
9087 do {
9088 CurInstIterator = BB.begin();
9089 ModifiedDT = ModifyDT::NotModifyDT;
9090 while (CurInstIterator != BB.end()) {
9091 MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
9092 if (ModifiedDT != ModifyDT::NotModifyDT) {
9093 // For huge functions we tend to quickly go through the inner optimization
9094 // opportunities in the BB. So we go back to the BB head to re-optimize
9095 // each instruction instead of going back to the function head.
9096 if (IsHugeFunc) {
9097 DT.reset();
9098 getDT(*BB.getParent());
9099 break;
9100 } else {
9101 return true;
9102 }
9103 }
9104 }
9105 } while (ModifiedDT == ModifyDT::ModifyInstDT);
9106
9107 bool MadeBitReverse = true;
9108 while (MadeBitReverse) {
9109 MadeBitReverse = false;
9110 for (auto &I : reverse(BB)) {
9111 if (makeBitReverse(I)) {
9112 MadeBitReverse = MadeChange = true;
9113 break;
9114 }
9115 }
9116 }
9117 MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
9118
9119 return MadeChange;
9120}
9121
9122bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
9123 bool AnyChange = false;
9124 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
9125 AnyChange |= fixupDbgVariableRecord(DVR);
9126 return AnyChange;
9127}
9128
9129// FIXME: should updating debug-info really cause the "changed" flag to fire,
9130// which can cause a function to be reprocessed?
9131bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
9132 if (DVR.Type != DbgVariableRecord::LocationType::Value &&
9133 DVR.Type != DbgVariableRecord::LocationType::Assign)
9134 return false;
9135
9136 // Does this DbgVariableRecord refer to a sunk address calculation?
9137 bool AnyChange = false;
9138 SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
9139 DVR.location_ops().end());
9140 for (Value *Location : LocationOps) {
9141 WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
9142 Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
9143 if (SunkAddr) {
9144 // Point dbg.value at locally computed address, which should give the best
9145 // opportunity to be accurately lowered. This update may change the type
9146 // of pointer being referred to; however this makes no difference to
9147 // debugging information, and we can't generate bitcasts that may affect
9148 // codegen.
9149 DVR.replaceVariableLocationOp(Location, SunkAddr);
9150 AnyChange = true;
9151 }
9152 }
9153 return AnyChange;
9154}
9155
 9156 static void DbgInserterHelper(DbgVariableRecord *DVR, BasicBlock::iterator VI) {
 9157 DVR->removeFromParent();
9158 BasicBlock *VIBB = VI->getParent();
9159 if (isa<PHINode>(VI))
9160 VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
9161 else
9162 VIBB->insertDbgRecordAfter(DVR, &*VI);
9163}
9164
9165// A llvm.dbg.value may be using a value before its definition, due to
9166// optimizations in this pass and others. Scan for such dbg.values, and rescue
9167// them by moving the dbg.value to immediately after the value definition.
9168// FIXME: Ideally this should never be necessary, and this has the potential
9169// to re-order dbg.value intrinsics.
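// For example (a purely illustrative scenario): if an earlier transformation
// sank the definition of %addr into a later block, a dbg.value record that
// still refers to %addr may now precede its definition. This routine moves
// such a record to immediately after the (possibly relocated) definition, or
// kills its location if it depends on several non-dominating definitions.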
9170bool CodeGenPrepare::placeDbgValues(Function &F) {
9171 bool MadeChange = false;
9172 DominatorTree DT(F);
9173
9174 auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
9175 SmallVector<Instruction *, 4> VIs;
9176 for (Value *V : DbgItem->location_ops())
9177 if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
9178 VIs.push_back(VI);
9179
9180 // This item may depend on multiple instructions, complicating any
9181 // potential sink. This block takes the defensive approach, opting to
9182 // "undef" the item if it has more than one instruction and any of them do
 9183 // not dominate it.
9184 for (Instruction *VI : VIs) {
9185 if (VI->isTerminator())
9186 continue;
9187
9188 // If VI is a phi in a block with an EHPad terminator, we can't insert
9189 // after it.
9190 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
9191 continue;
9192
9193 // If the defining instruction dominates the dbg.value, we do not need
9194 // to move the dbg.value.
9195 if (DT.dominates(VI, Position))
9196 continue;
9197
9198 // If we depend on multiple instructions and any of them doesn't
9199 // dominate this DVI, we probably can't salvage it: moving it to
9200 // after any of the instructions could cause us to lose the others.
9201 if (VIs.size() > 1) {
9202 LLVM_DEBUG(
9203 dbgs()
9204 << "Unable to find valid location for Debug Value, undefing:\n"
9205 << *DbgItem);
9206 DbgItem->setKillLocation();
9207 break;
9208 }
9209
9210 LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
9211 << *DbgItem << ' ' << *VI);
9212 DbgInserterHelper(DbgItem, VI->getIterator());
9213 MadeChange = true;
9214 ++NumDbgValueMoved;
9215 }
9216 };
9217
9218 for (BasicBlock &BB : F) {
9219 for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
9220 // Process any DbgVariableRecord records attached to this
9221 // instruction.
9222 for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
9223 filterDbgVars(Insn.getDbgRecordRange()))) {
9224 if (DVR.Type != DbgVariableRecord::LocationType::Value)
9225 continue;
9226 DbgProcessor(&DVR, &Insn);
9227 }
9228 }
9229 }
9230
9231 return MadeChange;
9232}
9233
9234// Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
9235// probes can be chained dependencies of other regular DAG nodes and block DAG
9236// combine optimizations.
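// For example (illustrative layout): a block ordered as
//   probe1; add; probe2; mul; probe3
// is rewritten so the trailing probes join the leading one, ahead of the
// first real instruction:
//   probe1; probe2; probe3; add; mul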
9237bool CodeGenPrepare::placePseudoProbes(Function &F) {
9238 bool MadeChange = false;
9239 for (auto &Block : F) {
 9240 // Move the remaining probes to the beginning of the block.
9241 auto FirstInst = Block.getFirstInsertionPt();
9242 while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
9243 ++FirstInst;
9244 BasicBlock::iterator I(FirstInst);
9245 I++;
9246 while (I != Block.end()) {
9247 if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
9248 II->moveBefore(FirstInst);
9249 MadeChange = true;
9250 }
9251 }
9252 }
9253 return MadeChange;
9254}
9255
9256/// Scale down both weights to fit into uint32_t.
9257static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
9258 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
9259 uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
9260 NewTrue = NewTrue / Scale;
9261 NewFalse = NewFalse / Scale;
9262}
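// Worked example (made-up weights): NewTrue = 6,000,000,000 and
// NewFalse = 2,000,000,000. NewMax = 6,000,000,000, so
// Scale = 6,000,000,000 / 4,294,967,295 + 1 = 2, and the weights become
// 3,000,000,000 and 1,000,000,000, both of which now fit in a uint32_t.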
9263
9264/// Some targets prefer to split a conditional branch like:
9265/// \code
9266/// %0 = icmp ne i32 %a, 0
9267/// %1 = icmp ne i32 %b, 0
9268/// %or.cond = or i1 %0, %1
9269/// br i1 %or.cond, label %TrueBB, label %FalseBB
9270/// \endcode
9271/// into multiple branch instructions like:
9272/// \code
9273/// bb1:
9274/// %0 = icmp ne i32 %a, 0
9275/// br i1 %0, label %TrueBB, label %bb2
9276/// bb2:
9277/// %1 = icmp ne i32 %b, 0
9278/// br i1 %1, label %TrueBB, label %FalseBB
9279/// \endcode
9280/// This usually allows instruction selection to do even further optimizations
9281/// and combine the compare with the branch instruction. Currently this is
9282/// applied for targets which have "cheap" jump instructions.
9283///
9284/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
9285///
9286bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
9287 if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
9288 return false;
9289
9290 bool MadeChange = false;
9291 for (auto &BB : F) {
9292 // Does this BB end with the following?
9293 // %cond1 = icmp|fcmp|binary instruction ...
9294 // %cond2 = icmp|fcmp|binary instruction ...
 9295 // %cond.or = or|and i1 %cond1, %cond2
 9296 // br i1 %cond.or, label %dest1, label %dest2
9297 Instruction *LogicOp;
9298 BasicBlock *TBB, *FBB;
9299 if (!match(BB.getTerminator(),
9300 m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
9301 continue;
9302
9303 auto *Br1 = cast<BranchInst>(BB.getTerminator());
9304 if (Br1->getMetadata(LLVMContext::MD_unpredictable))
9305 continue;
9306
 9307 // The merging of mostly-empty BBs can cause a degenerate branch.
9308 if (TBB == FBB)
9309 continue;
9310
9311 unsigned Opc;
9312 Value *Cond1, *Cond2;
9313 if (match(LogicOp,
9314 m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
9315 Opc = Instruction::And;
9316 else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
9317 m_OneUse(m_Value(Cond2)))))
9318 Opc = Instruction::Or;
9319 else
9320 continue;
9321
9322 auto IsGoodCond = [](Value *Cond) {
9323 return match(
9324 Cond,
 9325 m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
 9326 m_LogicalOr(m_Value(), m_Value()))));
9327 };
9328 if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
9329 continue;
9330
9331 LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
9332
9333 // Create a new BB.
9334 auto *TmpBB =
9335 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
9336 BB.getParent(), BB.getNextNode());
9337 if (IsHugeFunc)
9338 FreshBBs.insert(TmpBB);
9339
 9340 // Update the original basic block: use the first condition directly in the
 9341 // branch instruction and remove the no-longer-needed and/or instruction.
9342 Br1->setCondition(Cond1);
9343 LogicOp->eraseFromParent();
9344
9345 // Depending on the condition we have to either replace the true or the
9346 // false successor of the original branch instruction.
9347 if (Opc == Instruction::And)
9348 Br1->setSuccessor(0, TmpBB);
9349 else
9350 Br1->setSuccessor(1, TmpBB);
9351
9352 // Fill in the new basic block.
9353 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
9354 if (auto *I = dyn_cast<Instruction>(Cond2)) {
9355 I->removeFromParent();
9356 I->insertBefore(Br2->getIterator());
9357 }
9358
 9359 // Update PHI nodes in both successors. The original BB needs to be
 9360 // replaced in one successor's PHI nodes, because the branch now comes from
 9361 // the newly generated BB (TmpBB). In the other successor we need to add one
 9362 // incoming edge to the PHI nodes, because both branch instructions now
 9363 // target the same successor. Depending on the original branch condition
 9364 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
 9365 // we perform the correct update for the PHI nodes.
 9366 // This doesn't change the successor order of the just-created branch
 9367 // instruction (or any other instruction).
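// For the `or` case, for instance, the original TBB is now reached from both
// BB and TmpBB, so its PHIs need an extra incoming edge from TmpBB, while the
// original FBB is now reached only through TmpBB, so its PHIs must replace
// the incoming block BB with TmpBB; the swap below lets the two updates that
// follow handle the and/or cases uniformly.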
9368 if (Opc == Instruction::Or)
9369 std::swap(TBB, FBB);
9370
9371 // Replace the old BB with the new BB.
9372 TBB->replacePhiUsesWith(&BB, TmpBB);
9373
9374 // Add another incoming edge from the new BB.
9375 for (PHINode &PN : FBB->phis()) {
9376 auto *Val = PN.getIncomingValueForBlock(&BB);
9377 PN.addIncoming(Val, TmpBB);
9378 }
9379
9380 // Update the branch weights (from SelectionDAGBuilder::
9381 // FindMergedConditions).
9382 if (Opc == Instruction::Or) {
9383 // Codegen X | Y as:
9384 // BB1:
9385 // jmp_if_X TBB
9386 // jmp TmpBB
9387 // TmpBB:
9388 // jmp_if_Y TBB
9389 // jmp FBB
9390 //
9391
 9392 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9393 // The requirement is that
9394 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
9395 // = TrueProb for original BB.
9396 // Assuming the original weights are A and B, one choice is to set BB1's
9397 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
9398 // assumes that
9399 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
 9400 // Another choice is to assume TrueProb for BB1 equals TrueProb for
 9401 // TmpBB, but the math is more complicated.
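// Worked example (illustrative weights): with A = 3 and B = 1 the original
// branch is taken with probability 3/4. BB1 then gets weights 3:(3+2*1) =
// 3:5 and TmpBB gets 3:2, so TrueProb(BB1) + FalseProb(BB1)*TrueProb(TmpBB)
// = 3/8 + (5/8)*(3/5) = 3/4, matching the original TrueProb.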
9402 uint64_t TrueWeight, FalseWeight;
9403 if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9404 uint64_t NewTrueWeight = TrueWeight;
9405 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
9406 scaleWeights(NewTrueWeight, NewFalseWeight);
9407 Br1->setMetadata(LLVMContext::MD_prof,
9408 MDBuilder(Br1->getContext())
 9409 .createBranchWeights(NewTrueWeight, NewFalseWeight,
9410 hasBranchWeightOrigin(*Br1)));
9411
9412 NewTrueWeight = TrueWeight;
9413 NewFalseWeight = 2 * FalseWeight;
9414 scaleWeights(NewTrueWeight, NewFalseWeight);
9415 Br2->setMetadata(LLVMContext::MD_prof,
9416 MDBuilder(Br2->getContext())
 9417 .createBranchWeights(NewTrueWeight, NewFalseWeight));
9418 }
9419 } else {
9420 // Codegen X & Y as:
9421 // BB1:
9422 // jmp_if_X TmpBB
9423 // jmp FBB
9424 // TmpBB:
9425 // jmp_if_Y TBB
9426 // jmp FBB
9427 //
9428 // This requires creation of TmpBB after CurBB.
9429
9430 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9431 // The requirement is that
9432 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
9433 // = FalseProb for original BB.
9434 // Assuming the original weights are A and B, one choice is to set BB1's
9435 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
9436 // assumes that
9437 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
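// Worked example (illustrative weights): with A = 1 and B = 3 the original
// branch is false with probability 3/4. BB1 then gets weights (2*1+3):3 =
// 5:3 and TmpBB gets 2:3, so FalseProb(BB1) + TrueProb(BB1)*FalseProb(TmpBB)
// = 3/8 + (5/8)*(3/5) = 3/4, matching the original FalseProb.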
9438 uint64_t TrueWeight, FalseWeight;
9439 if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9440 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
9441 uint64_t NewFalseWeight = FalseWeight;
9442 scaleWeights(NewTrueWeight, NewFalseWeight);
9443 Br1->setMetadata(LLVMContext::MD_prof,
9444 MDBuilder(Br1->getContext())
 9445 .createBranchWeights(NewTrueWeight, NewFalseWeight));
9446
9447 NewTrueWeight = 2 * TrueWeight;
9448 NewFalseWeight = FalseWeight;
9449 scaleWeights(NewTrueWeight, NewFalseWeight);
9450 Br2->setMetadata(LLVMContext::MD_prof,
9451 MDBuilder(Br2->getContext())
 9452 .createBranchWeights(NewTrueWeight, NewFalseWeight));
9453 }
9454 }
9455
9456 ModifiedDT = ModifyDT::ModifyBBDT;
9457 MadeChange = true;
9458
9459 LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
9460 TmpBB->dump());
9461 }
9462 return MadeChange;
9463}
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:293
const Use & getOperandUse(unsigned i) const
Definition User.h:246
void setOperand(unsigned i, Value *Val)
Definition User.h:238
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:402
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition Value.cpp:963
LLVM_ABI bool isUsedInBasicBlock(const BasicBlock *BB) const
Check if this value is used in the specified basic block.
Definition Value.cpp:242
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
bool use_empty() const
Definition Value.h:346
user_iterator user_end()
Definition Value.h:410
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
iterator_range< use_iterator > uses()
Definition Value.h:380
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition Value.h:838
user_iterator_impl< User > user_iterator
Definition Value.h:391
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
LLVM_ABI void dump() const
Support for debugging, callable in GDB: V->dump()
bool pointsToAliveValue() const
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ Entry
Definition COFF.h:862
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
unsigned getAddrMode(MCInstrInfo const &MCII, MCInst const &MCI)
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
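A small usage sketch of the PatternMatch combinators listed here, e.g. recognizing "(X + C) >> C2" while binding the subvalues; the function name isAddThenShr is hypothetical.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;
static bool isAddThenShr(Value *V, Value *&X, const APInt *&C, const APInt *&C2) {
  // Matches a right shift (lshr/ashr) of an add by constants, capturing X, C, C2.
  return match(V, m_Shr(m_Add(m_Value(X), m_APInt(C)), m_APInt(C2)));
}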
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
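For example (a hedged sketch, not this file's code), the matcher can recognize the compare-based unsigned-add-overflow idiom that CodeGenPrepare may rewrite into llvm.uadd.with.overflow; isUAddOverflowCheck is a hypothetical name.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;
static bool isUAddOverflowCheck(ICmpInst *Cmp) {
  Value *A, *B, *Sum;
  // Binds the two addends and the add result if Cmp checks for unsigned overflow.
  return match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}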
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
int compare(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale)
Compare two scaled numbers.
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ Assume
Do not drop type tests (default).
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
SmallVector< Node, 4 > NodeList
Definition RDFGraph.h:550
iterator end() const
Definition BasicBlock.h:89
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
LLVM_ABI iterator begin() const
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
std::enable_if_t< std::is_signed_v< T >, T > MulOverflow(T X, T Y, T &Result)
Multiply two signed integers, computing the two's complement truncated result, returning true if an o...
Definition MathExtras.h:753
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1763
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
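A typical cleanup pattern built on this helper (a sketch under the assumption that the replaced instruction has no side effects; replaceWithSimplified is a hypothetical name):
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static void replaceWithSimplified(Instruction *I, Value *SimpleV,
                                  const TargetLibraryInfo *TLI) {
  I->replaceAllUsesWith(SimpleV);
  // I is now use-free; if it is trivially dead this deletes it and, recursively,
  // any of its operands that become trivially dead as a result.
  RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}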
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
APInt operator*(APInt a, uint64_t RHS)
Definition APInt.h:2236
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition Alignment.h:134
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1731
auto successors(const MachineBasicBlock *BB)
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI ReturnInst * FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred, DomTreeUpdater *DTU=nullptr)
This method duplicates the specified return instruction into a predecessor which ends in an unconditi...
bool operator!=(uint64_t V1, const APInt &V2)
Definition APInt.h:2114
constexpr from_range_t from_range
LLVM_ABI Instruction * SplitBlockAndInsertIfElse(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ElseBlock=nullptr)
Similar to SplitBlockAndInsertIfThen, but the inserted block is on the false path of the branch.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI void initializeCodeGenPrepareLegacyPassPass(PassRegistry &)
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
auto unique(Range &&R, Predicate P)
Definition STLExtras.h:2124
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
LLVM_ABI bool SplitIndirectBrCriticalEdges(Function &F, bool IgnoreBlocksWithoutPHI, BranchProbabilityInfo *BPI=nullptr, BlockFrequencyInfo *BFI=nullptr)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
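A minimal sketch of the usual call pattern (tryFold is a hypothetical helper; the caller is left to erase the now-dead instruction):
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
static bool tryFold(Instruction *I, const DataLayout &DL) {
  if (Value *V = simplifyInstruction(I, SimplifyQuery(DL))) {
    I->replaceAllUsesWith(V); // use the simplified value everywhere
    return true;
  }
  return false;
}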
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition Local.h:252
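For illustration, a sketch of raising a load's alignment to whatever can be proven for its pointer (upgradeLoadAlignment is a hypothetical name):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static void upgradeLoadAlignment(LoadInst *LI, const DataLayout &DL) {
  Align Known = getKnownAlignment(LI->getPointerOperand(), DL, LI);
  if (Known.value() > LI->getAlign().value())
    LI->setAlign(Known); // keep the stronger, proven alignment
}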
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition STLExtras.h:2176
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Examine each PHI in the given block and delete it if it is dead.
LLVM_ABI bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition Local.cpp:3761
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
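As a rough sketch of one use of this utility: expand "select Cond, A, B" into an explicit branch diamond plus a phi, the general shape counted by NumSelectsExpanded. This is a simplified illustration, not the pass's actual select-expansion logic (which also handles select groups, profile metadata, and debug info); expandSelectToBranch is a hypothetical name.
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
static void expandSelectToBranch(SelectInst *SI) {
  Instruction *ThenTerm = nullptr, *ElseTerm = nullptr;
  // Split before SI: head branches on the condition into then/else blocks,
  // both of which fall through to the tail block that now starts at SI.
  SplitBlockAndInsertIfThenElse(SI->getCondition(), SI->getIterator(),
                                &ThenTerm, &ElseTerm);
  PHINode *PN = PHINode::Create(SI->getType(), 2, "select.phi", SI->getIterator());
  PN->addIncoming(SI->getTrueValue(), ThenTerm->getParent());
  PN->addIncoming(SI->getFalseValue(), ElseTerm->getParent());
  SI->replaceAllUsesWith(PN);
  SI->eraseFromParent();
}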
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
auto make_first_range(ContainerTy &&c)
Given a container of pairs, return a range over the first elements.
Definition STLExtras.h:1397
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI FunctionPass * createCodeGenPrepareLegacyPass()
createCodeGenPrepareLegacyPass - Transform the code to expose more pattern matching during instructio...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
Definition Analysis.cpp:207
LLVM_ABI bool VerifyLoopInfo
Enable verification of loop info.
Definition LoopInfo.cpp:51
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
bool attributesPermitTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI, bool *AllowDifferingSizes=nullptr)
Test if given that the input instruction is in the tail call position, if there is an attribute misma...
Definition Analysis.cpp:592
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2002
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool bypassSlowDivision(BasicBlock *BB, const DenseMap< unsigned int, unsigned int > &BypassWidth)
This optimization identifies DIV instructions in a BB that can be profitably bypassed and carried out...
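A sketch of how the target's slow-division widths feed this utility (bypassSlowDivInBlock is a hypothetical wrapper):
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
using namespace llvm;
static bool bypassSlowDivInBlock(BasicBlock &BB, const TargetLowering &TLI) {
  // e.g. {64 -> 32} on targets where a 64-bit divide is much slower than 32-bit.
  const DenseMap<unsigned, unsigned> &Widths = TLI.getBypassSlowDivWidths();
  return !Widths.empty() && bypassSlowDivision(&BB, Widths);
}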
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2168
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:365
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
std::enable_if_t< std::is_signed_v< T >, T > AddOverflow(T X, T Y, T &Result)
Add two signed integers, computing the two's complement truncated result, returning true if overflow ...
Definition MathExtras.h:701
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
DenseMap< const Value *, Value * > ValueToValueMap
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define NC
Definition regutils.h:42
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:300
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
bool isRound() const
Return true if the size is a power-of-two number of bytes.
Definition ValueTypes.h:248
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
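A brief sketch of comparing two IR types through their EVTs (assumes both types are sized scalar types; isWiderInteger is a hypothetical name):
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;
static bool isWiderInteger(Type *A, Type *B) {
  EVT VA = EVT::getEVT(A), VB = EVT::getEVT(B);
  // True only when both are (vector) integer types and A is strictly wider.
  return VA.isInteger() && VB.isInteger() && VA.bitsGT(VB);
}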
This contains information for each constraint that we are lowering.