// Doxygen listing header (LLVM 23.0.0git — SimplifyCFG.cpp): see the official
// documentation for this file.
1//===- SimplifyCFG.cpp - Code to perform CFG simplification ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Peephole optimize the CFG.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/MapVector.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Sequence.h"
20#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/ADT/StringRef.h"
31#include "llvm/Analysis/Loads.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugInfo.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GlobalValue.h"
48#include "llvm/IR/IRBuilder.h"
49#include "llvm/IR/InstrTypes.h"
50#include "llvm/IR/Instruction.h"
53#include "llvm/IR/LLVMContext.h"
54#include "llvm/IR/MDBuilder.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/NoFolder.h"
59#include "llvm/IR/Operator.h"
62#include "llvm/IR/Type.h"
63#include "llvm/IR/Use.h"
64#include "llvm/IR/User.h"
65#include "llvm/IR/Value.h"
66#include "llvm/IR/ValueHandle.h"
70#include "llvm/Support/Debug.h"
80#include <algorithm>
81#include <cassert>
82#include <climits>
83#include <cmath>
84#include <cstddef>
85#include <cstdint>
86#include <iterator>
87#include <map>
88#include <optional>
89#include <set>
90#include <tuple>
91#include <utility>
92#include <vector>
93
94using namespace llvm;
95using namespace PatternMatch;
96
97#define DEBUG_TYPE "simplifycfg"
98
99namespace llvm {
100
102 "simplifycfg-require-and-preserve-domtree", cl::Hidden,
103
104 cl::desc(
105 "Temporary development switch used to gradually uplift SimplifyCFG "
106 "into preserving DomTree,"));
107
108// Chosen as 2 so as to be cheap, but still to have enough power to fold
109// a select, so the "clamp" idiom (of a min followed by a max) will be caught.
110// To catch this, we need to fold a compare and a select, hence '2' being the
111// minimum reasonable default.
113 "phi-node-folding-threshold", cl::Hidden, cl::init(2),
114 cl::desc(
115 "Control the amount of phi node folding to perform (default = 2)"));
116
118 "two-entry-phi-node-folding-threshold", cl::Hidden, cl::init(4),
119 cl::desc("Control the maximal total instruction cost that we are willing "
120 "to speculatively execute to fold a 2-entry PHI node into a "
121 "select (default = 4)"));
122
123static cl::opt<bool>
124 HoistCommon("simplifycfg-hoist-common", cl::Hidden, cl::init(true),
125 cl::desc("Hoist common instructions up to the parent block"));
126
128 "simplifycfg-hoist-loads-with-cond-faulting", cl::Hidden, cl::init(true),
129 cl::desc("Hoist loads if the target supports conditional faulting"));
130
132 "simplifycfg-hoist-stores-with-cond-faulting", cl::Hidden, cl::init(true),
133 cl::desc("Hoist stores if the target supports conditional faulting"));
134
136 "hoist-loads-stores-with-cond-faulting-threshold", cl::Hidden, cl::init(6),
137 cl::desc("Control the maximal conditional load/store that we are willing "
138 "to speculatively execute to eliminate conditional branch "
139 "(default = 6)"));
140
142 HoistCommonSkipLimit("simplifycfg-hoist-common-skip-limit", cl::Hidden,
143 cl::init(20),
144 cl::desc("Allow reordering across at most this many "
145 "instructions when hoisting"));
146
147static cl::opt<bool>
148 SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
149 cl::desc("Sink common instructions down to the end block"));
150
152 "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
153 cl::desc("Hoist conditional stores if an unconditional store precedes"));
154
156 "simplifycfg-merge-cond-stores", cl::Hidden, cl::init(true),
157 cl::desc("Hoist conditional stores even if an unconditional store does not "
158 "precede - hoist multiple conditional stores into a single "
159 "predicated store"));
160
162 "simplifycfg-merge-cond-stores-aggressively", cl::Hidden, cl::init(false),
163 cl::desc("When merging conditional stores, do so even if the resultant "
164 "basic blocks are unlikely to be if-converted as a result"));
165
167 "speculate-one-expensive-inst", cl::Hidden, cl::init(true),
168 cl::desc("Allow exactly one expensive instruction to be speculatively "
169 "executed"));
170
172 "max-speculation-depth", cl::Hidden, cl::init(10),
173 cl::desc("Limit maximum recursion depth when calculating costs of "
174 "speculatively executed instructions"));
175
176static cl::opt<int>
177 MaxSmallBlockSize("simplifycfg-max-small-block-size", cl::Hidden,
178 cl::init(10),
179 cl::desc("Max size of a block which is still considered "
180 "small enough to thread through"));
181
182// Two is chosen to allow one negation and a logical combine.
184 BranchFoldThreshold("simplifycfg-branch-fold-threshold", cl::Hidden,
185 cl::init(2),
186 cl::desc("Maximum cost of combining conditions when "
187 "folding branches"));
188
190 "simplifycfg-branch-fold-common-dest-vector-multiplier", cl::Hidden,
191 cl::init(2),
192 cl::desc("Multiplier to apply to threshold when determining whether or not "
193 "to fold branch to common destination when vector operations are "
194 "present"));
195
197 "simplifycfg-merge-compatible-invokes", cl::Hidden, cl::init(true),
198 cl::desc("Allow SimplifyCFG to merge invokes together when appropriate"));
199
201 "max-switch-cases-per-result", cl::Hidden, cl::init(16),
202 cl::desc("Limit cases to analyze when converting a switch to select"));
203
205 "max-jump-threading-live-blocks", cl::Hidden, cl::init(24),
206 cl::desc("Limit number of blocks a define in a threaded block is allowed "
207 "to be live in"));
208
210
211} // end namespace llvm
212
213STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
214STATISTIC(NumLinearMaps,
215 "Number of switch instructions turned into linear mapping");
216STATISTIC(NumLookupTables,
217 "Number of switch instructions turned into lookup tables");
219 NumLookupTablesHoles,
220 "Number of switch instructions turned into lookup tables (holes checked)");
221STATISTIC(NumTableCmpReuses, "Number of reused switch table lookup compares");
222STATISTIC(NumFoldValueComparisonIntoPredecessors,
223 "Number of value comparisons folded into predecessor basic blocks");
224STATISTIC(NumFoldBranchToCommonDest,
225 "Number of branches folded into predecessor basic block");
227 NumHoistCommonCode,
228 "Number of common instruction 'blocks' hoisted up to the begin block");
229STATISTIC(NumHoistCommonInstrs,
230 "Number of common instructions hoisted up to the begin block");
231STATISTIC(NumSinkCommonCode,
232 "Number of common instruction 'blocks' sunk down to the end block");
233STATISTIC(NumSinkCommonInstrs,
234 "Number of common instructions sunk down to the end block");
235STATISTIC(NumSpeculations, "Number of speculative executed instructions");
236STATISTIC(NumInvokes,
237 "Number of invokes with empty resume blocks simplified into calls");
238STATISTIC(NumInvokesMerged, "Number of invokes that were merged together");
239STATISTIC(NumInvokeSetsFormed, "Number of invoke sets that were formed");
240
241namespace {
242
243// The first field contains the value that the switch produces when a certain
244// case group is selected, and the second field is a vector containing the
245// cases composing the case group.
246using SwitchCaseResultVectorTy =
248
249// The first field contains the phi node that generates a result of the switch
250// and the second field contains the value generated for a certain case in the
251// switch for that PHI.
252using SwitchCaseResultsTy = SmallVector<std::pair<PHINode *, Constant *>, 4>;
253
254/// ValueEqualityComparisonCase - Represents a case of a switch.
255struct ValueEqualityComparisonCase {
257 BasicBlock *Dest;
258
259 ValueEqualityComparisonCase(ConstantInt *Value, BasicBlock *Dest)
260 : Value(Value), Dest(Dest) {}
261
262 bool operator<(ValueEqualityComparisonCase RHS) const {
263 // Comparing pointers is ok as we only rely on the order for uniquing.
264 return Value < RHS.Value;
265 }
266
267 bool operator==(BasicBlock *RHSDest) const { return Dest == RHSDest; }
268};
269
270class SimplifyCFGOpt {
271 const TargetTransformInfo &TTI;
272 DomTreeUpdater *DTU;
273 const DataLayout &DL;
274 ArrayRef<WeakVH> LoopHeaders;
275 const SimplifyCFGOptions &Options;
276 bool Resimplify;
277
278 Value *isValueEqualityComparison(Instruction *TI);
279 BasicBlock *getValueEqualityComparisonCases(
280 Instruction *TI, std::vector<ValueEqualityComparisonCase> &Cases);
281 bool simplifyEqualityComparisonWithOnlyPredecessor(Instruction *TI,
282 BasicBlock *Pred,
283 IRBuilder<> &Builder);
284 bool performValueComparisonIntoPredecessorFolding(Instruction *TI, Value *&CV,
285 Instruction *PTI,
286 IRBuilder<> &Builder);
287 bool foldValueComparisonIntoPredecessors(Instruction *TI,
288 IRBuilder<> &Builder);
289
290 bool simplifyResume(ResumeInst *RI, IRBuilder<> &Builder);
291 bool simplifySingleResume(ResumeInst *RI);
292 bool simplifyCommonResume(ResumeInst *RI);
293 bool simplifyCleanupReturn(CleanupReturnInst *RI);
294 bool simplifyUnreachable(UnreachableInst *UI);
295 bool simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder);
296 bool simplifyDuplicateSwitchArms(SwitchInst *SI, DomTreeUpdater *DTU);
297 bool simplifyIndirectBr(IndirectBrInst *IBI);
298 bool simplifyUncondBranch(UncondBrInst *BI, IRBuilder<> &Builder);
299 bool simplifyCondBranch(CondBrInst *BI, IRBuilder<> &Builder);
300 bool foldCondBranchOnValueKnownInPredecessor(CondBrInst *BI);
301
302 bool tryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
303 IRBuilder<> &Builder);
304 bool tryToSimplifyUncondBranchWithICmpSelectInIt(ICmpInst *ICI,
305 SelectInst *Select,
306 IRBuilder<> &Builder);
307 bool hoistCommonCodeFromSuccessors(Instruction *TI, bool AllInstsEqOnly);
308 bool hoistSuccIdenticalTerminatorToSwitchOrIf(
309 Instruction *TI, Instruction *I1,
310 SmallVectorImpl<Instruction *> &OtherSuccTIs,
311 ArrayRef<BasicBlock *> UniqueSuccessors);
312 bool speculativelyExecuteBB(CondBrInst *BI, BasicBlock *ThenBB);
313 bool simplifyTerminatorOnSelect(Instruction *OldTerm, Value *Cond,
314 BasicBlock *TrueBB, BasicBlock *FalseBB,
315 uint32_t TrueWeight, uint32_t FalseWeight);
316 bool simplifyBranchOnICmpChain(CondBrInst *BI, IRBuilder<> &Builder,
317 const DataLayout &DL);
318 bool simplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select);
319 bool simplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI);
320 bool turnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder);
321 bool simplifyDuplicatePredecessors(BasicBlock *Succ, DomTreeUpdater *DTU);
322
323public:
324 SimplifyCFGOpt(const TargetTransformInfo &TTI, DomTreeUpdater *DTU,
325 const DataLayout &DL, ArrayRef<WeakVH> LoopHeaders,
326 const SimplifyCFGOptions &Opts)
327 : TTI(TTI), DTU(DTU), DL(DL), LoopHeaders(LoopHeaders), Options(Opts) {
328 assert((!DTU || !DTU->hasPostDomTree()) &&
329 "SimplifyCFG is not yet capable of maintaining validity of a "
330 "PostDomTree, so don't ask for it.");
331 }
332
333 bool simplifyOnce(BasicBlock *BB);
334 bool run(BasicBlock *BB);
335
336 // Helper to set Resimplify and return change indication.
337 bool requestResimplify() {
338 Resimplify = true;
339 return true;
340 }
341};
342
343// we synthesize a || b as select a, true, b
344// we synthesize a && b as select a, b, false
345// this function determines if SI is playing one of those roles.
346[[maybe_unused]] bool
347isSelectInRoleOfConjunctionOrDisjunction(const SelectInst *SI) {
348 return ((isa<ConstantInt>(SI->getTrueValue()) &&
349 (dyn_cast<ConstantInt>(SI->getTrueValue())->isOne())) ||
350 (isa<ConstantInt>(SI->getFalseValue()) &&
351 (dyn_cast<ConstantInt>(SI->getFalseValue())->isNullValue())));
352}
353
354} // end anonymous namespace
355
356/// Return true if all the PHI nodes in the basic block \p BB
357/// receive compatible (identical) incoming values when coming from
358/// all of the predecessor blocks that are specified in \p IncomingBlocks.
359///
360/// Note that if the values aren't exactly identical, but \p EquivalenceSet
361/// is provided, and *both* of the values are present in the set,
362/// then they are considered equal.
364 BasicBlock *BB, ArrayRef<BasicBlock *> IncomingBlocks,
365 SmallPtrSetImpl<Value *> *EquivalenceSet = nullptr) {
366 assert(IncomingBlocks.size() == 2 &&
367 "Only for a pair of incoming blocks at the time!");
368
369 // FIXME: it is okay if one of the incoming values is an `undef` value,
370 // iff the other incoming value is guaranteed to be a non-poison value.
371 // FIXME: it is okay if one of the incoming values is a `poison` value.
372 return all_of(BB->phis(), [IncomingBlocks, EquivalenceSet](PHINode &PN) {
373 Value *IV0 = PN.getIncomingValueForBlock(IncomingBlocks[0]);
374 Value *IV1 = PN.getIncomingValueForBlock(IncomingBlocks[1]);
375 if (IV0 == IV1)
376 return true;
377 if (EquivalenceSet && EquivalenceSet->contains(IV0) &&
378 EquivalenceSet->contains(IV1))
379 return true;
380 return false;
381 });
382}
383
384/// Return true if it is safe to merge these two
385/// terminator instructions together.
386static bool
388 SmallSetVector<BasicBlock *, 4> *FailBlocks = nullptr) {
389 if (SI1 == SI2)
390 return false; // Can't merge with self!
391
392 // It is not safe to merge these two switch instructions if they have a common
393 // successor, and if that successor has a PHI node, and if *that* PHI node has
394 // conflicting incoming values from the two switch blocks.
395 BasicBlock *SI1BB = SI1->getParent();
396 BasicBlock *SI2BB = SI2->getParent();
397
399 bool Fail = false;
400 for (BasicBlock *Succ : successors(SI2BB)) {
401 if (!SI1Succs.count(Succ))
402 continue;
403 if (incomingValuesAreCompatible(Succ, {SI1BB, SI2BB}))
404 continue;
405 Fail = true;
406 if (FailBlocks)
407 FailBlocks->insert(Succ);
408 else
409 break;
410 }
411
412 return !Fail;
413}
414
415/// Update PHI nodes in Succ to indicate that there will now be entries in it
416/// from the 'NewPred' block. The values that will be flowing into the PHI nodes
417/// will be the same as those coming in from ExistPred, an existing predecessor
418/// of Succ.
419static void addPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
420 BasicBlock *ExistPred,
421 MemorySSAUpdater *MSSAU = nullptr) {
422 for (PHINode &PN : Succ->phis())
423 PN.addIncoming(PN.getIncomingValueForBlock(ExistPred), NewPred);
424 if (MSSAU)
425 if (auto *MPhi = MSSAU->getMemorySSA()->getMemoryAccess(Succ))
426 MPhi->addIncoming(MPhi->getIncomingValueForBlock(ExistPred), NewPred);
427}
428
429/// Compute an abstract "cost" of speculating the given instruction,
430/// which is assumed to be safe to speculate. TCC_Free means cheap,
431/// TCC_Basic means less cheap, and TCC_Expensive means prohibitively
432/// expensive.
434 const TargetTransformInfo &TTI) {
435 return TTI.getInstructionCost(I, TargetTransformInfo::TCK_SizeAndLatency);
436}
437
438/// If we have a merge point of an "if condition" as accepted above,
439/// return true if the specified value dominates the block. We don't handle
440/// the true generality of domination here, just a special case which works
441/// well enough for us.
442///
443/// If AggressiveInsts is non-null, and if V does not dominate BB, we check to
444/// see if V (which must be an instruction) and its recursive operands
445/// that do not dominate BB have a combined cost lower than Budget and
446/// are non-trapping. If both are true, the instruction is inserted into the
447/// set and true is returned.
448///
449/// The cost for most non-trapping instructions is defined as 1 except for
450/// Select whose cost is 2.
451///
452/// After this function returns, Cost is increased by the cost of
453/// V plus its non-dominating operands. If that cost is greater than
454/// Budget, false is returned and Cost is undefined.
456 Value *V, BasicBlock *BB, Instruction *InsertPt,
457 SmallPtrSetImpl<Instruction *> &AggressiveInsts, InstructionCost &Cost,
459 SmallPtrSetImpl<Instruction *> &ZeroCostInstructions, unsigned Depth = 0) {
460 // It is possible to hit a zero-cost cycle (phi/gep instructions for example),
461 // so limit the recursion depth.
462 // TODO: While this recursion limit does prevent pathological behavior, it
463 // would be better to track visited instructions to avoid cycles.
465 return false;
466
468 if (!I) {
469 // Non-instructions dominate all instructions and can be executed
470 // unconditionally.
471 return true;
472 }
473 BasicBlock *PBB = I->getParent();
474
475 // We don't want to allow weird loops that might have the "if condition" in
476 // the bottom of this block.
477 if (PBB == BB)
478 return false;
479
480 // If this instruction is defined in a block that contains an unconditional
481 // branch to BB, then it must be in the 'conditional' part of the "if
482 // statement". If not, it definitely dominates the region.
484 if (!BI || BI->getSuccessor() != BB)
485 return true;
486
487 // If we have seen this instruction before, don't count it again.
488 if (AggressiveInsts.count(I))
489 return true;
490
491 // Okay, it looks like the instruction IS in the "condition". Check to
492 // see if it's a cheap instruction to unconditionally compute, and if it
493 // only uses stuff defined outside of the condition. If so, hoist it out.
494 if (!isSafeToSpeculativelyExecute(I, InsertPt, AC))
495 return false;
496
497 // Overflow arithmetic instruction plus extract value are usually generated
498 // when a division is being replaced. But, in this case, the zero check may
499 // still be kept in the code. In that case it would be worth to hoist these
500 // two instruction out of the basic block. Let's treat this pattern as one
501 // single cheap instruction here!
502 WithOverflowInst *OverflowInst;
503 if (match(I, m_ExtractValue<1>(m_OneUse(m_WithOverflowInst(OverflowInst))))) {
504 ZeroCostInstructions.insert(OverflowInst);
505 Cost += 1;
506 } else if (!ZeroCostInstructions.contains(I))
507 Cost += computeSpeculationCost(I, TTI);
508
509 // Allow exactly one instruction to be speculated regardless of its cost
510 // (as long as it is safe to do so).
511 // This is intended to flatten the CFG even if the instruction is a division
512 // or other expensive operation. The speculation of an expensive instruction
513 // is expected to be undone in CodeGenPrepare if the speculation has not
514 // enabled further IR optimizations.
515 if (Cost > Budget &&
516 (!SpeculateOneExpensiveInst || !AggressiveInsts.empty() || Depth > 0 ||
517 !Cost.isValid()))
518 return false;
519
520 // Okay, we can only really hoist these out if their operands do
521 // not take us over the cost threshold.
522 for (Use &Op : I->operands())
523 if (!dominatesMergePoint(Op, BB, InsertPt, AggressiveInsts, Cost, Budget,
524 TTI, AC, ZeroCostInstructions, Depth + 1))
525 return false;
526 // Okay, it's safe to do this! Remember this instruction.
527 AggressiveInsts.insert(I);
528 return true;
529}
530
531/// Extract ConstantInt from value, looking through IntToPtr
532/// and PointerNullValue. Return NULL if value is not a constant int.
534 // Normal constant int.
536 if (CI || !isa<Constant>(V) || !V->getType()->isPointerTy())
537 return CI;
538
539 // It is not safe to look through inttoptr or ptrtoint when using unstable
540 // pointer types.
541 if (DL.hasUnstableRepresentation(V->getType()))
542 return nullptr;
543
544 // This is some kind of pointer constant. Turn it into a pointer-sized
545 // ConstantInt if possible.
546 IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
547
548 // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
550 return ConstantInt::get(IntPtrTy, 0);
551
552 // IntToPtr const int, we can look through this if the semantics of
553 // inttoptr for this address space are a simple (truncating) bitcast.
555 if (CE->getOpcode() == Instruction::IntToPtr)
556 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(0))) {
557 // The constant is very likely to have the right type already.
558 if (CI->getType() == IntPtrTy)
559 return CI;
560 else
561 return cast<ConstantInt>(
562 ConstantFoldIntegerCast(CI, IntPtrTy, /*isSigned=*/false, DL));
563 }
564 return nullptr;
565}
566
567namespace {
568
569/// Given a chain of or (||) or and (&&) comparison of a value against a
570/// constant, this will try to recover the information required for a switch
571/// structure.
572/// It will depth-first traverse the chain of comparison, seeking for patterns
573/// like %a == 12 or %a < 4 and combine them to produce a set of integer
574/// representing the different cases for the switch.
575/// Note that if the chain is composed of '||' it will build the set of elements
576/// that matches the comparisons (i.e. any of this value validate the chain)
577/// while for a chain of '&&' it will build the set elements that make the test
578/// fail.
579struct ConstantComparesGatherer {
580 const DataLayout &DL;
581
582 /// Value found for the switch comparison
583 Value *CompValue = nullptr;
584
585 /// Extra clause to be checked before the switch
586 Value *Extra = nullptr;
587
588 /// Set of integers to match in switch
590
591 /// Number of comparisons matched in the and/or chain
592 unsigned UsedICmps = 0;
593
594 /// If the elements in Vals matches the comparisons
595 bool IsEq = false;
596
597 // Used to check if the first matched CompValue shall be the Extra check.
598 bool IgnoreFirstMatch = false;
599 bool MultipleMatches = false;
600
601 /// Construct and compute the result for the comparison instruction Cond
602 ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL) : DL(DL) {
603 gather(Cond);
604 if (CompValue || !MultipleMatches)
605 return;
606 Extra = nullptr;
607 Vals.clear();
608 UsedICmps = 0;
609 IgnoreFirstMatch = true;
610 gather(Cond);
611 }
612
613 ConstantComparesGatherer(const ConstantComparesGatherer &) = delete;
614 ConstantComparesGatherer &
615 operator=(const ConstantComparesGatherer &) = delete;
616
617private:
618 /// Try to set the current value used for the comparison, it succeeds only if
619 /// it wasn't set before or if the new value is the same as the old one
620 bool setValueOnce(Value *NewVal) {
621 if (IgnoreFirstMatch) {
622 IgnoreFirstMatch = false;
623 return false;
624 }
625 if (CompValue && CompValue != NewVal) {
626 MultipleMatches = true;
627 return false;
628 }
629 CompValue = NewVal;
630 return true;
631 }
632
633 /// Try to match Instruction "I" as a comparison against a constant and
634 /// populates the array Vals with the set of values that match (or do not
635 /// match depending on isEQ).
636 /// Return false on failure. On success, the Value the comparison matched
637 /// against is placed in CompValue.
638 /// If CompValue is already set, the function is expected to fail if a match
639 /// is found but the value compared to is different.
640 bool matchInstruction(Instruction *I, bool isEQ) {
641 if (match(I, m_Not(m_Instruction(I))))
642 isEQ = !isEQ;
643
644 Value *Val;
645 if (match(I, m_NUWTrunc(m_Value(Val)))) {
646 // If we already have a value for the switch, it has to match!
647 if (!setValueOnce(Val))
648 return false;
649 UsedICmps++;
650 Vals.push_back(ConstantInt::get(cast<IntegerType>(Val->getType()), isEQ));
651 return true;
652 }
653 // If this is an icmp against a constant, handle this as one of the cases.
654 ICmpInst *ICI;
655 ConstantInt *C;
656 if (!((ICI = dyn_cast<ICmpInst>(I)) &&
657 (C = getConstantInt(I->getOperand(1), DL)))) {
658 return false;
659 }
660
661 Value *RHSVal;
662 const APInt *RHSC;
663
664 // Pattern match a special case
665 // (x & ~2^z) == y --> x == y || x == y|2^z
666 // This undoes a transformation done by instcombine to fuse 2 compares.
667 if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
668 // It's a little bit hard to see why the following transformations are
669 // correct. Here is a CVC3 program to verify them for 64-bit values:
670
671 /*
672 ONE : BITVECTOR(64) = BVZEROEXTEND(0bin1, 63);
673 x : BITVECTOR(64);
674 y : BITVECTOR(64);
675 z : BITVECTOR(64);
676 mask : BITVECTOR(64) = BVSHL(ONE, z);
677 QUERY( (y & ~mask = y) =>
678 ((x & ~mask = y) <=> (x = y OR x = (y | mask)))
679 );
680 QUERY( (y | mask = y) =>
681 ((x | mask = y) <=> (x = y OR x = (y & ~mask)))
682 );
683 */
684
685 // Please note that each pattern must be a dual implication (<--> or
686 // iff). One directional implication can create spurious matches. If the
687 // implication is only one-way, an unsatisfiable condition on the left
688 // side can imply a satisfiable condition on the right side. Dual
689 // implication ensures that satisfiable conditions are transformed to
690 // other satisfiable conditions and unsatisfiable conditions are
691 // transformed to other unsatisfiable conditions.
692
693 // Here is a concrete example of a unsatisfiable condition on the left
694 // implying a satisfiable condition on the right:
695 //
696 // mask = (1 << z)
697 // (x & ~mask) == y --> (x == y || x == (y | mask))
698 //
699 // Substituting y = 3, z = 0 yields:
700 // (x & -2) == 3 --> (x == 3 || x == 2)
701
702 // Pattern match a special case:
703 /*
704 QUERY( (y & ~mask = y) =>
705 ((x & ~mask = y) <=> (x = y OR x = (y | mask)))
706 );
707 */
708 if (match(ICI->getOperand(0),
709 m_And(m_Value(RHSVal), m_APInt(RHSC)))) {
710 APInt Mask = ~*RHSC;
711 if (Mask.isPowerOf2() && (C->getValue() & ~Mask) == C->getValue()) {
712 // If we already have a value for the switch, it has to match!
713 if (!setValueOnce(RHSVal))
714 return false;
715
716 Vals.push_back(C);
717 Vals.push_back(
718 ConstantInt::get(C->getContext(),
719 C->getValue() | Mask));
720 UsedICmps++;
721 return true;
722 }
723 }
724
725 // Pattern match a special case:
726 /*
727 QUERY( (y | mask = y) =>
728 ((x | mask = y) <=> (x = y OR x = (y & ~mask)))
729 );
730 */
731 if (match(ICI->getOperand(0),
732 m_Or(m_Value(RHSVal), m_APInt(RHSC)))) {
733 APInt Mask = *RHSC;
734 if (Mask.isPowerOf2() && (C->getValue() | Mask) == C->getValue()) {
735 // If we already have a value for the switch, it has to match!
736 if (!setValueOnce(RHSVal))
737 return false;
738
739 Vals.push_back(C);
740 Vals.push_back(ConstantInt::get(C->getContext(),
741 C->getValue() & ~Mask));
742 UsedICmps++;
743 return true;
744 }
745 }
746
747 // If we already have a value for the switch, it has to match!
748 if (!setValueOnce(ICI->getOperand(0)))
749 return false;
750
751 UsedICmps++;
752 Vals.push_back(C);
753 return true;
754 }
755
756 // If we have "x ult 3", for example, then we can add 0,1,2 to the set.
757 ConstantRange Span =
759
760 // Shift the range if the compare is fed by an add. This is the range
761 // compare idiom as emitted by instcombine.
762 Value *CandidateVal = I->getOperand(0);
763 if (match(I->getOperand(0), m_Add(m_Value(RHSVal), m_APInt(RHSC)))) {
764 Span = Span.subtract(*RHSC);
765 CandidateVal = RHSVal;
766 }
767
768 // If this is an and/!= check, then we are looking to build the set of
769 // value that *don't* pass the and chain. I.e. to turn "x ugt 2" into
770 // x != 0 && x != 1.
771 if (!isEQ)
772 Span = Span.inverse();
773
774 // If there are a ton of values, we don't want to make a ginormous switch.
775 if (Span.isSizeLargerThan(8) || Span.isEmptySet()) {
776 return false;
777 }
778
779 // If we already have a value for the switch, it has to match!
780 if (!setValueOnce(CandidateVal))
781 return false;
782
783 // Add all values from the range to the set
784 APInt Tmp = Span.getLower();
785 do
786 Vals.push_back(ConstantInt::get(I->getContext(), Tmp));
787 while (++Tmp != Span.getUpper());
788
789 UsedICmps++;
790 return true;
791 }
792
793 /// Given a potentially 'or'd or 'and'd together collection of icmp
794 /// eq/ne/lt/gt instructions that compare a value against a constant, extract
795 /// the value being compared, and stick the list constants into the Vals
796 /// vector.
797 /// One "Extra" case is allowed to differ from the other.
798 void gather(Value *V) {
799 Value *Op0, *Op1;
800 if (match(V, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
801 IsEq = true;
802 else if (match(V, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
803 IsEq = false;
804 else
805 return;
806 // Keep a stack (SmallVector for efficiency) for depth-first traversal
807 SmallVector<Value *, 8> DFT{Op0, Op1};
808 SmallPtrSet<Value *, 8> Visited{V, Op0, Op1};
809
810 while (!DFT.empty()) {
811 V = DFT.pop_back_val();
812
813 if (Instruction *I = dyn_cast<Instruction>(V)) {
814 // If it is a || (or && depending on isEQ), process the operands.
815 if (IsEq ? match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1)))
816 : match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
817 if (Visited.insert(Op1).second)
818 DFT.push_back(Op1);
819 if (Visited.insert(Op0).second)
820 DFT.push_back(Op0);
821
822 continue;
823 }
824
825 // Try to match the current instruction
826 if (matchInstruction(I, IsEq))
827 // Match succeed, continue the loop
828 continue;
829 }
830
831 // One element of the sequence of || (or &&) could not be match as a
832 // comparison against the same value as the others.
833 // We allow only one "Extra" case to be checked before the switch
834 if (!Extra) {
835 Extra = V;
836 continue;
837 }
838 // Failed to parse a proper sequence, abort now
839 CompValue = nullptr;
840 break;
841 }
842 }
843};
844
845} // end anonymous namespace
846
848 MemorySSAUpdater *MSSAU = nullptr) {
849 Instruction *Cond = nullptr;
851 Cond = dyn_cast<Instruction>(SI->getCondition());
852 } else if (CondBrInst *BI = dyn_cast<CondBrInst>(TI)) {
853 Cond = dyn_cast<Instruction>(BI->getCondition());
854 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(TI)) {
855 Cond = dyn_cast<Instruction>(IBI->getAddress());
856 }
857
858 TI->eraseFromParent();
859 if (Cond)
861}
862
863/// Return true if the specified terminator checks
864/// to see if a value is equal to constant integer value.
865Value *SimplifyCFGOpt::isValueEqualityComparison(Instruction *TI) {
866 Value *CV = nullptr;
867 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
868 // Do not permit merging of large switch instructions into their
869 // predecessors unless there is only one predecessor.
870 if (!SI->getParent()->hasNPredecessorsOrMore(128 / SI->getNumSuccessors()))
871 CV = SI->getCondition();
872 } else if (CondBrInst *BI = dyn_cast<CondBrInst>(TI))
873 if (BI->getCondition()->hasOneUse()) {
874 if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) {
875 if (ICI->isEquality() && getConstantInt(ICI->getOperand(1), DL))
876 CV = ICI->getOperand(0);
877 } else if (auto *Trunc = dyn_cast<TruncInst>(BI->getCondition())) {
878 if (Trunc->hasNoUnsignedWrap())
879 CV = Trunc->getOperand(0);
880 }
881 }
882
883 // Unwrap any lossless ptrtoint cast (except for unstable pointers).
884 if (CV) {
885 if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {
886 Value *Ptr = PTII->getPointerOperand();
887 if (DL.hasUnstableRepresentation(Ptr->getType()))
888 return CV;
889 if (PTII->getType() == DL.getIntPtrType(Ptr->getType()))
890 CV = Ptr;
891 }
892 }
893 return CV;
894}
895
896/// Given a value comparison instruction,
897/// decode all of the 'cases' that it represents and return the 'default' block.
898BasicBlock *SimplifyCFGOpt::getValueEqualityComparisonCases(
899 Instruction *TI, std::vector<ValueEqualityComparisonCase> &Cases) {
900 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
901 Cases.reserve(SI->getNumCases());
902 for (auto Case : SI->cases())
903 Cases.push_back(ValueEqualityComparisonCase(Case.getCaseValue(),
904 Case.getCaseSuccessor()));
905 return SI->getDefaultDest();
906 }
907
908 CondBrInst *BI = cast<CondBrInst>(TI);
909 Value *Cond = BI->getCondition();
910 ICmpInst::Predicate Pred;
911 ConstantInt *C;
912 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
913 Pred = ICI->getPredicate();
914 C = getConstantInt(ICI->getOperand(1), DL);
915 } else {
916 Pred = ICmpInst::ICMP_NE;
917 auto *Trunc = cast<TruncInst>(Cond);
918 C = ConstantInt::get(cast<IntegerType>(Trunc->getOperand(0)->getType()), 0);
919 }
920 BasicBlock *Succ = BI->getSuccessor(Pred == ICmpInst::ICMP_NE);
921 Cases.push_back(ValueEqualityComparisonCase(C, Succ));
922 return BI->getSuccessor(Pred == ICmpInst::ICMP_EQ);
923}
924
925/// Given a vector of bb/value pairs, remove any entries
926/// in the list that match the specified block.
927static void
929 std::vector<ValueEqualityComparisonCase> &Cases) {
930 llvm::erase(Cases, BB);
931}
932
933/// Return true if there are any keys in C1 that exist in C2 as well.
934static bool valuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
935 std::vector<ValueEqualityComparisonCase> &C2) {
936 std::vector<ValueEqualityComparisonCase> *V1 = &C1, *V2 = &C2;
937
938 // Make V1 be smaller than V2.
939 if (V1->size() > V2->size())
940 std::swap(V1, V2);
941
942 if (V1->empty())
943 return false;
944 if (V1->size() == 1) {
945 // Just scan V2.
946 ConstantInt *TheVal = (*V1)[0].Value;
947 for (const ValueEqualityComparisonCase &VECC : *V2)
948 if (TheVal == VECC.Value)
949 return true;
950 }
951
952 // Otherwise, just sort both lists and compare element by element.
953 array_pod_sort(V1->begin(), V1->end());
954 array_pod_sort(V2->begin(), V2->end());
955 unsigned i1 = 0, i2 = 0, e1 = V1->size(), e2 = V2->size();
956 while (i1 != e1 && i2 != e2) {
957 if ((*V1)[i1].Value == (*V2)[i2].Value)
958 return true;
959 if ((*V1)[i1].Value < (*V2)[i2].Value)
960 ++i1;
961 else
962 ++i2;
963 }
964 return false;
965}
966
967/// If TI is known to be a terminator instruction and its block is known to
968/// only have a single predecessor block, check to see if that predecessor is
969/// also a value comparison with the same value, and if that comparison
970/// determines the outcome of this comparison. If so, simplify TI. This does a
971/// very limited form of jump threading.
972bool SimplifyCFGOpt::simplifyEqualityComparisonWithOnlyPredecessor(
973 Instruction *TI, BasicBlock *Pred, IRBuilder<> &Builder) {
974 Value *PredVal = isValueEqualityComparison(Pred->getTerminator());
975 if (!PredVal)
976 return false; // Not a value comparison in predecessor.
977
978 Value *ThisVal = isValueEqualityComparison(TI);
979 assert(ThisVal && "This isn't a value comparison!!");
980 if (ThisVal != PredVal)
981 return false; // Different predicates.
982
983 // TODO: Preserve branch weight metadata, similarly to how
984 // foldValueComparisonIntoPredecessors preserves it.
985
986 // Find out information about when control will move from Pred to TI's block.
987 std::vector<ValueEqualityComparisonCase> PredCases;
988 BasicBlock *PredDef =
989 getValueEqualityComparisonCases(Pred->getTerminator(), PredCases);
990 eliminateBlockCases(PredDef, PredCases); // Remove default from cases.
991
992 // Find information about how control leaves this block.
993 std::vector<ValueEqualityComparisonCase> ThisCases;
994 BasicBlock *ThisDef = getValueEqualityComparisonCases(TI, ThisCases);
995 eliminateBlockCases(ThisDef, ThisCases); // Remove default from cases.
996
997 // If TI's block is the default block from Pred's comparison, potentially
998 // simplify TI based on this knowledge.
999 if (PredDef == TI->getParent()) {
1000 // If we are here, we know that the value is none of those cases listed in
1001 // PredCases. If there are any cases in ThisCases that are in PredCases, we
1002 // can simplify TI.
1003 if (!valuesOverlap(PredCases, ThisCases))
1004 return false;
1005
1006 if (isa<CondBrInst>(TI)) {
1007 // Okay, one of the successors of this condbr is dead. Convert it to a
1008 // uncond br.
1009 assert(ThisCases.size() == 1 && "Branch can only have one case!");
1010 // Insert the new branch.
1011 Instruction *NI = Builder.CreateBr(ThisDef);
1012 (void)NI;
1013
1014 // Remove PHI node entries for the dead edge.
1015 ThisCases[0].Dest->removePredecessor(PredDef);
1016
1017 LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
1018 << "Through successor TI: " << *TI << "Leaving: " << *NI
1019 << "\n");
1020
1022
1023 if (DTU)
1024 DTU->applyUpdates(
1025 {{DominatorTree::Delete, PredDef, ThisCases[0].Dest}});
1026
1027 return true;
1028 }
1029
1030 SwitchInstProfUpdateWrapper SI = *cast<SwitchInst>(TI);
1031 // Okay, TI has cases that are statically dead, prune them away.
1032 SmallPtrSet<Constant *, 16> DeadCases;
1033 for (const ValueEqualityComparisonCase &Case : PredCases)
1034 DeadCases.insert(Case.Value);
1035
1036 LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
1037 << "Through successor TI: " << *TI);
1038
1039 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
1040 for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) {
1041 --i;
1042 auto *Successor = i->getCaseSuccessor();
1043 if (DTU)
1044 ++NumPerSuccessorCases[Successor];
1045 if (DeadCases.count(i->getCaseValue())) {
1046 Successor->removePredecessor(PredDef);
1047 SI.removeCase(i);
1048 if (DTU)
1049 --NumPerSuccessorCases[Successor];
1050 }
1051 }
1052
1053 if (DTU) {
1054 std::vector<DominatorTree::UpdateType> Updates;
1055 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
1056 if (I.second == 0)
1057 Updates.push_back({DominatorTree::Delete, PredDef, I.first});
1058 DTU->applyUpdates(Updates);
1059 }
1060
1061 LLVM_DEBUG(dbgs() << "Leaving: " << *TI << "\n");
1062 return true;
1063 }
1064
1065 // Otherwise, TI's block must correspond to some matched value. Find out
1066 // which value (or set of values) this is.
1067 ConstantInt *TIV = nullptr;
1068 BasicBlock *TIBB = TI->getParent();
1069 for (const auto &[Value, Dest] : PredCases)
1070 if (Dest == TIBB) {
1071 if (TIV)
1072 return false; // Cannot handle multiple values coming to this block.
1073 TIV = Value;
1074 }
1075 assert(TIV && "No edge from pred to succ?");
1076
1077 // Okay, we found the one constant that our value can be if we get into TI's
1078 // BB. Find out which successor will unconditionally be branched to.
1079 BasicBlock *TheRealDest = nullptr;
1080 for (const auto &[Value, Dest] : ThisCases)
1081 if (Value == TIV) {
1082 TheRealDest = Dest;
1083 break;
1084 }
1085
1086 // If not handled by any explicit cases, it is handled by the default case.
1087 if (!TheRealDest)
1088 TheRealDest = ThisDef;
1089
1090 SmallPtrSet<BasicBlock *, 2> RemovedSuccs;
1091
1092 // Remove PHI node entries for dead edges.
1093 BasicBlock *CheckEdge = TheRealDest;
1094 for (BasicBlock *Succ : successors(TIBB))
1095 if (Succ != CheckEdge) {
1096 if (Succ != TheRealDest)
1097 RemovedSuccs.insert(Succ);
1098 Succ->removePredecessor(TIBB);
1099 } else
1100 CheckEdge = nullptr;
1101
1102 // Insert the new branch.
1103 Instruction *NI = Builder.CreateBr(TheRealDest);
1104 (void)NI;
1105
1106 LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
1107 << "Through successor TI: " << *TI << "Leaving: " << *NI
1108 << "\n");
1109
1111 if (DTU) {
1112 SmallVector<DominatorTree::UpdateType, 2> Updates;
1113 Updates.reserve(RemovedSuccs.size());
1114 for (auto *RemovedSucc : RemovedSuccs)
1115 Updates.push_back({DominatorTree::Delete, TIBB, RemovedSucc});
1116 DTU->applyUpdates(Updates);
1117 }
1118 return true;
1119}
1120
1121namespace {
1122
1123/// This class implements a stable ordering of constant
1124/// integers that does not depend on their address. This is important for
1125/// applications that sort ConstantInt's to ensure uniqueness.
1126struct ConstantIntOrdering {
1127 bool operator()(const ConstantInt *LHS, const ConstantInt *RHS) const {
1128 return LHS->getValue().ult(RHS->getValue());
1129 }
1130};
1131
1132} // end anonymous namespace
1133
1135 ConstantInt *const *P2) {
1136 const ConstantInt *LHS = *P1;
1137 const ConstantInt *RHS = *P2;
1138 if (LHS == RHS)
1139 return 0;
1140 return LHS->getValue().ult(RHS->getValue()) ? 1 : -1;
1141}
1142
1143/// Get Weights of a given terminator, the default weight is at the front
1144/// of the vector. If TI is a conditional eq, we need to swap the branch-weight
1145/// metadata.
1147 SmallVectorImpl<uint64_t> &Weights) {
1148 MDNode *MD = TI->getMetadata(LLVMContext::MD_prof);
1149 assert(MD && "Invalid branch-weight metadata");
1150 extractFromBranchWeightMD64(MD, Weights);
1151
1152 // If TI is a conditional eq, the default case is the false case,
1153 // and the corresponding branch-weight data is at index 2. We swap the
1154 // default weight to be the first entry.
1155 if (CondBrInst *BI = dyn_cast<CondBrInst>(TI)) {
1156 assert(Weights.size() == 2);
1157 auto *ICI = dyn_cast<ICmpInst>(BI->getCondition());
1158 if (!ICI)
1159 return;
1160
1161 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
1162 std::swap(Weights.front(), Weights.back());
1163 }
1164}
1165
1167 BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap) {
1168 Instruction *PTI = PredBlock->getTerminator();
1169
1170 // If we have bonus instructions, clone them into the predecessor block.
1171 // Note that there may be multiple predecessor blocks, so we cannot move
1172 // bonus instructions to a predecessor block.
1173 for (Instruction &BonusInst : *BB) {
1174 if (BonusInst.isTerminator())
1175 continue;
1176
1177 Instruction *NewBonusInst = BonusInst.clone();
1178
1179 if (!NewBonusInst->getDebugLoc().isSameSourceLocation(PTI->getDebugLoc())) {
1180 // Unless the instruction has the same !dbg location as the original
1181 // branch, drop it. When we fold the bonus instructions we want to make
1182 // sure we reset their debug locations in order to avoid stepping on
1183 // dead code caused by folding dead branches.
1184 NewBonusInst->setDebugLoc(DebugLoc::getDropped());
1185 } else if (const DebugLoc &DL = NewBonusInst->getDebugLoc()) {
1186 mapAtomInstance(DL, VMap);
1187 }
1188
1189 RemapInstruction(NewBonusInst, VMap,
1191
1192 // If we speculated an instruction, we need to drop any metadata that may
1193 // result in undefined behavior, as the metadata might have been valid
1194 // only given the branch precondition.
1195 // Similarly strip attributes on call parameters that may cause UB in
1196 // location the call is moved to.
1197 NewBonusInst->dropUBImplyingAttrsAndMetadata();
1198
1199 NewBonusInst->insertInto(PredBlock, PTI->getIterator());
1200 auto Range = NewBonusInst->cloneDebugInfoFrom(&BonusInst);
1201 RemapDbgRecordRange(NewBonusInst->getModule(), Range, VMap,
1203
1204 NewBonusInst->takeName(&BonusInst);
1205 BonusInst.setName(NewBonusInst->getName() + ".old");
1206 VMap[&BonusInst] = NewBonusInst;
1207
1208 // Update (liveout) uses of bonus instructions,
1209 // now that the bonus instruction has been cloned into predecessor.
1210 // Note that we expect to be in a block-closed SSA form for this to work!
1211 for (Use &U : make_early_inc_range(BonusInst.uses())) {
1212 auto *UI = cast<Instruction>(U.getUser());
1213 auto *PN = dyn_cast<PHINode>(UI);
1214 if (!PN) {
1215 assert(UI->getParent() == BB && BonusInst.comesBefore(UI) &&
1216 "If the user is not a PHI node, then it should be in the same "
1217 "block as, and come after, the original bonus instruction.");
1218 continue; // Keep using the original bonus instruction.
1219 }
1220 // Is this the block-closed SSA form PHI node?
1221 if (PN->getIncomingBlock(U) == BB)
1222 continue; // Great, keep using the original bonus instruction.
1223 // The only other alternative is an "use" when coming from
1224 // the predecessor block - here we should refer to the cloned bonus instr.
1225 assert(PN->getIncomingBlock(U) == PredBlock &&
1226 "Not in block-closed SSA form?");
1227 U.set(NewBonusInst);
1228 }
1229 }
1230
1231 // Key Instructions: We may have propagated atom info into the pred. If the
1232 // pred's terminator already has atom info do nothing as merging would drop
1233 // one atom group anyway. If it doesn't, propagte the remapped atom group
1234 // from BB's terminator.
1235 if (auto &PredDL = PTI->getDebugLoc()) {
1236 auto &DL = BB->getTerminator()->getDebugLoc();
1237 if (!PredDL->getAtomGroup() && DL && DL->getAtomGroup() &&
1238 PredDL.isSameSourceLocation(DL)) {
1239 PTI->setDebugLoc(DL);
1240 RemapSourceAtom(PTI, VMap);
1241 }
1242 }
1243}
1244
1245bool SimplifyCFGOpt::performValueComparisonIntoPredecessorFolding(
1246 Instruction *TI, Value *&CV, Instruction *PTI, IRBuilder<> &Builder) {
1247 BasicBlock *BB = TI->getParent();
1248 BasicBlock *Pred = PTI->getParent();
1249
1251
1252 // Figure out which 'cases' to copy from SI to PSI.
1253 std::vector<ValueEqualityComparisonCase> BBCases;
1254 BasicBlock *BBDefault = getValueEqualityComparisonCases(TI, BBCases);
1255
1256 std::vector<ValueEqualityComparisonCase> PredCases;
1257 BasicBlock *PredDefault = getValueEqualityComparisonCases(PTI, PredCases);
1258
1259 // Based on whether the default edge from PTI goes to BB or not, fill in
1260 // PredCases and PredDefault with the new switch cases we would like to
1261 // build.
1262 SmallMapVector<BasicBlock *, int, 8> NewSuccessors;
1263
1264 // Update the branch weight metadata along the way
1265 SmallVector<uint64_t, 8> Weights;
1266 bool PredHasWeights = hasBranchWeightMD(*PTI);
1267 bool SuccHasWeights = hasBranchWeightMD(*TI);
1268
1269 if (PredHasWeights) {
1270 getBranchWeights(PTI, Weights);
1271 // branch-weight metadata is inconsistent here.
1272 if (Weights.size() != 1 + PredCases.size())
1273 PredHasWeights = SuccHasWeights = false;
1274 } else if (SuccHasWeights)
1275 // If there are no predecessor weights but there are successor weights,
1276 // populate Weights with 1, which will later be scaled to the sum of
1277 // successor's weights
1278 Weights.assign(1 + PredCases.size(), 1);
1279
1280 SmallVector<uint64_t, 8> SuccWeights;
1281 if (SuccHasWeights) {
1282 getBranchWeights(TI, SuccWeights);
1283 // branch-weight metadata is inconsistent here.
1284 if (SuccWeights.size() != 1 + BBCases.size())
1285 PredHasWeights = SuccHasWeights = false;
1286 } else if (PredHasWeights)
1287 SuccWeights.assign(1 + BBCases.size(), 1);
1288
1289 if (PredDefault == BB) {
1290 // If this is the default destination from PTI, only the edges in TI
1291 // that don't occur in PTI, or that branch to BB will be activated.
1292 std::set<ConstantInt *, ConstantIntOrdering> PTIHandled;
1293 for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
1294 if (PredCases[i].Dest != BB)
1295 PTIHandled.insert(PredCases[i].Value);
1296 else {
1297 // The default destination is BB, we don't need explicit targets.
1298 std::swap(PredCases[i], PredCases.back());
1299
1300 if (PredHasWeights || SuccHasWeights) {
1301 // Increase weight for the default case.
1302 Weights[0] += Weights[i + 1];
1303 std::swap(Weights[i + 1], Weights.back());
1304 Weights.pop_back();
1305 }
1306
1307 PredCases.pop_back();
1308 --i;
1309 --e;
1310 }
1311
1312 // Reconstruct the new switch statement we will be building.
1313 if (PredDefault != BBDefault) {
1314 PredDefault->removePredecessor(Pred);
1315 if (DTU && PredDefault != BB)
1316 Updates.push_back({DominatorTree::Delete, Pred, PredDefault});
1317 PredDefault = BBDefault;
1318 ++NewSuccessors[BBDefault];
1319 }
1320
1321 unsigned CasesFromPred = Weights.size();
1322 uint64_t ValidTotalSuccWeight = 0;
1323 for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
1324 if (!PTIHandled.count(BBCases[i].Value) && BBCases[i].Dest != BBDefault) {
1325 PredCases.push_back(BBCases[i]);
1326 ++NewSuccessors[BBCases[i].Dest];
1327 if (SuccHasWeights || PredHasWeights) {
1328 // The default weight is at index 0, so weight for the ith case
1329 // should be at index i+1. Scale the cases from successor by
1330 // PredDefaultWeight (Weights[0]).
1331 Weights.push_back(Weights[0] * SuccWeights[i + 1]);
1332 ValidTotalSuccWeight += SuccWeights[i + 1];
1333 }
1334 }
1335
1336 if (SuccHasWeights || PredHasWeights) {
1337 ValidTotalSuccWeight += SuccWeights[0];
1338 // Scale the cases from predecessor by ValidTotalSuccWeight.
1339 for (unsigned i = 1; i < CasesFromPred; ++i)
1340 Weights[i] *= ValidTotalSuccWeight;
1341 // Scale the default weight by SuccDefaultWeight (SuccWeights[0]).
1342 Weights[0] *= SuccWeights[0];
1343 }
1344 } else {
1345 // If this is not the default destination from PSI, only the edges
1346 // in SI that occur in PSI with a destination of BB will be
1347 // activated.
1348 std::set<ConstantInt *, ConstantIntOrdering> PTIHandled;
1349 std::map<ConstantInt *, uint64_t> WeightsForHandled;
1350 for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
1351 if (PredCases[i].Dest == BB) {
1352 PTIHandled.insert(PredCases[i].Value);
1353
1354 if (PredHasWeights || SuccHasWeights) {
1355 WeightsForHandled[PredCases[i].Value] = Weights[i + 1];
1356 std::swap(Weights[i + 1], Weights.back());
1357 Weights.pop_back();
1358 }
1359
1360 std::swap(PredCases[i], PredCases.back());
1361 PredCases.pop_back();
1362 --i;
1363 --e;
1364 }
1365
1366 // Okay, now we know which constants were sent to BB from the
1367 // predecessor. Figure out where they will all go now.
1368 for (const ValueEqualityComparisonCase &Case : BBCases)
1369 if (PTIHandled.count(Case.Value)) {
1370 // If this is one we are capable of getting...
1371 if (PredHasWeights || SuccHasWeights)
1372 Weights.push_back(WeightsForHandled[Case.Value]);
1373 PredCases.push_back(Case);
1374 ++NewSuccessors[Case.Dest];
1375 PTIHandled.erase(Case.Value); // This constant is taken care of
1376 }
1377
1378 // If there are any constants vectored to BB that TI doesn't handle,
1379 // they must go to the default destination of TI.
1380 for (ConstantInt *I : PTIHandled) {
1381 if (PredHasWeights || SuccHasWeights)
1382 Weights.push_back(WeightsForHandled[I]);
1383 PredCases.push_back(ValueEqualityComparisonCase(I, BBDefault));
1384 ++NewSuccessors[BBDefault];
1385 }
1386 }
1387
1388 // Okay, at this point, we know which new successor Pred will get. Make
1389 // sure we update the number of entries in the PHI nodes for these
1390 // successors.
1391 SmallPtrSet<BasicBlock *, 2> SuccsOfPred;
1392 if (DTU) {
1393 SuccsOfPred = {llvm::from_range, successors(Pred)};
1394 Updates.reserve(Updates.size() + NewSuccessors.size());
1395 }
1396 for (const std::pair<BasicBlock *, int /*Num*/> &NewSuccessor :
1397 NewSuccessors) {
1398 for (auto I : seq(NewSuccessor.second)) {
1399 (void)I;
1400 addPredecessorToBlock(NewSuccessor.first, Pred, BB);
1401 }
1402 if (DTU && !SuccsOfPred.contains(NewSuccessor.first))
1403 Updates.push_back({DominatorTree::Insert, Pred, NewSuccessor.first});
1404 }
1405
1406 Builder.SetInsertPoint(PTI);
1407 // Convert pointer to int before we switch.
1408 if (CV->getType()->isPointerTy()) {
1409 assert(!DL.hasUnstableRepresentation(CV->getType()) &&
1410 "Should not end up here with unstable pointers");
1411 CV =
1412 Builder.CreatePtrToInt(CV, DL.getIntPtrType(CV->getType()), "magicptr");
1413 }
1414
1415 // Now that the successors are updated, create the new Switch instruction.
1416 SwitchInst *NewSI = Builder.CreateSwitch(CV, PredDefault, PredCases.size());
1417 NewSI->setDebugLoc(PTI->getDebugLoc());
1418 for (ValueEqualityComparisonCase &V : PredCases)
1419 NewSI->addCase(V.Value, V.Dest);
1420
1421 if (PredHasWeights || SuccHasWeights)
1422 setFittedBranchWeights(*NewSI, Weights, /*IsExpected=*/false,
1423 /*ElideAllZero=*/true);
1424
1426
1427 // Okay, last check. If BB is still a successor of PSI, then we must
1428 // have an infinite loop case. If so, add an infinitely looping block
1429 // to handle the case to preserve the behavior of the code.
1430 BasicBlock *InfLoopBlock = nullptr;
1431 for (unsigned i = 0, e = NewSI->getNumSuccessors(); i != e; ++i)
1432 if (NewSI->getSuccessor(i) == BB) {
1433 if (!InfLoopBlock) {
1434 // Insert it at the end of the function, because it's either code,
1435 // or it won't matter if it's hot. :)
1436 InfLoopBlock =
1437 BasicBlock::Create(BB->getContext(), "infloop", BB->getParent());
1438 UncondBrInst::Create(InfLoopBlock, InfLoopBlock);
1439 if (DTU)
1440 Updates.push_back(
1441 {DominatorTree::Insert, InfLoopBlock, InfLoopBlock});
1442 }
1443 NewSI->setSuccessor(i, InfLoopBlock);
1444 }
1445
1446 if (DTU) {
1447 if (InfLoopBlock)
1448 Updates.push_back({DominatorTree::Insert, Pred, InfLoopBlock});
1449
1450 Updates.push_back({DominatorTree::Delete, Pred, BB});
1451
1452 DTU->applyUpdates(Updates);
1453 }
1454
1455 ++NumFoldValueComparisonIntoPredecessors;
1456 return true;
1457}
1458
1459/// The specified terminator is a value equality comparison instruction
1460/// (either a switch or a branch on "X == c").
1461/// See if any of the predecessors of the terminator block are value comparisons
1462/// on the same value. If so, and if safe to do so, fold them together.
1463bool SimplifyCFGOpt::foldValueComparisonIntoPredecessors(Instruction *TI,
1464 IRBuilder<> &Builder) {
1465 BasicBlock *BB = TI->getParent();
1466 Value *CV = isValueEqualityComparison(TI); // CondVal
1467 assert(CV && "Not a comparison?");
1468
1469 bool Changed = false;
1470
1471 SmallSetVector<BasicBlock *, 16> Preds(pred_begin(BB), pred_end(BB));
1472 while (!Preds.empty()) {
1473 BasicBlock *Pred = Preds.pop_back_val();
1474 Instruction *PTI = Pred->getTerminator();
1475
1476 // Don't try to fold into itself.
1477 if (Pred == BB)
1478 continue;
1479
1480 // See if the predecessor is a comparison with the same value.
1481 Value *PCV = isValueEqualityComparison(PTI); // PredCondVal
1482 if (PCV != CV)
1483 continue;
1484
1485 SmallSetVector<BasicBlock *, 4> FailBlocks;
1486 if (!safeToMergeTerminators(TI, PTI, &FailBlocks)) {
1487 for (auto *Succ : FailBlocks) {
1488 if (!SplitBlockPredecessors(Succ, TI->getParent(), ".fold.split", DTU))
1489 return false;
1490 }
1491 }
1492
1493 performValueComparisonIntoPredecessorFolding(TI, CV, PTI, Builder);
1494 Changed = true;
1495 }
1496 return Changed;
1497}
1498
1499// If we would need to insert a select that uses the value of this invoke
1500// (comments in hoistSuccIdenticalTerminatorToSwitchOrIf explain why we would
1501// need to do this), we can't hoist the invoke, as there is nowhere to put the
1502// select in this case.
1504 Instruction *I1, Instruction *I2) {
1505 for (BasicBlock *Succ : successors(BB1)) {
1506 for (const PHINode &PN : Succ->phis()) {
1507 Value *BB1V = PN.getIncomingValueForBlock(BB1);
1508 Value *BB2V = PN.getIncomingValueForBlock(BB2);
1509 if (BB1V != BB2V && (BB1V == I1 || BB2V == I2)) {
1510 return false;
1511 }
1512 }
1513 }
1514 return true;
1515}
1516
1517// Get interesting characteristics of instructions that
1518// `hoistCommonCodeFromSuccessors` didn't hoist. They restrict what kind of
1519// instructions can be reordered across.
1525
1527 unsigned Flags = 0;
1528 if (I->mayReadFromMemory())
1529 Flags |= SkipReadMem;
1530 // We can't arbitrarily move around allocas, e.g. moving allocas (especially
1531 // inalloca) across stacksave/stackrestore boundaries.
1532 if (I->mayHaveSideEffects() || isa<AllocaInst>(I))
1533 Flags |= SkipSideEffect;
1535 Flags |= SkipImplicitControlFlow;
1536 return Flags;
1537}
1538
1539// Returns true if it is safe to reorder an instruction across preceding
1540// instructions in a basic block.
1541static bool isSafeToHoistInstr(Instruction *I, unsigned Flags) {
1542 // Don't reorder a store over a load.
1543 if ((Flags & SkipReadMem) && I->mayWriteToMemory())
1544 return false;
1545
1546 // If we have seen an instruction with side effects, it's unsafe to reorder an
1547 // instruction which reads memory or itself has side effects.
1548 if ((Flags & SkipSideEffect) &&
1549 (I->mayReadFromMemory() || I->mayHaveSideEffects() || isa<AllocaInst>(I)))
1550 return false;
1551
1552 // Reordering across an instruction which does not necessarily transfer
1553 // control to the next instruction is speculation.
1555 return false;
1556
1557 // Hoisting of llvm.deoptimize is only legal together with the next return
1558 // instruction, which this pass is not always able to do.
1559 if (auto *CB = dyn_cast<CallBase>(I))
1560 if (CB->getIntrinsicID() == Intrinsic::experimental_deoptimize)
1561 return false;
1562
1563 // It's also unsafe/illegal to hoist an instruction above its instruction
1564 // operands
1565 BasicBlock *BB = I->getParent();
1566 for (Value *Op : I->operands()) {
1567 if (auto *J = dyn_cast<Instruction>(Op))
1568 if (J->getParent() == BB)
1569 return false;
1570 }
1571
1572 return true;
1573}
1574
1575static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified = false);
1576
1577/// Helper function for hoistCommonCodeFromSuccessors. Return true if identical
1578/// instructions \p I1 and \p I2 can and should be hoisted.
1580 const TargetTransformInfo &TTI) {
1581 // If we're going to hoist a call, make sure that the two instructions
1582 // we're commoning/hoisting are both marked with musttail, or neither of
1583 // them is marked as such. Otherwise, we might end up in a situation where
1584 // we hoist from a block where the terminator is a `ret` to a block where
1585 // the terminator is a `br`, and `musttail` calls expect to be followed by
1586 // a return.
1587 auto *C1 = dyn_cast<CallInst>(I1);
1588 auto *C2 = dyn_cast<CallInst>(I2);
1589 if (C1 && C2)
1590 if (C1->isMustTailCall() != C2->isMustTailCall())
1591 return false;
1592
1593 if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2))
1594 return false;
1595
1596 // If any of the two call sites has nomerge or convergent attribute, stop
1597 // hoisting.
1598 if (const auto *CB1 = dyn_cast<CallBase>(I1))
1599 if (CB1->cannotMerge() || CB1->isConvergent())
1600 return false;
1601 if (const auto *CB2 = dyn_cast<CallBase>(I2))
1602 if (CB2->cannotMerge() || CB2->isConvergent())
1603 return false;
1604
1605 return true;
1606}
1607
1608/// Hoists DbgVariableRecords from \p I1 and \p OtherInstrs that are identical
1609/// in lock-step to \p TI. This matches how dbg.* intrinsics are hoisting in
1610/// hoistCommonCodeFromSuccessors. e.g. The input:
1611/// I1 DVRs: { x, z },
1612/// OtherInsts: { I2 DVRs: { x, y, z } }
1613/// would result in hoisting only DbgVariableRecord x.
1615 Instruction *TI, Instruction *I1,
1616 SmallVectorImpl<Instruction *> &OtherInsts) {
1617 if (!I1->hasDbgRecords())
1618 return;
1619 using CurrentAndEndIt =
1620 std::pair<DbgRecord::self_iterator, DbgRecord::self_iterator>;
1621 // Vector of {Current, End} iterators.
1623 Itrs.reserve(OtherInsts.size() + 1);
1624 // Helper lambdas for lock-step checks:
1625 // Return true if this Current == End.
1626 auto atEnd = [](const CurrentAndEndIt &Pair) {
1627 return Pair.first == Pair.second;
1628 };
1629 // Return true if all Current are identical.
1630 auto allIdentical = [](const SmallVector<CurrentAndEndIt> &Itrs) {
1631 return all_of(make_first_range(ArrayRef(Itrs).drop_front()),
1633 return Itrs[0].first->isIdenticalToWhenDefined(*I);
1634 });
1635 };
1636
1637 // Collect the iterators.
1638 Itrs.push_back(
1639 {I1->getDbgRecordRange().begin(), I1->getDbgRecordRange().end()});
1640 for (Instruction *Other : OtherInsts) {
1641 if (!Other->hasDbgRecords())
1642 return;
1643 Itrs.push_back(
1644 {Other->getDbgRecordRange().begin(), Other->getDbgRecordRange().end()});
1645 }
1646
1647 // Iterate in lock-step until any of the DbgRecord lists are exausted. If
1648 // the lock-step DbgRecord are identical, hoist all of them to TI.
1649 // This replicates the dbg.* intrinsic behaviour in
1650 // hoistCommonCodeFromSuccessors.
1651 while (none_of(Itrs, atEnd)) {
1652 bool HoistDVRs = allIdentical(Itrs);
1653 for (CurrentAndEndIt &Pair : Itrs) {
1654 // Increment Current iterator now as we may be about to move the
1655 // DbgRecord.
1656 DbgRecord &DR = *Pair.first++;
1657 if (HoistDVRs) {
1658 DR.removeFromParent();
1659 TI->getParent()->insertDbgRecordBefore(&DR, TI->getIterator());
1660 }
1661 }
1662 }
1663}
1664
1666 const Instruction *I2) {
1667 if (I1->isIdenticalToWhenDefined(I2, /*IntersectAttrs=*/true))
1668 return true;
1669
1670 if (auto *Cmp1 = dyn_cast<CmpInst>(I1))
1671 if (auto *Cmp2 = dyn_cast<CmpInst>(I2))
1672 return Cmp1->getPredicate() == Cmp2->getSwappedPredicate() &&
1673 Cmp1->getOperand(0) == Cmp2->getOperand(1) &&
1674 Cmp1->getOperand(1) == Cmp2->getOperand(0);
1675
1676 if (I1->isCommutative() && I1->isSameOperationAs(I2)) {
1677 return I1->getOperand(0) == I2->getOperand(1) &&
1678 I1->getOperand(1) == I2->getOperand(0) &&
1679 equal(drop_begin(I1->operands(), 2), drop_begin(I2->operands(), 2));
1680 }
1681
1682 return false;
1683}
1684
1685/// If the target supports conditional faulting,
1686/// we look for the following pattern:
1687/// \code
1688/// BB:
1689/// ...
1690/// %cond = icmp ult %x, %y
1691/// br i1 %cond, label %TrueBB, label %FalseBB
1692/// FalseBB:
1693/// store i32 1, ptr %q, align 4
1694/// ...
1695/// TrueBB:
1696/// %maskedloadstore = load i32, ptr %b, align 4
1697/// store i32 %maskedloadstore, ptr %p, align 4
1698/// ...
1699/// \endcode
1700///
1701/// and transform it into:
1702///
1703/// \code
1704/// BB:
1705/// ...
1706/// %cond = icmp ult %x, %y
1707/// %maskedloadstore = cload i32, ptr %b, %cond
1708/// cstore i32 %maskedloadstore, ptr %p, %cond
1709/// cstore i32 1, ptr %q, ~%cond
1710/// br i1 %cond, label %TrueBB, label %FalseBB
1711/// FalseBB:
1712/// ...
1713/// TrueBB:
1714/// ...
1715/// \endcode
1716///
1717/// where cload/cstore are represented by llvm.masked.load/store intrinsics,
1718/// e.g.
1719///
1720/// \code
1721/// %vcond = bitcast i1 %cond to <1 x i1>
1722/// %v0 = call <1 x i32> @llvm.masked.load.v1i32.p0
1723/// (ptr %b, i32 4, <1 x i1> %vcond, <1 x i32> poison)
1724/// %maskedloadstore = bitcast <1 x i32> %v0 to i32
1725/// call void @llvm.masked.store.v1i32.p0
1726/// (<1 x i32> %v0, ptr %p, i32 4, <1 x i1> %vcond)
1727/// %cond.not = xor i1 %cond, true
1728/// %vcond.not = bitcast i1 %cond.not to <1 x i>
1729/// call void @llvm.masked.store.v1i32.p0
1730/// (<1 x i32> <i32 1>, ptr %q, i32 4, <1x i1> %vcond.not)
1731/// \endcode
1732///
1733/// So we need to turn hoisted load/store into cload/cstore.
1734///
1735/// \param BI The branch instruction.
1736/// \param SpeculatedConditionalLoadsStores The load/store instructions that
1737/// will be speculated.
1738/// \param Invert indicates if speculates FalseBB. Only used in triangle CFG.
1740 CondBrInst *BI,
1741 SmallVectorImpl<Instruction *> &SpeculatedConditionalLoadsStores,
1742 std::optional<bool> Invert, Instruction *Sel) {
1743 auto &Context = BI->getParent()->getContext();
1744 auto *VCondTy = FixedVectorType::get(Type::getInt1Ty(Context), 1);
1745 auto *Cond = BI->getCondition();
1746 // Construct the condition if needed.
1747 BasicBlock *BB = BI->getParent();
1748 Value *Mask = nullptr;
1749 Value *MaskFalse = nullptr;
1750 Value *MaskTrue = nullptr;
1751 if (Invert.has_value()) {
1752 IRBuilder<> Builder(Sel ? Sel : SpeculatedConditionalLoadsStores.back());
1753 Mask = Builder.CreateBitCast(
1754 *Invert ? Builder.CreateXor(Cond, ConstantInt::getTrue(Context)) : Cond,
1755 VCondTy);
1756 } else {
1757 IRBuilder<> Builder(BI);
1758 MaskFalse = Builder.CreateBitCast(
1759 Builder.CreateXor(Cond, ConstantInt::getTrue(Context)), VCondTy);
1760 MaskTrue = Builder.CreateBitCast(Cond, VCondTy);
1761 }
1762 auto PeekThroughBitcasts = [](Value *V) {
1763 while (auto *BitCast = dyn_cast<BitCastInst>(V))
1764 V = BitCast->getOperand(0);
1765 return V;
1766 };
1767 for (auto *I : SpeculatedConditionalLoadsStores) {
1768 IRBuilder<> Builder(Invert.has_value() ? I : BI);
1769 if (!Invert.has_value())
1770 Mask = I->getParent() == BI->getSuccessor(0) ? MaskTrue : MaskFalse;
1771 // We currently assume conditional faulting load/store is supported for
1772 // scalar types only when creating new instructions. This can be easily
1773 // extended for vector types in the future.
1774 assert(!getLoadStoreType(I)->isVectorTy() && "not implemented");
1775 auto *Op0 = I->getOperand(0);
1776 CallInst *MaskedLoadStore = nullptr;
1777 if (auto *LI = dyn_cast<LoadInst>(I)) {
1778 // Handle Load.
1779 auto *Ty = I->getType();
1780 PHINode *PN = nullptr;
1781 Value *PassThru = nullptr;
1782 if (Invert.has_value())
1783 for (User *U : I->users()) {
1784 if ((PN = dyn_cast<PHINode>(U))) {
1785 PassThru = Builder.CreateBitCast(
1786 PeekThroughBitcasts(PN->getIncomingValueForBlock(BB)),
1787 FixedVectorType::get(Ty, 1));
1788 } else if (auto *Ins = cast<Instruction>(U);
1789 Sel && Ins->getParent() == BB) {
1790 // This happens when store or/and a speculative instruction between
1791 // load and store were hoisted to the BB. Make sure the masked load
1792 // inserted before its use.
1793 // We assume there's one of such use.
1794 Builder.SetInsertPoint(Ins);
1795 }
1796 }
1797 MaskedLoadStore = Builder.CreateMaskedLoad(
1798 FixedVectorType::get(Ty, 1), Op0, LI->getAlign(), Mask, PassThru);
1799 Value *NewLoadStore = Builder.CreateBitCast(MaskedLoadStore, Ty);
1800 if (PN)
1801 PN->setIncomingValue(PN->getBasicBlockIndex(BB), NewLoadStore);
1802 I->replaceAllUsesWith(NewLoadStore);
1803 } else {
1804 // Handle Store.
1805 auto *StoredVal = Builder.CreateBitCast(
1806 PeekThroughBitcasts(Op0), FixedVectorType::get(Op0->getType(), 1));
1807 MaskedLoadStore = Builder.CreateMaskedStore(
1808 StoredVal, I->getOperand(1), cast<StoreInst>(I)->getAlign(), Mask);
1809 }
1810 // For non-debug metadata, only !annotation, !range, !nonnull and !align are
1811 // kept when hoisting (see Instruction::dropUBImplyingAttrsAndMetadata).
1812 //
1813 // !nonnull, !align : Not support pointer type, no need to keep.
1814 // !range: Load type is changed from scalar to vector, but the metadata on
1815 // vector specifies a per-element range, so the semantics stay the
1816 // same. Keep it.
1817 // !annotation: Not impact semantics. Keep it.
1818 if (const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1819 MaskedLoadStore->addRangeRetAttr(getConstantRangeFromMetadata(*Ranges));
1820 I->dropUBImplyingAttrsAndUnknownMetadata({LLVMContext::MD_annotation});
1821 // FIXME: DIAssignID is not supported for masked store yet.
1822 // (Verifier::visitDIAssignIDMetadata)
1824 I->eraseMetadataIf([](unsigned MDKind, MDNode *Node) {
1825 return Node->getMetadataID() == Metadata::DIAssignIDKind;
1826 });
1827 MaskedLoadStore->copyMetadata(*I);
1828 I->eraseFromParent();
1829 }
1830}
1831
// Returns true when \p I is a simple (non-volatile, non-atomic) load or store
// whose type the target supports for conditional-faulting load/store, so it
// can be hoisted and rewritten as a masked load/store.
// NOTE(review): the opening line of this signature (file line 1832 — the
// function name and first parameter, likely
// "static bool isSafeCheapLoadStore(const Instruction *I,") was lost in
// extraction; restore before compiling.
                                 const TargetTransformInfo &TTI) {
  // Not handle volatile or atomic.
  bool IsStore = false;
  if (auto *L = dyn_cast<LoadInst>(I)) {
    // Loads are eligible only when simple and the HoistLoadsWithCondFaulting
    // option is enabled.
    if (!L->isSimple() || !HoistLoadsWithCondFaulting)
      return false;
  } else if (auto *S = dyn_cast<StoreInst>(I)) {
    // Stores likewise, gated by HoistStoresWithCondFaulting.
    if (!S->isSimple() || !HoistStoresWithCondFaulting)
      return false;
    IsStore = true;
  } else
    return false;

  // llvm.masked.load/store use i32 for alignment while load/store use i64.
  // That's why we have the alignment limitation.
  // FIXME: Update the prototype of the intrinsics?
  return TTI.hasConditionalLoadStoreForType(getLoadStoreType(I), IsStore) &&
         // NOTE(review): the second operand of this '&&' (file line 1850 — an
         // alignment bound check, per the comment above) was lost in
         // extraction; restore before compiling.
}
1852
/// Hoist any common code in the successor blocks up into the block. This
/// function guarantees that BB dominates all successors. If AllInstsEqOnly is
/// given, only perform hoisting in case all successors blocks contain matching
/// instructions only. In that case, all instructions can be hoisted and the
/// original branch will be replaced and selects for PHIs are added.
bool SimplifyCFGOpt::hoistCommonCodeFromSuccessors(Instruction *TI,
                                                   bool AllInstsEqOnly) {
  // This does very trivial matching, with limited scanning, to find identical
  // instructions in the two blocks. In particular, we don't want to get into
  // O(N1*N2*...) situations here where Ni are the sizes of these successors. As
  // such, we currently just scan for obviously identical instructions in an
  // identical order, possibly separated by the same number of non-identical
  // instructions.
  BasicBlock *BB = TI->getParent();
  unsigned int SuccSize = succ_size(BB);
  if (SuccSize < 2)
    return false;

  // If either of the blocks has its address taken, then we can't do this fold,
  // because the code we'd hoist would no longer run when we jump into the block
  // by its address.
  SmallSetVector<BasicBlock *, 4> UniqueSuccessors(from_range, successors(BB));
  for (auto *Succ : UniqueSuccessors) {
    if (Succ->hasAddressTaken())
      return false;
    // Use getUniquePredecessor instead of getSinglePredecessor to support
    // multi-cases successors in switch.
    if (Succ->getUniquePredecessor())
      continue;
    // If Succ has >1 predecessors, continue to check if the Succ contains only
    // one `unreachable` inst. Since executing `unreachable` inst is an UB, we
    // can relax the condition based on the assumption that the program would
    // never enter Succ and trigger such an UB.
    if (isa<UnreachableInst>(*Succ->begin()))
      continue;
    return false;
  }
  // The second of pair is a SkipFlags bitmask.
  using SuccIterPair = std::pair<BasicBlock::iterator, unsigned>;
  SmallVector<SuccIterPair, 8> SuccIterPairs;
  for (auto *Succ : UniqueSuccessors) {
    BasicBlock::iterator SuccItr = Succ->begin();
    // PHI nodes cannot be hoisted; bail if a successor starts with one.
    if (isa<PHINode>(*SuccItr))
      return false;
    SuccIterPairs.push_back(SuccIterPair(SuccItr, 0));
  }

  if (AllInstsEqOnly) {
    // Check if all instructions in the successor blocks match. This allows
    // hoisting all instructions and removing the blocks we are hoisting from,
    // so does not add any new instructions.

    // Check if sizes and terminators of all successors match.
    unsigned Size0 = UniqueSuccessors[0]->size();
    Instruction *Term0 = UniqueSuccessors[0]->getTerminator();
    bool AllSame =
        all_of(drop_begin(UniqueSuccessors), [Term0, Size0](BasicBlock *Succ) {
          return Succ->getTerminator()->isIdenticalTo(Term0) &&
                 Succ->size() == Size0;
        });
    if (!AllSame)
      return false;
    // Walk all successors in lockstep from the bottom; every position must
    // hold pairwise-identical (up to commutativity) instructions.
    LockstepReverseIterator<true> LRI(UniqueSuccessors.getArrayRef());
    while (LRI.isValid()) {
      Instruction *I0 = (*LRI)[0];
      if (any_of(*LRI, [I0](Instruction *I) {
            return !areIdenticalUpToCommutativity(I0, I);
          })) {
        return false;
      }
      --LRI;
    }
    // Now we know that all instructions in all successors can be hoisted. Let
    // the loop below handle the hoisting.
  }

  // Count how many instructions were not hoisted so far. There's a limit on how
  // many instructions we skip, serving as a compilation time control as well as
  // preventing excessive increase of life ranges.
  unsigned NumSkipped = 0;
  // If we find an unreachable instruction at the beginning of a basic block, we
  // can still hoist instructions from the rest of the basic blocks.
  if (SuccIterPairs.size() > 2) {
    erase_if(SuccIterPairs,
             [](const auto &Pair) { return isa<UnreachableInst>(Pair.first); });
    if (SuccIterPairs.size() < 2)
      return false;
  }

  bool Changed = false;

  for (;;) {
    auto *SuccIterPairBegin = SuccIterPairs.begin();
    auto &BB1ItrPair = *SuccIterPairBegin++;
    auto OtherSuccIterPairRange =
        iterator_range(SuccIterPairBegin, SuccIterPairs.end());
    auto OtherSuccIterRange = make_first_range(OtherSuccIterPairRange);

    Instruction *I1 = &*BB1ItrPair.first;

    bool AllInstsAreIdentical = true;
    bool HasTerminator = I1->isTerminator();
    for (auto &SuccIter : OtherSuccIterRange) {
      Instruction *I2 = &*SuccIter;
      HasTerminator |= I2->isTerminator();
      if (AllInstsAreIdentical && (!areIdenticalUpToCommutativity(I1, I2) ||
                                   MMRAMetadata(*I1) != MMRAMetadata(*I2)))
        AllInstsAreIdentical = false;
    }

    SmallVector<Instruction *, 8> OtherInsts;
    for (auto &SuccIter : OtherSuccIterRange)
      OtherInsts.push_back(&*SuccIter);

    // If we are hoisting the terminator instruction, don't move one (making a
    // broken BB), instead clone it, and remove BI.
    if (HasTerminator) {
      // Even if BB, which contains only one unreachable instruction, is ignored
      // at the beginning of the loop, we can hoist the terminator instruction.
      // If any instructions remain in the block, we cannot hoist terminators.
      if (NumSkipped || !AllInstsAreIdentical) {
        hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts);
        return Changed;
      }

      return hoistSuccIdenticalTerminatorToSwitchOrIf(
                 TI, I1, OtherInsts, UniqueSuccessors.getArrayRef()) ||
             Changed;
    }

    if (AllInstsAreIdentical) {
      unsigned SkipFlagsBB1 = BB1ItrPair.second;
      AllInstsAreIdentical =
          isSafeToHoistInstr(I1, SkipFlagsBB1) &&
          all_of(OtherSuccIterPairRange, [=](const auto &Pair) {
            Instruction *I2 = &*Pair.first;
            unsigned SkipFlagsBB2 = Pair.second;
            // Even if the instructions are identical, it may not
            // be safe to hoist them if we have skipped over
            // instructions with side effects or their operands
            // weren't hoisted.
            return isSafeToHoistInstr(I2, SkipFlagsBB2) &&
                   // NOTE(review): the second operand of this '&&' (file line
                   // 1995) was lost in extraction — upstream applies a
                   // hoisting-profitability check here; restore before
                   // compiling.
          });
    }

    if (AllInstsAreIdentical) {
      BB1ItrPair.first++;
      // For a normal instruction, we just move one to right before the
      // branch, then replace all uses of the other with the first. Finally,
      // we remove the now redundant second instruction.
      hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts);
      // We've just hoisted DbgVariableRecords; move I1 after them (before TI)
      // and leave any that were not hoisted behind (by calling moveBefore
      // rather than moveBeforePreserving).
      I1->moveBefore(TI->getIterator());
      for (auto &SuccIter : OtherSuccIterRange) {
        Instruction *I2 = &*SuccIter++;
        assert(I2 != I1);
        if (!I2->use_empty())
          I2->replaceAllUsesWith(I1);
        // Merge wrapping/fast-math/etc. flags conservatively.
        I1->andIRFlags(I2);
        if (auto *CB = dyn_cast<CallBase>(I1)) {
          bool Success = CB->tryIntersectAttributes(cast<CallBase>(I2));
          assert(Success && "We should not be trying to hoist callbases "
                            "with non-intersectable attributes");
          // For NDEBUG Compile.
          (void)Success;
        }

        combineMetadataForCSE(I1, I2, true);
        // I1 and I2 are being combined into a single instruction. Its debug
        // location is the merged locations of the original instructions.
        I1->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc());
        I2->eraseFromParent();
      }
      if (!Changed)
        NumHoistCommonCode += SuccIterPairs.size();
      Changed = true;
      NumHoistCommonInstrs += SuccIterPairs.size();
    } else {
      if (NumSkipped >= HoistCommonSkipLimit) {
        hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherInsts);
        return Changed;
      }
      // We are about to skip over a pair of non-identical instructions. Record
      // if any have characteristics that would prevent reordering instructions
      // across them.
      for (auto &SuccIterPair : SuccIterPairs) {
        Instruction *I = &*SuccIterPair.first++;
        SuccIterPair.second |= skippedInstrFlags(I);
      }
      ++NumSkipped;
    }
  }
}
2049
// Hoist the identical terminators of all successors of TI up into TI's block:
// clone one terminator before TI, insert selects for any PHI incoming values
// that disagree between the branches (if-statement case), and rewire the CFG.
bool SimplifyCFGOpt::hoistSuccIdenticalTerminatorToSwitchOrIf(
    Instruction *TI, Instruction *I1,
    SmallVectorImpl<Instruction *> &OtherSuccTIs,
    ArrayRef<BasicBlock *> UniqueSuccessors) {

  auto *BI = dyn_cast<CondBrInst>(TI);

  bool Changed = false;
  BasicBlock *TIParent = TI->getParent();
  BasicBlock *BB1 = I1->getParent();

  // Use only for an if statement.
  auto *I2 = *OtherSuccTIs.begin();
  auto *BB2 = I2->getParent();
  if (BI) {
    assert(OtherSuccTIs.size() == 1);
    assert(BI->getSuccessor(0) == I1->getParent());
    assert(BI->getSuccessor(1) == I2->getParent());
  }

  // In the case of an if statement, we try to hoist an invoke.
  // FIXME: Can we define a safety predicate for CallBr?
  // FIXME: Test case llvm/test/Transforms/SimplifyCFG/2009-06-15-InvokeCrash.ll
  // removed in 4c923b3b3fd0ac1edebf0603265ca3ba51724937 commit?
  if (isa<InvokeInst>(I1) && (!BI || !isSafeToHoistInvoke(BB1, BB2, I1, I2)))
    return false;

  // TODO: callbr hoisting currently disabled pending further study.
  if (isa<CallBrInst>(I1))
    return false;

  for (BasicBlock *Succ : successors(BB1)) {
    for (PHINode &PN : Succ->phis()) {
      Value *BB1V = PN.getIncomingValueForBlock(BB1);
      for (Instruction *OtherSuccTI : OtherSuccTIs) {
        Value *BB2V = PN.getIncomingValueForBlock(OtherSuccTI->getParent());
        if (BB1V == BB2V)
          continue;

        // In the case of an if statement, check for
        // passingValueIsAlwaysUndefined here because we would rather eliminate
        // undefined control flow than convert it to a select.
        if (!BI || passingValueIsAlwaysUndefined(BB1V, &PN) ||
            // NOTE(review): the continuation of this condition (file line
            // 2093, likely "passingValueIsAlwaysUndefined(BB2V, &PN))") was
            // lost in extraction; restore before compiling.
          return false;
      }
    }
  }

  // Hoist DbgVariableRecords attached to the terminator to match dbg.*
  // intrinsic hoisting behaviour in hoistCommonCodeFromSuccessors.
  hoistLockstepIdenticalDbgVariableRecords(TI, I1, OtherSuccTIs);
  // Clone the terminator and hoist it into the pred, without any debug info.
  Instruction *NT = I1->clone();
  NT->insertInto(TIParent, TI->getIterator());
  if (!NT->getType()->isVoidTy()) {
    I1->replaceAllUsesWith(NT);
    for (Instruction *OtherSuccTI : OtherSuccTIs)
      OtherSuccTI->replaceAllUsesWith(NT);
    NT->takeName(I1);
  }
  Changed = true;
  NumHoistCommonInstrs += OtherSuccTIs.size() + 1;

  // Ensure terminator gets a debug location, even an unknown one, in case
  // it involves inlinable calls.
  // NOTE(review): the declaration of `Locs` (file line 2116, likely
  // "SmallVector<DebugLoc, 4> Locs;") was lost in extraction; restore before
  // compiling.
  Locs.push_back(I1->getDebugLoc());
  for (auto *OtherSuccTI : OtherSuccTIs)
    Locs.push_back(OtherSuccTI->getDebugLoc());
  NT->setDebugLoc(DebugLoc::getMergedLocations(Locs));

  // PHIs created below will adopt NT's merged DebugLoc.
  IRBuilder<NoFolder> Builder(NT);

  // In the case of an if statement, hoisting one of the terminators from our
  // successor is a great thing. Unfortunately, the successors of the if/else
  // blocks may have PHI nodes in them. If they do, all PHI entries for BB1/BB2
  // must agree for all PHI nodes, so we insert select instruction to compute
  // the final result.
  if (BI) {
    std::map<std::pair<Value *, Value *>, SelectInst *> InsertedSelects;
    for (BasicBlock *Succ : successors(BB1)) {
      for (PHINode &PN : Succ->phis()) {
        Value *BB1V = PN.getIncomingValueForBlock(BB1);
        Value *BB2V = PN.getIncomingValueForBlock(BB2);
        if (BB1V == BB2V)
          continue;

        // These values do not agree. Insert a select instruction before NT
        // that determines the right value.
        SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
        if (!SI) {
          // Propagate fast-math-flags from phi node to its replacement select.
          // NOTE(review): the line assigning SI (file line 2144, likely
          // "SI = cast<SelectInst>(Builder.CreateSelectFMF(") was lost in
          // extraction; restore before compiling.
              BI->getCondition(), BB1V, BB2V,
              isa<FPMathOperator>(PN) ? &PN : nullptr,
              BB1V->getName() + "." + BB2V->getName(), BI));
        }

        // Make the PHI node use the select for all incoming values for BB1/BB2
        for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
          if (PN.getIncomingBlock(i) == BB1 || PN.getIncomingBlock(i) == BB2)
            PN.setIncomingValue(i, SI);
      }
    }
  }

  // NOTE(review): the declaration of `Updates` (file line 2158, likely
  // "SmallVector<DominatorTree::UpdateType, 4> Updates;") was lost in
  // extraction; restore before compiling.

  // Update any PHI nodes in our new successors.
  for (BasicBlock *Succ : successors(BB1)) {
    addPredecessorToBlock(Succ, TIParent, BB1);
    if (DTU)
      Updates.push_back({DominatorTree::Insert, TIParent, Succ});
  }

  if (DTU) {
    // TI might be a switch with multi-cases destination, so we need to care for
    // the duplication of successors.
    for (BasicBlock *Succ : UniqueSuccessors)
      Updates.push_back({DominatorTree::Delete, TIParent, Succ});
  }

  // NOTE(review): one statement is missing here (file line 2174, likely
  // "eraseTerminatorAndDCECond(TI);" — removing the now-redundant original
  // terminator); restore before compiling.
  if (DTU)
    DTU->applyUpdates(Updates);
  return Changed;
}
2179
2180// TODO: Refine this. This should avoid cases like turning constant memcpy sizes
2181// into variables.
2183 int OpIdx) {
2184 // Divide/Remainder by constant is typically much cheaper than by variable.
2185 if (I->isIntDivRem())
2186 return OpIdx != 1;
2187 return !isa<IntrinsicInst>(I);
2188}
2189
2190// All instructions in Insts belong to different blocks that all unconditionally
2191// branch to a common successor. Analyze each instruction and return true if it
2192// would be possible to sink them into their successor, creating one common
2193// instruction instead. For every value that would be required to be provided by
2194// PHI node (because an operand varies in each input block), add to PHIOperands.
2197 DenseMap<const Use *, SmallVector<Value *, 4>> &PHIOperands) {
2198 // Prune out obviously bad instructions to move. Each instruction must have
2199 // the same number of uses, and we check later that the uses are consistent.
2200 std::optional<unsigned> NumUses;
2201 for (auto *I : Insts) {
2202 // These instructions may change or break semantics if moved.
2203 if (isa<PHINode>(I) || I->isEHPad() || isa<AllocaInst>(I) ||
2204 I->getType()->isTokenTy())
2205 return false;
2206
2207 // Do not try to sink an instruction in an infinite loop - it can cause
2208 // this algorithm to infinite loop.
2209 if (I->getParent()->getSingleSuccessor() == I->getParent())
2210 return false;
2211
2212 // Conservatively return false if I is an inline-asm instruction. Sinking
2213 // and merging inline-asm instructions can potentially create arguments
2214 // that cannot satisfy the inline-asm constraints.
2215 // If the instruction has nomerge or convergent attribute, return false.
2216 if (const auto *C = dyn_cast<CallBase>(I))
2217 if (C->isInlineAsm() || C->cannotMerge() || C->isConvergent())
2218 return false;
2219
2220 if (!NumUses)
2221 NumUses = I->getNumUses();
2222 else if (NumUses != I->getNumUses())
2223 return false;
2224 }
2225
2226 const Instruction *I0 = Insts.front();
2227 const auto I0MMRA = MMRAMetadata(*I0);
2228 for (auto *I : Insts) {
2229 if (!I->isSameOperationAs(I0, Instruction::CompareUsingIntersectedAttrs))
2230 return false;
2231
2232 // Treat MMRAs conservatively. This pass can be quite aggressive and
2233 // could drop a lot of MMRAs otherwise.
2234 if (MMRAMetadata(*I) != I0MMRA)
2235 return false;
2236 }
2237
2238 // Uses must be consistent: If I0 is used in a phi node in the sink target,
2239 // then the other phi operands must match the instructions from Insts. This
2240 // also has to hold true for any phi nodes that would be created as a result
2241 // of sinking. Both of these cases are represented by PhiOperands.
2242 for (const Use &U : I0->uses()) {
2243 auto It = PHIOperands.find(&U);
2244 if (It == PHIOperands.end())
2245 // There may be uses in other blocks when sinking into a loop header.
2246 return false;
2247 if (!equal(Insts, It->second))
2248 return false;
2249 }
2250
2251 // For calls to be sinkable, they must all be indirect, or have same callee.
2252 // I.e. if we have two direct calls to different callees, we don't want to
2253 // turn that into an indirect call. Likewise, if we have an indirect call,
2254 // and a direct call, we don't actually want to have a single indirect call.
2255 if (isa<CallBase>(I0)) {
2256 auto IsIndirectCall = [](const Instruction *I) {
2257 return cast<CallBase>(I)->isIndirectCall();
2258 };
2259 bool HaveIndirectCalls = any_of(Insts, IsIndirectCall);
2260 bool AllCallsAreIndirect = all_of(Insts, IsIndirectCall);
2261 if (HaveIndirectCalls) {
2262 if (!AllCallsAreIndirect)
2263 return false;
2264 } else {
2265 // All callees must be identical.
2266 Value *Callee = nullptr;
2267 for (const Instruction *I : Insts) {
2268 Value *CurrCallee = cast<CallBase>(I)->getCalledOperand();
2269 if (!Callee)
2270 Callee = CurrCallee;
2271 else if (Callee != CurrCallee)
2272 return false;
2273 }
2274 }
2275 }
2276
2277 for (unsigned OI = 0, OE = I0->getNumOperands(); OI != OE; ++OI) {
2278 Value *Op = I0->getOperand(OI);
2279 auto SameAsI0 = [&I0, OI](const Instruction *I) {
2280 assert(I->getNumOperands() == I0->getNumOperands());
2281 return I->getOperand(OI) == I0->getOperand(OI);
2282 };
2283 if (!all_of(Insts, SameAsI0)) {
2286 // We can't create a PHI from this GEP.
2287 return false;
2288 auto &Ops = PHIOperands[&I0->getOperandUse(OI)];
2289 for (auto *I : Insts)
2290 Ops.push_back(I->getOperand(OI));
2291 }
2292 }
2293 return true;
2294}
2295
2296// Assuming canSinkInstructions(Blocks) has returned true, sink the last
2297// instruction of every block in Blocks to their common successor, commoning
2298// into one instruction.
2300 auto *BBEnd = Blocks[0]->getTerminator()->getSuccessor(0);
2301
2302 // canSinkInstructions returning true guarantees that every block has at
2303 // least one non-terminator instruction.
2305 for (auto *BB : Blocks) {
2306 Instruction *I = BB->getTerminator();
2307 I = I->getPrevNode();
2308 Insts.push_back(I);
2309 }
2310
2311 // We don't need to do any more checking here; canSinkInstructions should
2312 // have done it all for us.
2313 SmallVector<Value*, 4> NewOperands;
2314 Instruction *I0 = Insts.front();
2315 for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
2316 // This check is different to that in canSinkInstructions. There, we
2317 // cared about the global view once simplifycfg (and instcombine) have
2318 // completed - it takes into account PHIs that become trivially
2319 // simplifiable. However here we need a more local view; if an operand
2320 // differs we create a PHI and rely on instcombine to clean up the very
2321 // small mess we may make.
2322 bool NeedPHI = any_of(Insts, [&I0, O](const Instruction *I) {
2323 return I->getOperand(O) != I0->getOperand(O);
2324 });
2325 if (!NeedPHI) {
2326 NewOperands.push_back(I0->getOperand(O));
2327 continue;
2328 }
2329
2330 // Create a new PHI in the successor block and populate it.
2331 auto *Op = I0->getOperand(O);
2332 assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!");
2333 auto *PN =
2334 PHINode::Create(Op->getType(), Insts.size(), Op->getName() + ".sink");
2335 PN->insertBefore(BBEnd->begin());
2336 for (auto *I : Insts)
2337 PN->addIncoming(I->getOperand(O), I->getParent());
2338 NewOperands.push_back(PN);
2339 }
2340
2341 // Arbitrarily use I0 as the new "common" instruction; remap its operands
2342 // and move it to the start of the successor block.
2343 for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O)
2344 I0->getOperandUse(O).set(NewOperands[O]);
2345
2346 I0->moveBefore(*BBEnd, BBEnd->getFirstInsertionPt());
2347
2348 // Update metadata and IR flags, and merge debug locations.
2349 for (auto *I : Insts)
2350 if (I != I0) {
2351 // The debug location for the "common" instruction is the merged locations
2352 // of all the commoned instructions. We start with the original location
2353 // of the "common" instruction and iteratively merge each location in the
2354 // loop below.
2355 // This is an N-way merge, which will be inefficient if I0 is a CallInst.
2356 // However, as N-way merge for CallInst is rare, so we use simplified API
2357 // instead of using complex API for N-way merge.
2358 I0->applyMergedLocation(I0->getDebugLoc(), I->getDebugLoc());
2359 combineMetadataForCSE(I0, I, true);
2360 I0->andIRFlags(I);
2361 if (auto *CB = dyn_cast<CallBase>(I0)) {
2362 bool Success = CB->tryIntersectAttributes(cast<CallBase>(I));
2363 assert(Success && "We should not be trying to sink callbases "
2364 "with non-intersectable attributes");
2365 // For NDEBUG Compile.
2366 (void)Success;
2367 }
2368 }
2369
2370 for (User *U : make_early_inc_range(I0->users())) {
2371 // canSinkLastInstruction checked that all instructions are only used by
2372 // phi nodes in a way that allows replacing the phi node with the common
2373 // instruction.
2374 auto *PN = cast<PHINode>(U);
2375 PN->replaceAllUsesWith(I0);
2376 PN->eraseFromParent();
2377 }
2378
2379 // Finally nuke all instructions apart from the common instruction.
2380 for (auto *I : Insts) {
2381 if (I == I0)
2382 continue;
2383 // The remaining uses are debug users, replace those with the common inst.
2384 // In most (all?) cases this just introduces a use-before-def.
2385 assert(I->user_empty() && "Inst unexpectedly still has non-dbg users");
2386 I->replaceAllUsesWith(I0);
2387 I->eraseFromParent();
2388 }
2389}
2390
2391/// Check whether BB's predecessors end with unconditional branches. If it is
2392/// true, sink any common code from the predecessors to BB.
2394 DomTreeUpdater *DTU) {
2395 // We support two situations:
2396 // (1) all incoming arcs are unconditional
2397 // (2) there are non-unconditional incoming arcs
2398 //
2399 // (2) is very common in switch defaults and
2400 // else-if patterns;
2401 //
2402 // if (a) f(1);
2403 // else if (b) f(2);
2404 //
2405 // produces:
2406 //
2407 // [if]
2408 // / \
2409 // [f(1)] [if]
2410 // | | \
2411 // | | |
2412 // | [f(2)]|
2413 // \ | /
2414 // [ end ]
2415 //
2416 // [end] has two unconditional predecessor arcs and one conditional. The
2417 // conditional refers to the implicit empty 'else' arc. This conditional
2418 // arc can also be caused by an empty default block in a switch.
2419 //
2420 // In this case, we attempt to sink code from all *unconditional* arcs.
2421 // If we can sink instructions from these arcs (determined during the scan
2422 // phase below) we insert a common successor for all unconditional arcs and
2423 // connect that to [end], to enable sinking:
2424 //
2425 // [if]
2426 // / \
2427 // [x(1)] [if]
2428 // | | \
2429 // | | \
2430 // | [x(2)] |
2431 // \ / |
2432 // [sink.split] |
2433 // \ /
2434 // [ end ]
2435 //
2436 SmallVector<BasicBlock*,4> UnconditionalPreds;
2437 bool HaveNonUnconditionalPredecessors = false;
2438 for (auto *PredBB : predecessors(BB)) {
2439 auto *PredBr = dyn_cast<UncondBrInst>(PredBB->getTerminator());
2440 if (PredBr)
2441 UnconditionalPreds.push_back(PredBB);
2442 else
2443 HaveNonUnconditionalPredecessors = true;
2444 }
2445 if (UnconditionalPreds.size() < 2)
2446 return false;
2447
2448 // We take a two-step approach to tail sinking. First we scan from the end of
2449 // each block upwards in lockstep. If the n'th instruction from the end of each
2450 // block can be sunk, those instructions are added to ValuesToSink and we
2451 // carry on. If we can sink an instruction but need to PHI-merge some operands
2452 // (because they're not identical in each instruction) we add these to
2453 // PHIOperands.
2454 // We prepopulate PHIOperands with the phis that already exist in BB.
2456 for (PHINode &PN : BB->phis()) {
2458 for (const Use &U : PN.incoming_values())
2459 IncomingVals.insert({PN.getIncomingBlock(U), &U});
2460 auto &Ops = PHIOperands[IncomingVals[UnconditionalPreds[0]]];
2461 for (BasicBlock *Pred : UnconditionalPreds)
2462 Ops.push_back(*IncomingVals[Pred]);
2463 }
2464
2465 int ScanIdx = 0;
2466 SmallPtrSet<Value*,4> InstructionsToSink;
2467 LockstepReverseIterator<true> LRI(UnconditionalPreds);
2468 while (LRI.isValid() &&
2469 canSinkInstructions(*LRI, PHIOperands)) {
2470 LLVM_DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0]
2471 << "\n");
2472 InstructionsToSink.insert_range(*LRI);
2473 ++ScanIdx;
2474 --LRI;
2475 }
2476
2477 // If no instructions can be sunk, early-return.
2478 if (ScanIdx == 0)
2479 return false;
2480
2481 bool followedByDeoptOrUnreachable = IsBlockFollowedByDeoptOrUnreachable(BB);
2482
2483 if (!followedByDeoptOrUnreachable) {
2484 // Check whether this is the pointer operand of a load/store.
2485 auto IsMemOperand = [](Use &U) {
2486 auto *I = cast<Instruction>(U.getUser());
2487 if (isa<LoadInst>(I))
2488 return U.getOperandNo() == LoadInst::getPointerOperandIndex();
2489 if (isa<StoreInst>(I))
2490 return U.getOperandNo() == StoreInst::getPointerOperandIndex();
2491 return false;
2492 };
2493
2494 // Okay, we *could* sink last ScanIdx instructions. But how many can we
2495 // actually sink before encountering instruction that is unprofitable to
2496 // sink?
2497 auto ProfitableToSinkInstruction = [&](LockstepReverseIterator<true> &LRI) {
2498 unsigned NumPHIInsts = 0;
2499 for (Use &U : (*LRI)[0]->operands()) {
2500 auto It = PHIOperands.find(&U);
2501 if (It != PHIOperands.end() && !all_of(It->second, [&](Value *V) {
2502 return InstructionsToSink.contains(V);
2503 })) {
2504 ++NumPHIInsts;
2505 // Do not separate a load/store from the gep producing the address.
2506 // The gep can likely be folded into the load/store as an addressing
2507 // mode. Additionally, a load of a gep is easier to analyze than a
2508 // load of a phi.
2509 if (IsMemOperand(U) &&
2510 any_of(It->second, [](Value *V) { return isa<GEPOperator>(V); }))
2511 return false;
2512 // FIXME: this check is overly optimistic. We may end up not sinking
2513 // said instruction, due to the very same profitability check.
2514 // See @creating_too_many_phis in sink-common-code.ll.
2515 }
2516 }
2517 LLVM_DEBUG(dbgs() << "SINK: #phi insts: " << NumPHIInsts << "\n");
2518 return NumPHIInsts <= 1;
2519 };
2520
2521 // We've determined that we are going to sink last ScanIdx instructions,
2522 // and recorded them in InstructionsToSink. Now, some instructions may be
2523 // unprofitable to sink. But that determination depends on the instructions
2524 // that we are going to sink.
2525
2526 // First, forward scan: find the first instruction unprofitable to sink,
2527 // recording all the ones that are profitable to sink.
2528 // FIXME: would it be better, after we detect that not all are profitable.
2529 // to either record the profitable ones, or erase the unprofitable ones?
2530 // Maybe we need to choose (at runtime) the one that will touch least
2531 // instrs?
2532 LRI.reset();
2533 int Idx = 0;
2534 SmallPtrSet<Value *, 4> InstructionsProfitableToSink;
2535 while (Idx < ScanIdx) {
2536 if (!ProfitableToSinkInstruction(LRI)) {
2537 // Too many PHIs would be created.
2538 LLVM_DEBUG(
2539 dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
2540 break;
2541 }
2542 InstructionsProfitableToSink.insert_range(*LRI);
2543 --LRI;
2544 ++Idx;
2545 }
2546
2547 // If no instructions can be sunk, early-return.
2548 if (Idx == 0)
2549 return false;
2550
2551 // Did we determine that (only) some instructions are unprofitable to sink?
2552 if (Idx < ScanIdx) {
2553 // Okay, some instructions are unprofitable.
2554 ScanIdx = Idx;
2555 InstructionsToSink = InstructionsProfitableToSink;
2556
2557 // But, that may make other instructions unprofitable, too.
2558 // So, do a backward scan, do any earlier instructions become
2559 // unprofitable?
2560 assert(
2561 !ProfitableToSinkInstruction(LRI) &&
2562 "We already know that the last instruction is unprofitable to sink");
2563 ++LRI;
2564 --Idx;
2565 while (Idx >= 0) {
2566 // If we detect that an instruction becomes unprofitable to sink,
2567 // all earlier instructions won't be sunk either,
2568 // so preemptively keep InstructionsProfitableToSink in sync.
2569 // FIXME: is this the most performant approach?
2570 for (auto *I : *LRI)
2571 InstructionsProfitableToSink.erase(I);
2572 if (!ProfitableToSinkInstruction(LRI)) {
2573 // Everything starting with this instruction won't be sunk.
2574 ScanIdx = Idx;
2575 InstructionsToSink = InstructionsProfitableToSink;
2576 }
2577 ++LRI;
2578 --Idx;
2579 }
2580 }
2581
2582 // If no instructions can be sunk, early-return.
2583 if (ScanIdx == 0)
2584 return false;
2585 }
2586
2587 bool Changed = false;
2588
2589 if (HaveNonUnconditionalPredecessors) {
2590 if (!followedByDeoptOrUnreachable) {
2591 // It is always legal to sink common instructions from unconditional
2592 // predecessors. However, if not all predecessors are unconditional,
2593 // this transformation might be pessimizing. So as a rule of thumb,
2594 // don't do it unless we'd sink at least one non-speculatable instruction.
2595 // See https://bugs.llvm.org/show_bug.cgi?id=30244
2596 LRI.reset();
2597 int Idx = 0;
2598 bool Profitable = false;
2599 while (Idx < ScanIdx) {
2600 if (!isSafeToSpeculativelyExecute((*LRI)[0])) {
2601 Profitable = true;
2602 break;
2603 }
2604 --LRI;
2605 ++Idx;
2606 }
2607 if (!Profitable)
2608 return false;
2609 }
2610
2611 LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n");
2612 // We have a conditional edge and we're going to sink some instructions.
2613 // Insert a new block postdominating all blocks we're going to sink from.
2614 if (!SplitBlockPredecessors(BB, UnconditionalPreds, ".sink.split", DTU))
2615 // Edges couldn't be split.
2616 return false;
2617 Changed = true;
2618 }
2619
2620 // Now that we've analyzed all potential sinking candidates, perform the
2621 // actual sink. We iteratively sink the last non-terminator of the source
2622 // blocks into their common successor unless doing so would require too
2623 // many PHI instructions to be generated (currently only one PHI is allowed
2624 // per sunk instruction).
2625 //
2626 // We can use InstructionsToSink to discount values needing PHI-merging that will
2627 // actually be sunk in a later iteration. This allows us to be more
2628 // aggressive in what we sink. This does allow a false positive where we
2629 // sink presuming a later value will also be sunk, but stop half way through
2630 // and never actually sink it which means we produce more PHIs than intended.
2631 // This is unlikely in practice though.
2632 int SinkIdx = 0;
2633 for (; SinkIdx != ScanIdx; ++SinkIdx) {
2634 LLVM_DEBUG(dbgs() << "SINK: Sink: "
2635 << *UnconditionalPreds[0]->getTerminator()->getPrevNode()
2636 << "\n");
2637
2638 // Because we've sunk every instruction in turn, the current instruction to
2639 // sink is always at index 0.
2640 LRI.reset();
2641
2642 sinkLastInstruction(UnconditionalPreds);
2643 NumSinkCommonInstrs++;
2644 Changed = true;
2645 }
2646 if (SinkIdx != 0)
2647 ++NumSinkCommonCode;
2648 return Changed;
2649}
2650
namespace {

/// Groups `invoke` instructions that unwind to a common landingpad into sets
/// of mutually-mergeable invokes, as decided by shouldBelongToSameSet().
struct CompatibleSets {
  using SetTy = SmallVector<InvokeInst *, 2>;

  // NOTE(review): the declaration of the set collection (referenced as `Sets`
  // by getCompatibleSet() and iterated as `Grouper.Sets` by the caller)
  // appears to have been dropped during extraction here — confirm against
  // upstream.

  /// Returns true if the two candidate invokes (always exactly two) could be
  /// merged without changing observable semantics.
  static bool shouldBelongToSameSet(ArrayRef<InvokeInst *> Invokes);

  /// Finds the existing set \p II is compatible with, or creates a new one.
  SetTy &getCompatibleSet(InvokeInst *II);

  /// Places \p II into its compatible set.
  void insert(InvokeInst *II);
};
2664
2665CompatibleSets::SetTy &CompatibleSets::getCompatibleSet(InvokeInst *II) {
2666 // Perform a linear scan over all the existing sets, see if the new `invoke`
2667 // is compatible with any particular set. Since we know that all the `invokes`
2668 // within a set are compatible, only check the first `invoke` in each set.
2669 // WARNING: at worst, this has quadratic complexity.
2670 for (CompatibleSets::SetTy &Set : Sets) {
2671 if (CompatibleSets::shouldBelongToSameSet({Set.front(), II}))
2672 return Set;
2673 }
2674
2675 // Otherwise, we either had no sets yet, or this invoke forms a new set.
2676 return Sets.emplace_back();
2677}
2678
2679void CompatibleSets::insert(InvokeInst *II) {
2680 getCompatibleSet(II).emplace_back(II);
2681}
2682
bool CompatibleSets::shouldBelongToSameSet(ArrayRef<InvokeInst *> Invokes) {
  assert(Invokes.size() == 2 && "Always called with exactly two candidates.");

  // Can we theoretically merge these `invoke`s?
  auto IsIllegalToMerge = [](InvokeInst *II) {
    return II->cannotMerge() || II->isInlineAsm();
  };
  if (any_of(Invokes, IsIllegalToMerge))
    return false;

  // Either both `invoke`s must be direct,
  // or both `invoke`s must be indirect.
  auto IsIndirectCall = [](InvokeInst *II) { return II->isIndirectCall(); };
  bool HaveIndirectCalls = any_of(Invokes, IsIndirectCall);
  bool AllCallsAreIndirect = all_of(Invokes, IsIndirectCall);
  if (HaveIndirectCalls) {
    if (!AllCallsAreIndirect)
      return false;
  } else {
    // All callees must be identical.
    Value *Callee = nullptr;
    for (InvokeInst *II : Invokes) {
      Value *CurrCallee = II->getCalledOperand();
      assert(CurrCallee && "There is always a called operand.");
      if (!Callee)
        Callee = CurrCallee;
      else if (Callee != CurrCallee)
        return false;
    }
  }

  // Either both `invoke`s must not have a normal destination,
  // or both `invoke`s must have a normal destination,
  auto HasNormalDest = [](InvokeInst *II) {
    return !isa<UnreachableInst>(II->getNormalDest()->getFirstNonPHIOrDbg());
  };
  if (any_of(Invokes, HasNormalDest)) {
    // Do not merge `invoke` that does not have a normal destination with one
    // that does have a normal destination, even though doing so would be legal.
    if (!all_of(Invokes, HasNormalDest))
      return false;

    // All normal destinations must be identical.
    BasicBlock *NormalBB = nullptr;
    for (InvokeInst *II : Invokes) {
      BasicBlock *CurrNormalBB = II->getNormalDest();
      assert(CurrNormalBB && "There is always a 'continue to' basic block.");
      if (!NormalBB)
        NormalBB = CurrNormalBB;
      else if (NormalBB != CurrNormalBB)
        return false;
    }

    // In the normal destination, the incoming values for these two `invoke`s
    // must be compatible.
    SmallPtrSet<Value *, 16> EquivalenceSet(llvm::from_range, Invokes);
    // NOTE(review): the opening of the compatibility check call (presumably
    // `if (!incomingValuesAreCompatible(`) was dropped during extraction
    // here — confirm against upstream.
            NormalBB, {Invokes[0]->getParent(), Invokes[1]->getParent()},
            &EquivalenceSet))
      return false;
  }

#ifndef NDEBUG
  // All unwind destinations must be identical.
  // We know that because we have started from said unwind destination.
  BasicBlock *UnwindBB = nullptr;
  for (InvokeInst *II : Invokes) {
    BasicBlock *CurrUnwindBB = II->getUnwindDest();
    assert(CurrUnwindBB && "There is always an 'unwind to' basic block.");
    if (!UnwindBB)
      UnwindBB = CurrUnwindBB;
    else
      assert(UnwindBB == CurrUnwindBB && "Unexpected unwind destination.");
  }
#endif

  // In the unwind destination, the incoming values for these two `invoke`s
  // must be compatible.
  // NOTE(review): the opening of the compatibility check call (presumably
  // `if (!incomingValuesAreCompatible(`) was dropped during extraction here.
          Invokes.front()->getUnwindDest(),
          {Invokes[0]->getParent(), Invokes[1]->getParent()}))
    return false;

  // Ignoring arguments, these `invoke`s must be identical,
  // including operand bundles.
  const InvokeInst *II0 = Invokes.front();
  for (auto *II : Invokes.drop_front())
    if (!II->isSameOperationAs(II0, Instruction::CompareUsingIntersectedAttrs))
      return false;

  // Can we theoretically form the data operands for the merged `invoke`?
  auto IsIllegalToMergeArguments = [](auto Ops) {
    Use &U0 = std::get<0>(Ops);
    Use &U1 = std::get<1>(Ops);
    if (U0 == U1)
      return false;
    // NOTE(review): the return expression's opening (presumably a negated
    // `canReplaceOperandWithVariable(...)` call) was dropped during
    // extraction here.
                                           U0.getOperandNo());
  };
  assert(Invokes.size() == 2 && "Always called with exactly two candidates.");
  if (any_of(zip(Invokes[0]->data_ops(), Invokes[1]->data_ops()),
             IsIllegalToMergeArguments))
    return false;

  return true;
}

} // namespace
2791
// Merge all invokes in the provided set, all of which are compatible
// as per the `CompatibleSets::shouldBelongToSameSet()`.
// NOTE(review): the opening of this function's signature (its name and the
// `ArrayRef<InvokeInst *> Invokes` parameter) was dropped during extraction
// here — confirm against upstream.
                                       DomTreeUpdater *DTU) {
  assert(Invokes.size() >= 2 && "Must have at least two invokes to merge.");

  // NOTE(review): the declaration of the `Updates` vector (DominatorTree
  // update records) was dropped during extraction here.
  if (DTU)
    Updates.reserve(2 + 3 * Invokes.size());

  bool HasNormalDest =
      !isa<UnreachableInst>(Invokes[0]->getNormalDest()->getFirstNonPHIOrDbg());

  // Clone one of the invokes into a new basic block.
  // Since they are all compatible, it doesn't matter which invoke is cloned.
  InvokeInst *MergedInvoke = [&Invokes, HasNormalDest]() {
    InvokeInst *II0 = Invokes.front();
    BasicBlock *II0BB = II0->getParent();
    BasicBlock *InsertBeforeBlock =
        II0->getParent()->getIterator()->getNextNode();
    Function *Func = II0BB->getParent();
    LLVMContext &Ctx = II0->getContext();

    BasicBlock *MergedInvokeBB = BasicBlock::Create(
        Ctx, II0BB->getName() + ".invoke", Func, InsertBeforeBlock);

    auto *MergedInvoke = cast<InvokeInst>(II0->clone());
    // NOTE: all invokes have the same attributes, so no handling needed.
    MergedInvoke->insertInto(MergedInvokeBB, MergedInvokeBB->end());

    if (!HasNormalDest) {
      // This set does not have a normal destination,
      // so just form a new block with unreachable terminator.
      BasicBlock *MergedNormalDest = BasicBlock::Create(
          Ctx, II0BB->getName() + ".cont", Func, InsertBeforeBlock);
      auto *UI = new UnreachableInst(Ctx, MergedNormalDest);
      UI->setDebugLoc(DebugLoc::getTemporary());
      MergedInvoke->setNormalDest(MergedNormalDest);
    }

    // The unwind destination, however, remains identical for all invokes here.

    return MergedInvoke;
  }();

  if (DTU) {
    // Predecessor blocks that contained these invokes will now branch to
    // the new block that contains the merged invoke, ...
    for (InvokeInst *II : Invokes)
      Updates.push_back(
          {DominatorTree::Insert, II->getParent(), MergedInvoke->getParent()});

    // ... which has the new `unreachable` block as normal destination,
    // or unwinds to the (same for all `invoke`s in this set) `landingpad`,
    for (BasicBlock *SuccBBOfMergedInvoke : successors(MergedInvoke))
      Updates.push_back({DominatorTree::Insert, MergedInvoke->getParent(),
                         SuccBBOfMergedInvoke});

    // Since predecessor blocks now unconditionally branch to a new block,
    // they no longer branch to their original successors.
    for (InvokeInst *II : Invokes)
      for (BasicBlock *SuccOfPredBB : successors(II->getParent()))
        Updates.push_back(
            {DominatorTree::Delete, II->getParent(), SuccOfPredBB});
  }

  bool IsIndirectCall = Invokes[0]->isIndirectCall();

  // Form the merged operands for the merged invoke.
  for (Use &U : MergedInvoke->operands()) {
    // Only PHI together the indirect callees and data operands.
    if (MergedInvoke->isCallee(&U)) {
      if (!IsIndirectCall)
        continue;
    } else if (!MergedInvoke->isDataOperand(&U))
      continue;

    // Don't create trivial PHI's with all-identical incoming values.
    bool NeedPHI = any_of(Invokes, [&U](InvokeInst *II) {
      return II->getOperand(U.getOperandNo()) != U.get();
    });
    if (!NeedPHI)
      continue;

    // Form a PHI out of all the data ops under this index.
    // NOTE(review): the opening of this call (presumably
    // `auto *PN = PHINode::Create(`) was dropped during extraction here.
        U->getType(), /*NumReservedValues=*/Invokes.size(), "", MergedInvoke->getIterator());
    for (InvokeInst *II : Invokes)
      PN->addIncoming(II->getOperand(U.getOperandNo()), II->getParent());

    U.set(PN);
  }

  // We've ensured that each PHI node has compatible (identical) incoming values
  // when coming from each of the `invoke`s in the current merge set,
  // so update the PHI nodes accordingly.
  for (BasicBlock *Succ : successors(MergedInvoke))
    addPredecessorToBlock(Succ, /*NewPred=*/MergedInvoke->getParent(),
                          /*ExistPred=*/Invokes.front()->getParent());

  // And finally, replace the original `invoke`s with an unconditional branch
  // to the block with the merged `invoke`. Also, give that merged `invoke`
  // the merged debugloc of all the original `invoke`s.
  DILocation *MergedDebugLoc = nullptr;
  for (InvokeInst *II : Invokes) {
    // Compute the debug location common to all the original `invoke`s.
    if (!MergedDebugLoc)
      MergedDebugLoc = II->getDebugLoc();
    else
      MergedDebugLoc =
          DebugLoc::getMergedLocation(MergedDebugLoc, II->getDebugLoc());

    // And replace the old `invoke` with an unconditional branch
    // to the block with the merged `invoke`.
    for (BasicBlock *OrigSuccBB : successors(II->getParent()))
      OrigSuccBB->removePredecessor(II->getParent());
    auto *BI = UncondBrInst::Create(MergedInvoke->getParent(), II->getParent());
    // The unconditional branch is part of the replacement for the original
    // invoke, so should use its DebugLoc.
    BI->setDebugLoc(II->getDebugLoc());
    bool Success = MergedInvoke->tryIntersectAttributes(II);
    assert(Success && "Merged invokes with incompatible attributes");
    // For NDEBUG Compile
    (void)Success;
    II->replaceAllUsesWith(MergedInvoke);
    II->eraseFromParent();
    ++NumInvokesMerged;
  }
  MergedInvoke->setDebugLoc(MergedDebugLoc);
  ++NumInvokeSetsFormed;

  if (DTU)
    DTU->applyUpdates(Updates);
}
2926
/// If this block is a `landingpad` exception handling block, categorize all
/// the predecessor `invoke`s into sets, with all `invoke`s in each set
/// being "mergeable" together, and then merge invokes in each set together.
///
/// This is a weird mix of hoisting and sinking. Visually, it goes from:
///          [...]        [...]
///            |            |
///        [invoke0]    [invoke1]
///           / \          / \
///     [cont0] [landingpad] [cont1]
/// to:
///      [...] [...]
///         \ /
///      [invoke]
///         / \
///     [cont] [landingpad]
///
/// But of course we can only do that if the invokes share the `landingpad`,
/// edges invoke0->cont0 and invoke1->cont1 are "compatible",
/// and the invoked functions are "compatible".
// NOTE(review): this function's signature line(s) and an early bail-out
// condition preceding the `return false;` below were dropped during
// extraction — confirm against upstream.
    return false;

  bool Changed = false;

  // FIXME: generalize to all exception handling blocks?
  if (!BB->isLandingPad())
    return Changed;

  CompatibleSets Grouper;

  // Record all the predecessors of this `landingpad`. As per verifier,
  // the only allowed predecessor is the unwind edge of an `invoke`.
  // We want to group "compatible" `invokes` into the same set to be merged.
  for (BasicBlock *PredBB : predecessors(BB))
    Grouper.insert(cast<InvokeInst>(PredBB->getTerminator()));

  // And now, merge `invoke`s that were grouped together.
  for (ArrayRef<InvokeInst *> Invokes : Grouper.Sets) {
    if (Invokes.size() < 2)
      continue;
    Changed = true;
    mergeCompatibleInvokesImpl(Invokes, DTU);
  }

  return Changed;
}
2975
2976namespace {
2977/// Track ephemeral values, which should be ignored for cost-modelling
2978/// purposes. Requires walking instructions in reverse order.
2979class EphemeralValueTracker {
2980 SmallPtrSet<const Instruction *, 32> EphValues;
2981
2982 bool isEphemeral(const Instruction *I) {
2983 if (isa<AssumeInst>(I))
2984 return true;
2985 return !I->mayHaveSideEffects() && !I->isTerminator() &&
2986 all_of(I->users(), [&](const User *U) {
2987 return EphValues.count(cast<Instruction>(U));
2988 });
2989 }
2990
2991public:
2992 bool track(const Instruction *I) {
2993 if (isEphemeral(I)) {
2994 EphValues.insert(I);
2995 return true;
2996 }
2997 return false;
2998 }
2999
3000 bool contains(const Instruction *I) const { return EphValues.contains(I); }
3001};
3002} // namespace
3003
/// Determine if we can hoist sink a sole store instruction out of a
/// conditional block.
///
/// We are looking for code like the following:
///   BrBB:
///     store i32 %add, i32* %arrayidx2
///     ... // No other stores or function calls (we could be calling a memory
///     ... // function).
///     %cmp = icmp ult %x, %y
///     br i1 %cmp, label %EndBB, label %ThenBB
///   ThenBB:
///     store i32 %add5, i32* %arrayidx2
///     br label EndBB
///   EndBB:
///     ...
///   We are going to transform this into:
///   BrBB:
///     store i32 %add, i32* %arrayidx2
///     ... //
///     %cmp = icmp ult %x, %y
///     %add.add5 = select i1 %cmp, i32 %add, %add5
///     store i32 %add.add5, i32* %arrayidx2
///     ...
///
/// \return The pointer to the value of the previous store if the store can be
///         hoisted into the predecessor block. 0 otherwise.
// NOTE(review): the opening of this function's signature (its name and the
// leading instruction/BrBB parameters) was dropped during extraction here —
// confirm against upstream.
                                     BasicBlock *StoreBB, BasicBlock *EndBB) {
  StoreInst *StoreToHoist = dyn_cast<StoreInst>(I);
  if (!StoreToHoist)
    return nullptr;

  // Volatile or atomic.
  if (!StoreToHoist->isSimple())
    return nullptr;

  Value *StorePtr = StoreToHoist->getPointerOperand();
  Type *StoreTy = StoreToHoist->getValueOperand()->getType();

  // Look for a store to the same pointer in BrBB.
  unsigned MaxNumInstToLookAt = 9;
  // Skip pseudo probe intrinsic calls which are not really killing any memory
  // accesses.
  for (Instruction &CurI : reverse(*BrBB)) {
    if (!MaxNumInstToLookAt)
      break;
    --MaxNumInstToLookAt;

    if (isa<PseudoProbeInst>(CurI))
      continue;

    // Could be calling an instruction that affects memory like free().
    if (CurI.mayWriteToMemory() && !isa<StoreInst>(CurI))
      return nullptr;

    if (auto *SI = dyn_cast<StoreInst>(&CurI)) {
      // Found the previous store to same location and type. Make sure it is
      // simple, to avoid introducing a spurious non-atomic write after an
      // atomic write.
      if (SI->getPointerOperand() == StorePtr &&
          SI->getValueOperand()->getType() == StoreTy && SI->isSimple() &&
          SI->getAlign() >= StoreToHoist->getAlign())
        // Found the previous store, return its value operand.
        return SI->getValueOperand();
      return nullptr; // Unknown store.
    }

    if (auto *LI = dyn_cast<LoadInst>(&CurI)) {
      if (LI->getPointerOperand() == StorePtr && LI->getType() == StoreTy &&
          LI->isSimple() && LI->getAlign() >= StoreToHoist->getAlign()) {
        Value *Obj = getUnderlyingObject(StorePtr);
        bool ExplicitlyDereferenceableOnly;
        if (isWritableObject(Obj, ExplicitlyDereferenceableOnly) &&
        // NOTE(review): part of this condition (apparently a negated capture
        // check and the trailing arguments of PointerMayBeCaptured) lost
        // lines during extraction — confirm against upstream.
            PointerMayBeCaptured(Obj, /*ReturnCaptures=*/false,
            (!ExplicitlyDereferenceableOnly ||
             isDereferenceablePointer(StorePtr, StoreTy,
                                      LI->getDataLayout()))) {
          // Found a previous load, return it.
          return LI;
        }
      }
      // The load didn't work out, but we may still find a store.
    }
  }

  return nullptr;
}
3093
/// Estimate the cost of the insertion(s) and check that the PHI nodes can be
/// converted to selects.
// NOTE(review): the opening of this function's signature (its name and the
// leading BasicBlock parameters) was dropped during extraction here.
                                 BasicBlock *EndBB,
                                 unsigned &SpeculatedInstructions,
                                 InstructionCost &Cost,
                                 const TargetTransformInfo &TTI) {
  // NOTE(review): the cost-kind selection around hasMinSize() (presumably a
  // conditional expression choosing a TargetTransformInfo cost kind) lost
  // lines during extraction — confirm against upstream.
      BB->getParent()->hasMinSize()

  bool HaveRewritablePHIs = false;
  for (PHINode &PN : EndBB->phis()) {
    Value *OrigV = PN.getIncomingValueForBlock(BB);
    Value *ThenV = PN.getIncomingValueForBlock(ThenBB);

    // FIXME: Try to remove some of the duplication with
    // hoistCommonCodeFromSuccessors. Skip PHIs which are trivial.
    if (ThenV == OrigV)
      continue;

    // Charge the cost of the select each non-trivial PHI will become.
    Cost += TTI.getCmpSelInstrCost(Instruction::Select, PN.getType(),
                                   CmpInst::makeCmpResultType(PN.getType()),
    // NOTE(review): the trailing arguments of this call were dropped during
    // extraction here.

    // Don't convert to selects if we could remove undefined behavior instead.
    if (passingValueIsAlwaysUndefined(OrigV, &PN) ||
    // NOTE(review): the second operand of this disjunction was dropped
    // during extraction here.
      return false;

    HaveRewritablePHIs = true;
    ConstantExpr *OrigCE = dyn_cast<ConstantExpr>(OrigV);
    ConstantExpr *ThenCE = dyn_cast<ConstantExpr>(ThenV);
    if (!OrigCE && !ThenCE)
      continue; // Known cheap (FIXME: Maybe not true for aggregates).

    InstructionCost OrigCost = OrigCE ? computeSpeculationCost(OrigCE, TTI) : 0;
    InstructionCost ThenCost = ThenCE ? computeSpeculationCost(ThenCE, TTI) : 0;
    InstructionCost MaxCost =
    // NOTE(review): the initializer expression of MaxCost was dropped during
    // extraction here.
    if (OrigCost + ThenCost > MaxCost)
      return false;

    // Account for the cost of an unfolded ConstantExpr which could end up
    // getting expanded into Instructions.
    // FIXME: This doesn't account for how many operations are combined in the
    // constant expression.
    ++SpeculatedInstructions;
    if (SpeculatedInstructions > 1)
      return false;
  }

  return HaveRewritablePHIs;
}
3149
/// Decide, from branch-weight metadata, whether speculating across \p BI
/// looks worthwhile; returns true when no usable profile data exists or the
/// end-block edge is not strongly predicted.
// NOTE(review): the first line of this function's signature (its name and
// the branch parameter) was dropped during extraction here.
                                    std::optional<bool> Invert,
                                    const TargetTransformInfo &TTI) {
  // If the branch is non-unpredictable, and is predicted to *not* branch to
  // the `then` block, then avoid speculating it.
  if (BI->getMetadata(LLVMContext::MD_unpredictable))
    return true;

  // Without valid branch weights there is nothing to disprove profitability.
  uint64_t TWeight, FWeight;
  if (!extractBranchWeights(*BI, TWeight, FWeight) || (TWeight + FWeight) == 0)
    return true;

  if (!Invert.has_value())
    return false;

  // Probability of reaching the end block directly (skipping the `then`
  // side); speculation pays off only when that edge is not highly likely.
  uint64_t EndWeight = *Invert ? TWeight : FWeight;
  BranchProbability BIEndProb =
      BranchProbability::getBranchProbability(EndWeight, TWeight + FWeight);
  BranchProbability Likely = TTI.getPredictableBranchThreshold();
  return BIEndProb < Likely;
}
3171
/// Speculate a conditional basic block flattening the CFG.
///
/// Note that this is a very risky transform currently. Speculating
/// instructions like this is most often not desirable. Instead, there is an MI
/// pass which can do it with full awareness of the resource constraints.
/// However, some cases are "obvious" and we should do directly. An example of
/// this is speculating a single, reasonably cheap instruction.
///
/// There is only one distinct advantage to flattening the CFG at the IR level:
/// it makes very common but simplistic optimizations such as are common in
/// instcombine and the DAG combiner more powerful by removing CFG edges and
/// modeling their effects with easier to reason about SSA value graphs.
///
///
/// An illustration of this transform is turning this IR:
/// \code
///   BB:
///     %cmp = icmp ult %x, %y
///     br i1 %cmp, label %EndBB, label %ThenBB
///   ThenBB:
///     %sub = sub %x, %y
///     br label BB2
///   EndBB:
///     %phi = phi [ %sub, %ThenBB ], [ 0, %BB ]
///     ...
/// \endcode
///
/// Into this IR:
/// \code
///   BB:
///     %cmp = icmp ult %x, %y
///     %sub = sub %x, %y
///     %cond = select i1 %cmp, 0, %sub
///     ...
/// \endcode
///
/// \returns true if the conditional block is removed.
bool SimplifyCFGOpt::speculativelyExecuteBB(CondBrInst *BI,
                                            BasicBlock *ThenBB) {
  if (!Options.SpeculateBlocks)
    return false;

  // Be conservative for now. FP select instruction can often be expensive.
  Value *BrCond = BI->getCondition();
  if (isa<FCmpInst>(BrCond))
    return false;

  BasicBlock *BB = BI->getParent();
  BasicBlock *EndBB = ThenBB->getTerminator()->getSuccessor(0);
  InstructionCost Budget =
  // NOTE(review): the initializer of Budget was dropped during extraction
  // here — confirm against upstream.

  // If ThenBB is actually on the false edge of the conditional branch, remember
  // to swap the select operands later.
  bool Invert = false;
  if (ThenBB != BI->getSuccessor(0)) {
    assert(ThenBB == BI->getSuccessor(1) && "No edge from 'if' block?");
    Invert = true;
  }
  assert(EndBB == BI->getSuccessor(!Invert) && "No edge from to end block");

  if (!isProfitableToSpeculate(BI, Invert, TTI))
    return false;

  // Keep a count of how many times instructions are used within ThenBB when
  // they are candidates for sinking into ThenBB. Specifically:
  // - They are defined in BB, and
  // - They have no side effects, and
  // - All of their uses are in ThenBB.
  SmallDenseMap<Instruction *, unsigned, 4> SinkCandidateUseCounts;

  SmallVector<Instruction *, 4> SpeculatedPseudoProbes;

  unsigned SpeculatedInstructions = 0;
  bool HoistLoadsStores = Options.HoistLoadsStoresWithCondFaulting;
  SmallVector<Instruction *, 2> SpeculatedConditionalLoadsStores;
  Value *SpeculatedStoreValue = nullptr;
  StoreInst *SpeculatedStore = nullptr;
  EphemeralValueTracker EphTracker;
  for (Instruction &I : reverse(drop_end(*ThenBB))) {
    // Skip pseudo probes. The consequence is we lose track of the branch
    // probability for ThenBB, which is fine since the optimization here takes
    // place regardless of the branch probability.
    if (isa<PseudoProbeInst>(I)) {
      // The probe should be deleted so that it will not be over-counted when
      // the samples collected on the non-conditional path are counted towards
      // the conditional path. We leave it for the counts inference algorithm to
      // figure out a proper count for an unknown probe.
      SpeculatedPseudoProbes.push_back(&I);
      continue;
    }

    // Ignore ephemeral values, they will be dropped by the transform.
    if (EphTracker.track(&I))
      continue;

    // Only speculatively execute a single instruction (not counting the
    // terminator) for now.
    bool IsSafeCheapLoadStore = HoistLoadsStores &&
    // NOTE(review): the middle operand(s) of this condition were dropped
    // during extraction here — confirm against upstream.
                                SpeculatedConditionalLoadsStores.size() <
    // Not count load/store into cost if target supports conditional faulting
    // b/c it's cheap to speculate it.
    if (IsSafeCheapLoadStore)
      SpeculatedConditionalLoadsStores.push_back(&I);
    else
      ++SpeculatedInstructions;

    if (SpeculatedInstructions > 1)
      return false;

    // Don't hoist the instruction if it's unsafe or expensive.
    if (!IsSafeCheapLoadStore &&
    // NOTE(review): a safety-check operand of this condition was dropped
    // during extraction here.
        !(HoistCondStores && !SpeculatedStoreValue &&
          (SpeculatedStoreValue =
               isSafeToSpeculateStore(&I, BB, ThenBB, EndBB))))
      return false;
    if (!IsSafeCheapLoadStore && !SpeculatedStoreValue &&
    // NOTE(review): the cost-comparison operand of this condition was
    // dropped during extraction here.
      return false;

    // Store the store speculation candidate.
    if (!SpeculatedStore && SpeculatedStoreValue)
      SpeculatedStore = cast<StoreInst>(&I);

    // Do not hoist the instruction if any of its operands are defined but not
    // used in BB. The transformation will prevent the operand from
    // being sunk into the use block.
    for (Use &Op : I.operands()) {
      // NOTE(review): the definition of `OpI` (presumably a dyn_cast of Op to
      // Instruction) was dropped during extraction here.
      if (!OpI || OpI->getParent() != BB || OpI->mayHaveSideEffects())
        continue; // Not a candidate for sinking.

      ++SinkCandidateUseCounts[OpI];
    }
  }

  // Consider any sink candidates which are only used in ThenBB as costs for
  // speculation. Note, while we iterate over a DenseMap here, we are summing
  // and so iteration order isn't significant.
  for (const auto &[Inst, Count] : SinkCandidateUseCounts)
    if (Inst->hasNUses(Count)) {
      ++SpeculatedInstructions;
      if (SpeculatedInstructions > 1)
        return false;
    }

  // Check that we can insert the selects and that it's not too expensive to do
  // so.
  bool Convert =
      SpeculatedStore != nullptr || !SpeculatedConditionalLoadsStores.empty();
  // NOTE(review): the declaration of `Cost` was dropped during extraction
  // here.
  Convert |= validateAndCostRequiredSelects(BB, ThenBB, EndBB,
                                            SpeculatedInstructions, Cost, TTI);
  if (!Convert || Cost > Budget)
    return false;

  // If we get here, we can hoist the instruction and if-convert.
  LLVM_DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";);

  Instruction *Sel = nullptr;
  // Insert a select of the value of the speculated store.
  if (SpeculatedStoreValue) {
    IRBuilder<NoFolder> Builder(BI);
    Value *OrigV = SpeculatedStore->getValueOperand();
    Value *TrueV = SpeculatedStore->getValueOperand();
    Value *FalseV = SpeculatedStoreValue;
    if (Invert)
      std::swap(TrueV, FalseV);
    Value *S = Builder.CreateSelect(
        BrCond, TrueV, FalseV, "spec.store.select", BI);
    Sel = cast<Instruction>(S);
    SpeculatedStore->setOperand(0, S);
    SpeculatedStore->applyMergedLocation(BI->getDebugLoc(),
                                         SpeculatedStore->getDebugLoc());
    // The value stored is still conditional, but the store itself is now
    // unconditionally executed, so we must be sure that any linked dbg.assign
    // intrinsics are tracking the new stored value (the result of the
    // select). If we don't, and the store were to be removed by another pass
    // (e.g. DSE), then we'd eventually end up emitting a location describing
    // the conditional value, unconditionally.
    //
    // === Before this transformation ===
    // pred:
    //   store %one, %x.dest, !DIAssignID !1
    //   dbg.assign %one, "x", ..., !1, ...
    //   br %cond if.then
    //
    // if.then:
    //   store %two, %x.dest, !DIAssignID !2
    //   dbg.assign %two, "x", ..., !2, ...
    //
    // === After this transformation ===
    // pred:
    //   store %one, %x.dest, !DIAssignID !1
    //   dbg.assign %one, "x", ..., !1
    //   ...
    //   %merge = select %cond, %two, %one
    //   store %merge, %x.dest, !DIAssignID !2
    //   dbg.assign %merge, "x", ..., !2
    for (DbgVariableRecord *DbgAssign :
         at::getDVRAssignmentMarkers(SpeculatedStore))
      if (llvm::is_contained(DbgAssign->location_ops(), OrigV))
        DbgAssign->replaceVariableLocationOp(OrigV, S);
  }

  // Metadata can be dependent on the condition we are hoisting above.
  // Strip all UB-implying metadata on the instruction. Drop the debug loc
  // to avoid making it appear as if the condition is a constant, which would
  // be misleading while debugging.
  // Similarly strip attributes that maybe dependent on condition we are
  // hoisting above.
  for (auto &I : make_early_inc_range(*ThenBB)) {
    if (!SpeculatedStoreValue || &I != SpeculatedStore) {
      I.dropLocation();
    }
    I.dropUBImplyingAttrsAndMetadata();

    // Drop ephemeral values.
    if (EphTracker.contains(&I)) {
      I.replaceAllUsesWith(PoisonValue::get(I.getType()));
      I.eraseFromParent();
    }
  }

  // Hoist the instructions.
  // Drop DbgVariableRecords attached to these instructions.
  for (auto &It : *ThenBB)
    for (DbgRecord &DR : make_early_inc_range(It.getDbgRecordRange()))
      // Drop all records except assign-kind DbgVariableRecords (dbg.assign
      // equivalent).
      if (DbgVariableRecord *DVR = dyn_cast<DbgVariableRecord>(&DR);
          !DVR || !DVR->isDbgAssign())
        It.dropOneDbgRecord(&DR);
  BB->splice(BI->getIterator(), ThenBB, ThenBB->begin(),
             std::prev(ThenBB->end()));

  if (!SpeculatedConditionalLoadsStores.empty())
    hoistConditionalLoadsStores(BI, SpeculatedConditionalLoadsStores, Invert,
                                Sel);

  // Insert selects and rewrite the PHI operands.
  IRBuilder<NoFolder> Builder(BI);
  for (PHINode &PN : EndBB->phis()) {
    unsigned OrigI = PN.getBasicBlockIndex(BB);
    unsigned ThenI = PN.getBasicBlockIndex(ThenBB);
    Value *OrigV = PN.getIncomingValue(OrigI);
    Value *ThenV = PN.getIncomingValue(ThenI);

    // Skip PHIs which are trivial.
    if (OrigV == ThenV)
      continue;

    // Create a select whose true value is the speculatively executed value and
    // false value is the pre-existing value. Swap them if the branch
    // destinations were inverted.
    Value *TrueV = ThenV, *FalseV = OrigV;
    if (Invert)
      std::swap(TrueV, FalseV);
    Value *V = Builder.CreateSelect(BrCond, TrueV, FalseV, "spec.select", BI);
    PN.setIncomingValue(OrigI, V);
    PN.setIncomingValue(ThenI, V);
  }

  // Remove speculated pseudo probes.
  for (Instruction *I : SpeculatedPseudoProbes)
    I->eraseFromParent();

  ++NumSpeculations;
  return true;
}
3446
3448
3449// Return false if number of blocks searched is too much.
3450static bool findReaching(BasicBlock *BB, BasicBlock *DefBB,
3451 BlocksSet &ReachesNonLocalUses) {
3452 if (BB == DefBB)
3453 return true;
3454 if (!ReachesNonLocalUses.insert(BB).second)
3455 return true;
3456
3457 if (ReachesNonLocalUses.size() > MaxJumpThreadingLiveBlocks)
3458 return false;
3459 for (BasicBlock *Pred : predecessors(BB))
3460 if (!findReaching(Pred, DefBB, ReachesNonLocalUses))
3461 return false;
3462 return true;
3463}
3464
/// Return true if we can thread a branch across this block.
// NOTE(review): the first line of this function's signature (its name and
// the block parameter) was dropped during extraction here.
                                          BlocksSet &NonLocalUseBlocks) {
  int Size = 0;
  EphemeralValueTracker EphTracker;

  // Walk the loop in reverse so that we can identify ephemeral values properly
  // (values only feeding assumes).
  for (Instruction &I : reverse(*BB)) {
    // Can't fold blocks that contain noduplicate or convergent calls.
    if (CallInst *CI = dyn_cast<CallInst>(&I))
      if (CI->cannotDuplicate() || CI->isConvergent())
        return false;

    // Ignore ephemeral values which are deleted during codegen.
    // We will delete Phis while threading, so Phis should not be accounted in
    // block's size.
    if (!EphTracker.track(&I) && !isa<PHINode>(I)) {
      if (Size++ > MaxSmallBlockSize)
        return false; // Don't clone large BB's.
    }

    // Record blocks with non-local uses of values defined in the current basic
    // block.
    for (User *U : I.users()) {
      // NOTE(review): the line defining `UI` (presumably a cast of U to
      // Instruction) was dropped during extraction here.
      BasicBlock *UsedInBB = UI->getParent();
      if (UsedInBB == BB) {
        if (isa<PHINode>(UI))
          return false;
      } else
        NonLocalUseBlocks.insert(UsedInBB);
    }

    // Looks ok, continue checking.
  }

  return true;
}
3504
// Return the constant value V is known to have on the CFG edge From->To, or
// nullptr if no constant is known. The only case handled here is From ending
// in a conditional branch on V with distinct successors.
// NOTE(review): the opening signature line of this definition was lost in
// extraction; the parameter list continues below.
3506                                        BasicBlock *To) {
3507  // Don't look past the block defining the value, we might get the value from
3508  // a previous loop iteration.
3509  auto *I = dyn_cast<Instruction>(V);
3510  if (I && I->getParent() == To)
3511    return nullptr;
3512
3513  // We know the value if the From block branches on it.
3514  auto *BI = dyn_cast<CondBrInst>(From->getTerminator());
3515  if (BI && BI->getCondition() == V &&
3516      BI->getSuccessor(0) != BI->getSuccessor(1))
3517    return BI->getSuccessor(0) == To ? ConstantInt::getTrue(BI->getContext())
// NOTE(review): the false-arm of this conditional expression (presumably
// ConstantInt::getFalse) was lost in extraction.
3519
3520  return nullptr;
3521}
3522
3523/// If we have a conditional branch on something for which we know the constant
3524/// value in predecessors (e.g. a phi node in the current block), thread edges
3525/// from the predecessor to their ultimate destination.
// Returns: false = no change, true = changed, std::nullopt = changed and the
// caller should re-run to simplify further constants.
// NOTE(review): the function-name line and the declarations of KnownValues
// (a constant -> predecessor-set map) and PN (Cond as a PHINode) were lost
// in extraction.
3526static std::optional<bool>
3528                                            const DataLayout &DL,
3529                                            AssumptionCache *AC) {
3531  BasicBlock *BB = BI->getParent();
3532  Value *Cond = BI->getCondition();
3534  if (PN && PN->getParent() == BB) {
3535    // Degenerate case of a single entry PHI.
3536    if (PN->getNumIncomingValues() == 1) {
// NOTE(review): the folding call for the single-entry PHI was lost in
// extraction.
3538      return true;
3539    }
3540
    // Gather, per known constant, the predecessors that feed it in.
3541    for (Use &U : PN->incoming_values())
3542      if (auto *CB = dyn_cast<ConstantInt>(U))
3543        KnownValues[CB].insert(PN->getIncomingBlock(U));
3544  } else {
3545    for (BasicBlock *Pred : predecessors(BB)) {
3546      if (ConstantInt *CB = getKnownValueOnEdge(Cond, Pred, BB))
3547        KnownValues[CB].insert(Pred);
3548    }
3549  }
3550
3551  if (KnownValues.empty())
3552    return false;
3553
3554  // Now we know that this block has multiple preds and two succs.
3555  // Check that the block is small enough and record which non-local blocks use
3556  // values defined in the block.
3557
3558  BlocksSet NonLocalUseBlocks;
3559  BlocksSet ReachesNonLocalUseBlocks;
3560  if (!blockIsSimpleEnoughToThreadThrough(BB, NonLocalUseBlocks))
3561    return false;
3562
3563  // Jump-threading can only be done to destinations where no values defined
3564  // in BB are live.
3565
3566  // Quickly check if both destinations have uses. If so, jump-threading cannot
3567  // be done.
3568  if (NonLocalUseBlocks.contains(BI->getSuccessor(0)) &&
3569      NonLocalUseBlocks.contains(BI->getSuccessor(1)))
3570    return false;
3571
3572  // Search backward from NonLocalUseBlocks to find which blocks
3573  // reach non-local uses.
3574  for (BasicBlock *UseBB : NonLocalUseBlocks)
3575    // Give up if too many blocks are searched.
3576    if (!findReaching(UseBB, BB, ReachesNonLocalUseBlocks))
3577      return false;
3578
  // Try each known constant in turn; the first threadable one is performed
  // and the function returns nullopt to signal a repeat.
3579  for (const auto &Pair : KnownValues) {
3580    ConstantInt *CB = Pair.first;
3581    ArrayRef<BasicBlock *> PredBBs = Pair.second.getArrayRef();
3582    BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
3583
3584    // Okay, we now know that all edges from PredBB should be revectored to
3585    // branch to RealDest.
3586    if (RealDest == BB)
3587      continue; // Skip self loops.
3588
3589    // Skip if the predecessor's terminator is an indirect branch.
3590    if (any_of(PredBBs, [](BasicBlock *PredBB) {
3591          return isa<IndirectBrInst>(PredBB->getTerminator());
3592        }))
3593      continue;
3594
3595    // Only revector to RealDest if no values defined in BB are live.
3596    if (ReachesNonLocalUseBlocks.contains(RealDest))
3597      continue;
3598
3599    LLVM_DEBUG({
3600      dbgs() << "Condition " << *Cond << " in " << BB->getName()
3601             << " has value " << *Pair.first << " in predecessors:\n";
3602      for (const BasicBlock *PredBB : Pair.second)
3603        dbgs() << "  " << PredBB->getName() << "\n";
3604      dbgs() << "Threading to destination " << RealDest->getName() << ".\n";
3605    });
3606
3607    // Split the predecessors we are threading into a new edge block. We'll
3608    // clone the instructions into this block, and then redirect it to RealDest.
3609    BasicBlock *EdgeBB = SplitBlockPredecessors(BB, PredBBs, ".critedge", DTU);
3610    if (!EdgeBB)
3611      continue;
3612
3613    // TODO: These just exist to reduce test diff, we can drop them if we like.
3614    EdgeBB->setName(RealDest->getName() + ".critedge");
3615    EdgeBB->moveBefore(RealDest);
3616
3617    // Update PHI nodes.
3618    addPredecessorToBlock(RealDest, EdgeBB, BB);
3619
3620    // BB may have instructions that are being threaded over. Clone these
3621    // instructions into EdgeBB. We know that there will be no uses of the
3622    // cloned instructions outside of EdgeBB.
3623    BasicBlock::iterator InsertPt = EdgeBB->getFirstInsertionPt();
3624    ValueToValueMapTy TranslateMap; // Track translated values.
3625    TranslateMap[Cond] = CB;
3626
3627    // RemoveDIs: track instructions that we optimise away while folding, so
3628    // that we can copy DbgVariableRecords from them later.
3629    BasicBlock::iterator SrcDbgCursor = BB->begin();
3630    for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) {
      // PHIs are not cloned: the incoming value for EdgeBB stands in for them.
3631      if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
3632        TranslateMap[PN] = PN->getIncomingValueForBlock(EdgeBB);
3633        continue;
3634      }
3635      // Clone the instruction.
3636      Instruction *N = BBI->clone();
3637      // Insert the new instruction into its new home.
3638      N->insertInto(EdgeBB, InsertPt);
3639
3640      if (BBI->hasName())
3641        N->setName(BBI->getName() + ".c");
3642
3643      // Update operands due to translation.
3644      // Key Instructions: Remap all the atom groups.
3645      if (const DebugLoc &DL = BBI->getDebugLoc())
3646        mapAtomInstance(DL, TranslateMap);
3647      RemapInstruction(N, TranslateMap,
// NOTE(review): the remap-flags argument line was lost in extraction.
3649
3650      // Check for trivial simplification.
3651      if (Value *V = simplifyInstruction(N, {DL, nullptr, nullptr, AC})) {
3652        if (!BBI->use_empty())
3653          TranslateMap[&*BBI] = V;
3654        if (!N->mayHaveSideEffects()) {
3655          N->eraseFromParent(); // Instruction folded away, don't need actual
3656                                // inst
3657          N = nullptr;
3658        }
3659      } else {
3660        if (!BBI->use_empty())
3661          TranslateMap[&*BBI] = N;
3662      }
3663      if (N) {
3664        // Copy all debug-info attached to instructions from the last we
3665        // successfully clone, up to this instruction (they might have been
3666        // folded away).
3667        for (; SrcDbgCursor != BBI; ++SrcDbgCursor)
3668          N->cloneDebugInfoFrom(&*SrcDbgCursor);
3669        SrcDbgCursor = std::next(BBI);
3670        // Clone debug-info on this instruction too.
3671        N->cloneDebugInfoFrom(&*BBI);
3672
3673        // Register the new instruction with the assumption cache if necessary.
3674        if (auto *Assume = dyn_cast<AssumeInst>(N))
3675          if (AC)
3676            AC->registerAssumption(Assume);
3677      }
3678    }
3679
  // Flush any debug records between the last cloned instruction and BI.
3680    for (; &*SrcDbgCursor != BI; ++SrcDbgCursor)
3681      InsertPt->cloneDebugInfoFrom(&*SrcDbgCursor);
3682    InsertPt->cloneDebugInfoFrom(BI);
3683
3684    BB->removePredecessor(EdgeBB);
3685    UncondBrInst *EdgeBI = cast<UncondBrInst>(EdgeBB->getTerminator());
3686    EdgeBI->setSuccessor(0, RealDest);
3687    EdgeBI->setDebugLoc(BI->getDebugLoc());
3688
3689    if (DTU) {
// NOTE(review): the declaration of the Updates vector was lost in extraction.
3691      Updates.push_back({DominatorTree::Delete, EdgeBB, BB});
3692      Updates.push_back({DominatorTree::Insert, EdgeBB, RealDest});
3693      DTU->applyUpdates(Updates);
3694    }
3695
3696    // For simplicity, we created a separate basic block for the edge. Merge
3697    // it back into the predecessor if possible. This not only avoids
3698    // unnecessary SimplifyCFG iterations, but also makes sure that we don't
3699    // bypass the check for trivial cycles above.
3700    MergeBlockIntoPredecessor(EdgeBB, DTU);
3701
3702    // Signal repeat, simplifying any other constants.
3703    return std::nullopt;
3704  }
3705
3706  return false;
3707}
3708
// Driver: repeatedly run the Impl until it stops requesting another pass.
// Returns true if any change was made.
3709bool SimplifyCFGOpt::foldCondBranchOnValueKnownInPredecessor(CondBrInst *BI) {
3710  // Note: If BB is a loop header then there is a risk that threading introduces
3711  // a non-canonical loop by moving a back edge. So we avoid this optimization
3712  // for loop headers if NeedCanonicalLoop is set.
3713  if (Options.NeedCanonicalLoop && is_contained(LoopHeaders, BI->getParent()))
3714    return false;
3715
3716  std::optional<bool> Result;
3717  bool EverChanged = false;
3718  do {
3719    // Note that None means "we changed things, but recurse further."
3720    Result =
// NOTE(review): the call to foldCondBranchOnValueKnownInPredecessorImpl was
// lost in extraction.
3722    EverChanged |= Result == std::nullopt || *Result;
3723  } while (Result == std::nullopt);
3724  return EverChanged;
3725}
3726
3727/// Given a BB that starts with the specified two-entry PHI node,
3728/// see if we can eliminate it.
// Converts a simple if/then/else diamond (or triangle) merging at BB into
// selects, hoisting the speculated instructions into the dominating block.
// NOTE(review): the opening signature lines (name, PN, TTI, DTU parameters)
// were lost in extraction; the parameter list continues below.
3731                                const DataLayout &DL,
3732                                bool SpeculateUnpredictables) {
3733  // Ok, this is a two entry PHI node.  Check to see if this is a simple "if
3734  // statement", which has a very simple dominance structure.  Basically, we
3735  // are trying to find the condition that is being branched on, which
3736  // subsequently causes this merge to happen.  We really want control
3737  // dependence information for this check, but simplifycfg can't keep it up
3738  // to date, and this catches most of the cases we care about anyway.
3739  BasicBlock *BB = PN->getParent();
3740
3741  BasicBlock *IfTrue, *IfFalse;
3742  CondBrInst *DomBI = GetIfCondition(BB, IfTrue, IfFalse);
3743  if (!DomBI)
3744    return false;
3745  Value *IfCond = DomBI->getCondition();
3746  // Don't bother if the branch will be constant folded trivially.
3747  if (isa<ConstantInt>(IfCond))
3748    return false;
3749
3750  BasicBlock *DomBlock = DomBI->getParent();
// NOTE(review): the declaration of the IfBlocks vector was lost in
// extraction.
3752  llvm::copy_if(PN->blocks(), std::back_inserter(IfBlocks),
3753                [](BasicBlock *IfBlock) {
3754                  return isa<UncondBrInst>(IfBlock->getTerminator());
3755                });
3756  assert((IfBlocks.size() == 1 || IfBlocks.size() == 2) &&
3757         "Will have either one or two blocks to speculate.");
3758
3759  // If the branch is non-unpredictable, see if we either predictably jump to
3760  // the merge bb (if we have only a single 'then' block), or if we predictably
3761  // jump to one specific 'then' block (if we have two of them).
3762  // It isn't beneficial to speculatively execute the code
3763  // from the block that we know is predictably not entered.
3764  bool IsUnpredictable = DomBI->getMetadata(LLVMContext::MD_unpredictable);
3765  if (!IsUnpredictable) {
3766    uint64_t TWeight, FWeight;
3767    if (extractBranchWeights(*DomBI, TWeight, FWeight) &&
3768        (TWeight + FWeight) != 0) {
3769      BranchProbability BITrueProb =
3770          BranchProbability::getBranchProbability(TWeight, TWeight + FWeight);
3771      BranchProbability Likely = TTI.getPredictableBranchThreshold();
3772      BranchProbability BIFalseProb = BITrueProb.getCompl();
3773      if (IfBlocks.size() == 1) {
3774        BranchProbability BIBBProb =
3775            DomBI->getSuccessor(0) == BB ? BITrueProb : BIFalseProb;
3776        if (BIBBProb >= Likely)
3777          return false;
3778      } else {
3779        if (BITrueProb >= Likely || BIFalseProb >= Likely)
3780          return false;
3781      }
3782    }
3783  }
3784
3785  // Don't try to fold an unreachable block. For example, the phi node itself
3786  // can't be the candidate if-condition for a select that we want to form.
3787  if (auto *IfCondPhiInst = dyn_cast<PHINode>(IfCond))
3788    if (IfCondPhiInst->getParent() == BB)
3789      return false;
3790
3791  // Okay, we found that we can merge this two-entry phi node into a select.
3792  // Doing so would require us to fold *all* two entry phi nodes in this block.
3793  // At some point this becomes non-profitable (particularly if the target
3794  // doesn't support cmov's).  Only do this transformation if there are two or
3795  // fewer PHI nodes in this block.
3796  unsigned NumPhis = 0;
3797  for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++NumPhis, ++I)
3798    if (NumPhis > 2)
3799      return false;
3800
3801  // Loop over the PHI's seeing if we can promote them all to select
3802  // instructions.  While we are at it, keep track of the instructions
3803  // that need to be moved to the dominating block.
3804  SmallPtrSet<Instruction *, 4> AggressiveInsts;
3805  SmallPtrSet<Instruction *, 2> ZeroCostInstructions;
3806  InstructionCost Cost = 0;
3807  InstructionCost Budget =
// NOTE(review): the budget initializer line was lost in extraction.
3809  if (SpeculateUnpredictables && IsUnpredictable)
3810    Budget += TTI.getBranchMispredictPenalty();
3811
3812  bool Changed = false;
3813  for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
3814    PHINode *PN = cast<PHINode>(II++);
    // Trivially simplifiable PHIs are folded away up front.
3815    if (Value *V = simplifyInstruction(PN, {DL, PN})) {
3816      PN->replaceAllUsesWith(V);
3817      PN->eraseFromParent();
3818      Changed = true;
3819      continue;
3820    }
3821
    // Both incoming values must be speculatable within the cost budget.
3822    if (!dominatesMergePoint(PN->getIncomingValue(0), BB, DomBI,
3823                             AggressiveInsts, Cost, Budget, TTI, AC,
3824                             ZeroCostInstructions) ||
3825        !dominatesMergePoint(PN->getIncomingValue(1), BB, DomBI,
3826                             AggressiveInsts, Cost, Budget, TTI, AC,
3827                             ZeroCostInstructions))
3828      return Changed;
3829  }
3830
3831  // If we folded the first phi, PN dangles at this point.  Refresh it.  If
3832  // we ran out of PHIs then we simplified them all.
3833  PN = dyn_cast<PHINode>(BB->begin());
3834  if (!PN)
3835    return true;
3836
3837  // Don't fold i1 branches on PHIs which contain binary operators or
3838  // (possibly inverted) select form of or/ands if their parameters are
3839  // an equality test.
3840  auto IsBinOpOrAndEq = [](Value *V) {
3841    CmpPredicate Pred;
3842    if (match(V, m_CombineOr(
// NOTE(review): parts of this matcher expression (original lines 3843 and
// 3846) were lost in extraction.
3844                     m_BinOp(m_Cmp(Pred, m_Value(), m_Value()), m_Value()),
3845                     m_BinOp(m_Value(), m_Cmp(Pred, m_Value(), m_Value()))),
3847                     m_Cmp(Pred, m_Value(), m_Value()))))) {
3848      return CmpInst::isEquality(Pred);
3849    }
3850    return false;
3851  };
3852  if (PN->getType()->isIntegerTy(1) &&
3853      (IsBinOpOrAndEq(PN->getIncomingValue(0)) ||
3854       IsBinOpOrAndEq(PN->getIncomingValue(1)) || IsBinOpOrAndEq(IfCond)))
3855    return Changed;
3856
3857  // If all PHI nodes are promotable, check to make sure that all instructions
3858  // in the predecessor blocks can be promoted as well. If not, we won't be able
3859  // to get rid of the control flow, so it's not worth promoting to select
3860  // instructions.
3861  for (BasicBlock *IfBlock : IfBlocks)
3862    for (BasicBlock::iterator I = IfBlock->begin(); !I->isTerminator(); ++I)
3863      if (!AggressiveInsts.count(&*I) && !I->isDebugOrPseudoInst()) {
3864        // This is not an aggressive instruction that we can promote.
3865        // Because of this, we won't be able to get rid of the control flow, so
3866        // the xform is not worth it.
3867        return Changed;
3868      }
3869
3870  // If either of the blocks has it's address taken, we can't do this fold.
3871  if (any_of(IfBlocks,
3872             [](BasicBlock *IfBlock) { return IfBlock->hasAddressTaken(); }))
3873    return Changed;
3874
3875  LLVM_DEBUG(dbgs() << "FOUND IF CONDITION!  " << *IfCond;
3876             if (IsUnpredictable) dbgs() << " (unpredictable)";
3877             dbgs() << "  T: " << IfTrue->getName()
3878                    << "  F: " << IfFalse->getName() << "\n");
3879
3880  // If we can still promote the PHI nodes after this gauntlet of tests,
3881  // do all of the PHI's now.
3882
3883  // Move all 'aggressive' instructions, which are defined in the
3884  // conditional parts of the if's up to the dominating block.
3885  for (BasicBlock *IfBlock : IfBlocks)
3886    hoistAllInstructionsInto(DomBlock, DomBI, IfBlock);
3887
3888  IRBuilder<NoFolder> Builder(DomBI);
3889  // Propagate fast-math-flags from phi nodes to replacement selects.
3890  while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
3891    // Change the PHI node into a select instruction.
3892    Value *TrueVal = PN->getIncomingValueForBlock(IfTrue);
3893    Value *FalseVal = PN->getIncomingValueForBlock(IfFalse);
3894
3895    Value *Sel = Builder.CreateSelectFMF(IfCond, TrueVal, FalseVal,
3896                                         isa<FPMathOperator>(PN) ? PN : nullptr,
3897                                         "", DomBI);
3898    PN->replaceAllUsesWith(Sel);
3899    Sel->takeName(PN);
3900    PN->eraseFromParent();
3901  }
3902
3903  // At this point, all IfBlocks are empty, so our if statement
3904  // has been flattened.  Change DomBlock to jump directly to our new block to
3905  // avoid other simplifycfg's kicking in on the diamond.
3906  Builder.CreateBr(BB);
3907
// NOTE(review): the declaration of the Updates vector was lost in extraction.
3909  if (DTU) {
3910    Updates.push_back({DominatorTree::Insert, DomBlock, BB});
3911    for (auto *Successor : successors(DomBlock))
3912      Updates.push_back({DominatorTree::Delete, DomBlock, Successor});
3913  }
3914
3915  DomBI->eraseFromParent();
3916  if (DTU)
3917    DTU->applyUpdates(Updates);
3918
3919  return true;
3920}
3921
// Build LHS op RHS, preferring a plain binary and/or when RHS cannot
// introduce poison that LHS did not already imply, and falling back to the
// poison-safe "logical" select form otherwise.
// NOTE(review): the opening signature lines of this definition (name,
// Builder, Opc, LHS parameters) were lost in extraction.
3924                              Value *RHS, const Twine &Name = "") {
3925  // Try to relax logical op to binary op.
3926  if (impliesPoison(RHS, LHS))
3927    return Builder.CreateBinOp(Opc, LHS, RHS, Name);
3928  if (Opc == Instruction::And)
3929    return Builder.CreateLogicalAnd(LHS, RHS, Name);
3930  if (Opc == Instruction::Or)
3931    return Builder.CreateLogicalOr(LHS, RHS, Name);
3932  llvm_unreachable("Invalid logical opcode");
3933}
3934
3935/// Return true if either PBI or BI has branch weight available, and store
3936/// the weights in {Pred|Succ}{True|False}Weight. If one of PBI and BI does
3937/// not have branch weight, use 1:1 as its weight.
// NOTE(review): the opening signature line of this definition (name, PBI, BI
// parameters) was lost in extraction; the parameter list continues below.
3939                                   uint64_t &PredTrueWeight,
3940                                   uint64_t &PredFalseWeight,
3941                                   uint64_t &SuccTrueWeight,
3942                                   uint64_t &SuccFalseWeight) {
3943  bool PredHasWeights =
3944      extractBranchWeights(*PBI, PredTrueWeight, PredFalseWeight);
3945  bool SuccHasWeights =
3946      extractBranchWeights(*BI, SuccTrueWeight, SuccFalseWeight);
3947  if (PredHasWeights || SuccHasWeights) {
    // Default the missing side to an even 1:1 split.
3948    if (!PredHasWeights)
3949      PredTrueWeight = PredFalseWeight = 1;
3950    if (!SuccHasWeights)
3951      SuccTrueWeight = SuccFalseWeight = 1;
3952    return true;
3953  } else {
3954    return false;
3955  }
3956}
3957
3958/// Determine if the two branches share a common destination and deduce a glue
3959/// that joins the branches' conditions to arrive at the common destination if
3960/// that would be profitable.
// Returns {common successor, And/Or opcode, whether PBI's condition must be
// inverted}, or nullopt when folding is unprofitable or no successor is
// shared.
3961static std::optional<std::tuple<BasicBlock *, Instruction::BinaryOps, bool>>
// NOTE(review): the line carrying the function name and first parameters
// (BI, PBI) was lost in extraction.
3963                                          const TargetTransformInfo *TTI) {
3964  assert(BI && PBI && "Both blocks must end with a conditional branches.");
// NOTE(review): the assert condition on the preceding line was partially
// lost in extraction.
3966         "PredBB must be a predecessor of BB.");
3967
3968  // We have the potential to fold the conditions together, but if the
3969  // predecessor branch is predictable, we may not want to merge them.
3970  uint64_t PTWeight, PFWeight;
3971  BranchProbability PBITrueProb, Likely;
3972  if (TTI && !PBI->getMetadata(LLVMContext::MD_unpredictable) &&
3973      extractBranchWeights(*PBI, PTWeight, PFWeight) &&
3974      (PTWeight + PFWeight) != 0) {
3975    PBITrueProb =
3976        BranchProbability::getBranchProbability(PTWeight, PTWeight + PFWeight);
3977    Likely = TTI->getPredictableBranchThreshold();
3978  }
3979
  // Four ways the two conditional branches can share a successor; each case
  // fixes the opcode and whether PBI's condition needs inverting.
3980  if (PBI->getSuccessor(0) == BI->getSuccessor(0)) {
3981    // Speculate the 2nd condition unless the 1st is probably true.
3982    if (PBITrueProb.isUnknown() || PBITrueProb < Likely)
3983      return {{BI->getSuccessor(0), Instruction::Or, false}};
3984  } else if (PBI->getSuccessor(1) == BI->getSuccessor(1)) {
3985    // Speculate the 2nd condition unless the 1st is probably false.
3986    if (PBITrueProb.isUnknown() || PBITrueProb.getCompl() < Likely)
3987      return {{BI->getSuccessor(1), Instruction::And, false}};
3988  } else if (PBI->getSuccessor(0) == BI->getSuccessor(1)) {
3989    // Speculate the 2nd condition unless the 1st is probably true.
3990    if (PBITrueProb.isUnknown() || PBITrueProb < Likely)
3991      return {{BI->getSuccessor(1), Instruction::And, true}};
3992  } else if (PBI->getSuccessor(1) == BI->getSuccessor(0)) {
3993    // Speculate the 2nd condition unless the 1st is probably false.
3994    if (PBITrueProb.isUnknown() || PBITrueProb.getCompl() < Likely)
3995      return {{BI->getSuccessor(0), Instruction::Or, true}};
3996  }
3997  return std::nullopt;
3998}
3999
// Fold BI's block into its predecessor PBI's block by combining the two
// branch conditions with the opcode chosen by
// shouldFoldCondBranchesToCommonDestination, updating PHIs, branch weights,
// debug info and the dominator tree along the way. Always returns true.
// NOTE(review): the opening signature line of this definition (name, BI, PBI
// parameters) was lost in extraction; the parameter list continues below.
4001                                             DomTreeUpdater *DTU,
4002                                             MemorySSAUpdater *MSSAU,
4003                                             const TargetTransformInfo *TTI) {
4004  BasicBlock *BB = BI->getParent();
4005  BasicBlock *PredBlock = PBI->getParent();
4006
4007  // Determine if the two branches share a common destination.
4008  BasicBlock *CommonSucc;
// NOTE(review): the declaration of Opc and the call computing the recipe
// (original lines 4009 and 4012) were lost in extraction.
4010  bool InvertPredCond;
4011  std::tie(CommonSucc, Opc, InvertPredCond) =
4013
4014  LLVM_DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB);
4015
4016  IRBuilder<> Builder(PBI);
4017  // The builder is used to create instructions to eliminate the branch in BB.
4018  // If BB's terminator has !annotation metadata, add it to the new
4019  // instructions.
4020  Builder.CollectMetadataToCopy(BB->getTerminator(),
4021                                {LLVMContext::MD_annotation});
4022
4023  // If we need to invert the condition in the pred block to match, do so now.
4024  if (InvertPredCond) {
4025    InvertBranch(PBI, Builder);
4026  }
4027
4028  BasicBlock *UniqueSucc =
4029      PBI->getSuccessor(0) == BB ? BI->getSuccessor(0) : BI->getSuccessor(1);
4030
4031  // Before cloning instructions, notify the successor basic block that it
4032  // is about to have a new predecessor. This will update PHI nodes,
4033  // which will allow us to update live-out uses of bonus instructions.
4034  addPredecessorToBlock(UniqueSucc, PredBlock, BB, MSSAU);
4035
4036  // Try to update branch weights.
4037  uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
4038  SmallVector<uint64_t, 2> MDWeights;
4039  if (extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight,
4040                             SuccTrueWeight, SuccFalseWeight)) {
4041
4042    if (PBI->getSuccessor(0) == BB) {
4043      // PBI: br i1 %x, BB, FalseDest
4044      // BI:  br i1 %y, UniqueSucc, FalseDest
4045      // TrueWeight is TrueWeight for PBI * TrueWeight for BI.
4046      MDWeights.push_back(PredTrueWeight * SuccTrueWeight);
4047      // FalseWeight is FalseWeight for PBI * TotalWeight for BI +
4048      //               TrueWeight for PBI * FalseWeight for BI.
4049      // We assume that total weights of a CondBrInst can fit into 32 bits.
4050      // Therefore, we will not have overflow using 64-bit arithmetic.
4051      MDWeights.push_back(PredFalseWeight * (SuccFalseWeight + SuccTrueWeight) +
4052                          PredTrueWeight * SuccFalseWeight);
4053    } else {
4054      // PBI: br i1 %x, TrueDest, BB
4055      // BI:  br i1 %y, TrueDest, UniqueSucc
4056      // TrueWeight is TrueWeight for PBI * TotalWeight for BI +
4057      //              FalseWeight for PBI * TrueWeight for BI.
4058      MDWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) +
4059                          PredFalseWeight * SuccTrueWeight);
4060      // FalseWeight is FalseWeight for PBI * FalseWeight for BI.
4061      MDWeights.push_back(PredFalseWeight * SuccFalseWeight);
4062    }
4063
4064    setFittedBranchWeights(*PBI, MDWeights, /*IsExpected=*/false,
4065                           /*ElideAllZero=*/true);
4066
4067    // TODO: If BB is reachable from all paths through PredBlock, then we
4068    // could replace PBI's branch probabilities with BI's.
4069  } else
4070    PBI->setMetadata(LLVMContext::MD_prof, nullptr);
4071
4072  // Now, update the CFG.
4073  PBI->setSuccessor(PBI->getSuccessor(0) != BB, UniqueSucc);
4074
4075  if (DTU)
4076    DTU->applyUpdates({{DominatorTree::Insert, PredBlock, UniqueSucc},
4077                       {DominatorTree::Delete, PredBlock, BB}});
4078
4079  // If BI was a loop latch, it may have had associated loop metadata.
4080  // We need to copy it to the new latch, that is, PBI.
4081  if (MDNode *LoopMD = BI->getMetadata(LLVMContext::MD_loop))
4082    PBI->setMetadata(LLVMContext::MD_loop, LoopMD);
4083
4084  ValueToValueMapTy VMap; // maps original values to cloned values
// NOTE(review): the bonus-instruction cloning call (original line 4085) was
// lost in extraction.
4086
4087  Module *M = BB->getModule();
4088
4089  PredBlock->getTerminator()->cloneDebugInfoFrom(BB->getTerminator());
4090  for (DbgVariableRecord &DVR :
// NOTE(review): the range expression and remap-flags lines (original 4091
// and 4093) were lost in extraction.
4092    RemapDbgRecord(M, &DVR, VMap,
4094  }
4095
4096  // Now that the Cond was cloned into the predecessor basic block,
4097  // or/and the two conditions together.
4098  Value *BICond = VMap[BI->getCondition()];
4099  PBI->setCondition(
4100      createLogicalOp(Builder, Opc, PBI->getCondition(), BICond, "or.cond"));
// NOTE(review): original line 4101 was lost in extraction.
4102  if (auto *SI = dyn_cast<SelectInst>(PBI->getCondition()))
4103    if (!MDWeights.empty()) {
4104      assert(isSelectInRoleOfConjunctionOrDisjunction(SI));
4105      setFittedBranchWeights(*SI, {MDWeights[0], MDWeights[1]},
4106                             /*IsExpected=*/false, /*ElideAllZero=*/true);
4107    }
4108
4109  ++NumFoldBranchToCommonDest;
4110  return true;
4111}
4112
4113/// Return if an instruction's type or any of its operands' types are a vector
4114/// type.
4115static bool isVectorOp(Instruction &I) {
4116 return I.getType()->isVectorTy() || any_of(I.operands(), [](Use &U) {
4117 return U->getType()->isVectorTy();
4118 });
4119}
4120
4121/// If this basic block is simple enough, and if a predecessor branches to us
4122/// and one of our successors, fold the block into the predecessor and use
4123/// logical operations to pick the right destination.
// NOTE(review): the opening signature line of this definition (name, BI, DTU
// parameters) was lost in extraction; the parameter list continues below.
4125                                  MemorySSAUpdater *MSSAU,
4126                                  const TargetTransformInfo *TTI,
4127                                  unsigned BonusInstThreshold) {
4128  BasicBlock *BB = BI->getParent();
// NOTE(review): several setup lines (original 4129-4131, 4133, 4135 —
// presumably declaring Cond and early-exit checks) were lost in extraction.
4132
4134
4136      Cond->getParent() != BB || !Cond->hasOneUse())
4137    return false;
4138
4139  // Finally, don't infinitely unroll conditional loops.
4140  if (is_contained(successors(BB), BB))
4141    return false;
4142
4143  // With which predecessors will we want to deal with?
// NOTE(review): the declaration of the Preds vector was lost in extraction.
4145  for (BasicBlock *PredBlock : predecessors(BB)) {
4146    CondBrInst *PBI = dyn_cast<CondBrInst>(PredBlock->getTerminator());
4147
4148    // Check that we have two conditional branches.  If there is a PHI node in
4149    // the common successor, verify that the same value flows in from both
4150    // blocks.
4151    if (!PBI || !safeToMergeTerminators(BI, PBI))
4152      continue;
4153
4154    // Determine if the two branches share a common destination.
4155    BasicBlock *CommonSucc;
// NOTE(review): the declaration of Opc (original line 4156) was lost in
// extraction.
4157    bool InvertPredCond;
4158    if (auto Recipe = shouldFoldCondBranchesToCommonDestination(BI, PBI, TTI))
4159      std::tie(CommonSucc, Opc, InvertPredCond) = *Recipe;
4160    else
4161      continue;
4162
4163    // Check the cost of inserting the necessary logic before performing the
4164    // transformation.
4165    if (TTI) {
4166      Type *Ty = BI->getCondition()->getType();
4167      InstructionCost Cost = TTI->getArithmeticInstrCost(Opc, Ty, CostKind);
      // Inverting a reusable non-compare condition needs an extra xor.
4168      if (InvertPredCond && (!PBI->getCondition()->hasOneUse() ||
4169                             !isa<CmpInst>(PBI->getCondition())))
4170        Cost += TTI->getArithmeticInstrCost(Instruction::Xor, Ty, CostKind);
4171
// NOTE(review): the cost-threshold comparison (original line 4172) was lost
// in extraction.
4173        continue;
4174    }
4175
4176    // Ok, we do want to deal with this predecessor. Record it.
4177    Preds.emplace_back(PredBlock);
4178  }
4179
4180  // If there aren't any predecessors into which we can fold,
4181  // don't bother checking the cost.
4182  if (Preds.empty())
4183    return false;
4184
4185  // Only allow this transformation if computing the condition doesn't involve
4186  // too many instructions and these involved instructions can be executed
4187  // unconditionally. We denote all involved instructions except the condition
4188  // as "bonus instructions", and only allow this transformation when the
4189  // number of the bonus instructions we'll need to create when cloning into
4190  // each predecessor does not exceed a certain threshold.
4191  unsigned NumBonusInsts = 0;
4192  bool SawVectorOp = false;
4193  const unsigned PredCount = Preds.size();
4194  for (Instruction &I : *BB) {
4195    // Don't check the branch condition comparison itself.
4196    if (&I == Cond)
4197      continue;
4198    // Ignore the terminator.
// NOTE(review): the terminator check (original line 4199) and the
// safe-to-speculate check (original line 4202) were lost in extraction.
4200      continue;
4201    // I must be safe to execute unconditionally.
4203      return false;
4204    SawVectorOp |= isVectorOp(I);
4205
4206    // Account for the cost of duplicating this instruction into each
4207    // predecessor. Ignore free instructions.
4208    if (!TTI || TTI->getInstructionCost(&I, CostKind) !=
// NOTE(review): the TCC_Free comparand (original line 4209) was lost in
// extraction.
4210      NumBonusInsts += PredCount;
4211
4212      // Early exits once we reach the limit.
4213      if (NumBonusInsts >
4214          BonusInstThreshold * BranchFoldToCommonDestVectorMultiplier)
4215        return false;
4216    }
4217
    // Every use of I must stay rewritable after cloning: either a PHI use
    // coming in from BB, or a use later in BB itself.
4218    auto IsBCSSAUse = [BB, &I](Use &U) {
4219      auto *UI = cast<Instruction>(U.getUser());
4220      if (auto *PN = dyn_cast<PHINode>(UI))
4221        return PN->getIncomingBlock(U) == BB;
4222      return UI->getParent() == BB && I.comesBefore(UI);
4223    };
4224
4225    // Does this instruction require rewriting of uses?
4226    if (!all_of(I.uses(), IsBCSSAUse))
4227      return false;
4228  }
4229  if (NumBonusInsts >
4230      BonusInstThreshold *
4231          (SawVectorOp ? BranchFoldToCommonDestVectorMultiplier : 1))
4232    return false;
4233
4234  // Ok, we have the budget. Perform the transformation.
  // Only the first recorded predecessor is folded per invocation; the caller
  // re-runs SimplifyCFG to pick up the rest.
4235  for (BasicBlock *PredBlock : Preds) {
4236    auto *PBI = cast<CondBrInst>(PredBlock->getTerminator());
4237    return performBranchToCommonDestFolding(BI, PBI, DTU, MSSAU, TTI);
4238  }
4239  return false;
4240}
4241
4242// If there is only one store in BB1 and BB2, return it, otherwise return
4243// nullptr.
// Either block pointer may be null, in which case it is skipped.
// NOTE(review): the opening signature line of this definition (name, BB1,
// BB2 parameters) was lost in extraction.
4245  StoreInst *S = nullptr;
4246  for (auto *BB : {BB1, BB2}) {
4247    if (!BB)
4248      continue;
4249    for (auto &I : *BB)
4250      if (auto *SI = dyn_cast<StoreInst>(&I)) {
4251        if (S)
4252          // Multiple stores seen.
4253          return nullptr;
4254        else
4255          S = SI;
4256      }
4257  }
4258  return S;
4259}
4260
// Make V (defined in BB) available in BB's single successor via a PHI node,
// reusing an existing suitable PHI when possible; see the long comment below
// for the exact contract of AlternativeV.
// NOTE(review): the opening signature line of this definition (name, V, BB
// parameters) was lost in extraction; the parameter list continues below.
4262                                        Value *AlternativeV = nullptr) {
4263  // PHI is going to be a PHI node that allows the value V that is defined in
4264  // BB to be referenced in BB's only successor.
4265  //
4266  // If AlternativeV is nullptr, the only value we care about in PHI is V. It
4267  // doesn't matter to us what the other operand is (it'll never get used). We
4268  // could just create a new PHI with an undef incoming value, but that could
4269  // increase register pressure if EarlyCSE/InstCombine can't fold it with some
4270  // other PHI. So here we directly look for some PHI in BB's successor with V
4271  // as an incoming operand. If we find one, we use it, else we create a new
4272  // one.
4273  //
4274  // If AlternativeV is not nullptr, we care about both incoming values in PHI.
4275  // PHI must be exactly: phi <ty> [ %BB, %V ], [ %OtherBB, %AlternativeV]
4276  // where OtherBB is the single other predecessor of BB's only successor.
4277  PHINode *PHI = nullptr;
4278  BasicBlock *Succ = BB->getSingleSuccessor();
4279
  // First, try to reuse an existing PHI that already matches.
4280  for (auto I = Succ->begin(); isa<PHINode>(I); ++I)
4281    if (cast<PHINode>(I)->getIncomingValueForBlock(BB) == V) {
4282      PHI = cast<PHINode>(I);
4283      if (!AlternativeV)
4284        break;
4285
4286      assert(Succ->hasNPredecessors(2));
4287      auto PredI = pred_begin(Succ);
4288      BasicBlock *OtherPredBB = *PredI == BB ? *++PredI : *PredI;
4289      if (PHI->getIncomingValueForBlock(OtherPredBB) == AlternativeV)
4290        break;
      // Wrong alternative value; keep looking.
4291      PHI = nullptr;
4292    }
4293  if (PHI)
4294    return PHI;
4295
4296  // If V is not an instruction defined in BB, just return it.
4297  if (!AlternativeV &&
4298      (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB))
4299    return V;
4300
  // Otherwise build a fresh PHI at the top of the successor.
4301  PHI = PHINode::Create(V->getType(), 2, "simplifycfg.merge");
4302  PHI->insertBefore(Succ->begin());
4303  PHI->addIncoming(V, BB);
4304  for (BasicBlock *PredBB : predecessors(Succ))
4305    if (PredBB != BB)
4306      PHI->addIncoming(
4307          AlternativeV ? AlternativeV : PoisonValue::get(V->getType()), PredBB);
4308  return PHI;
4309}
4310
    BasicBlock *PTB, BasicBlock *PFB, BasicBlock *QTB, BasicBlock *QFB,
    BasicBlock *PostBB, Value *Address, bool InvertPCond, bool InvertQCond,
    DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) {
  // NOTE(review): the opening line of this definition (upstream:
  // `static bool mergeConditionalStoreToAddress(`) is elided in this excerpt,
  // as are a handful of interior lines flagged with NOTE(review) below.
  //
  // Given the P/Q diamond-or-triangle structure described in
  // mergeConditionalStores, try to sink the two conditional stores to Address
  // (one from PTB/PFB, one from QTB/QFB) into a single store in PostBB,
  // predicated on the disjunction of the two branch conditions.  Returns true
  // iff the rewrite was performed.
  //
  // For every pointer, there must be exactly two stores, one coming from
  // PTB or PFB, and the other from QTB or QFB. We don't support more than one
  // store (to any address) in PTB,PFB or QTB,QFB.
  // FIXME: We could relax this restriction with a bit more work and performance
  // testing.
  StoreInst *PStore = findUniqueStoreInBlocks(PTB, PFB);
  StoreInst *QStore = findUniqueStoreInBlocks(QTB, QFB);
  if (!PStore || !QStore)
    return false;

  // Now check the stores are compatible: both must be unordered (neither
  // volatile nor atomic) and store values of the same type, so a single merged
  // store can stand in for both.
  if (!QStore->isUnordered() || !PStore->isUnordered() ||
      PStore->getValueOperand()->getType() !=
          QStore->getValueOperand()->getType())
    return false;

  // Check that sinking the store won't cause program behavior changes. Sinking
  // the store out of the Q blocks won't change any behavior as we're sinking
  // from a block to its unconditional successor. But we're moving a store from
  // the P blocks down through the middle block (QBI) and past both QFB and QTB.
  // So we need to check that there are no aliasing loads or stores in
  // QBI, QTB and QFB. We also need to check there are no conflicting memory
  // operations between PStore and the end of its parent block.
  //
  // The ideal way to do this is to query AliasAnalysis, but we don't
  // preserve AA currently so that is dangerous. Be super safe and just
  // check there are no other memory operations at all.
  for (auto &I : *QFB->getSinglePredecessor())
    if (I.mayReadOrWriteMemory())
      return false;
  for (auto &I : *QFB)
    if (&I != QStore && I.mayReadOrWriteMemory())
      return false;
  if (QTB)
    for (auto &I : *QTB)
      if (&I != QStore && I.mayReadOrWriteMemory())
        return false;
  for (auto I = BasicBlock::iterator(PStore), E = PStore->getParent()->end();
       I != E; ++I)
    if (&*I != PStore && I->mayReadOrWriteMemory())
      return false;

  // If we're not in aggressive mode, we only optimize if we have some
  // confidence that by optimizing we'll allow P and/or Q to be if-converted.
  auto IsWorthwhile = [&](BasicBlock *BB, ArrayRef<StoreInst *> FreeStores) {
    if (!BB)
      return true;
    // Heuristic: if the block can be if-converted/phi-folded and the
    // instructions inside are all cheap (arithmetic/GEPs), it's worthwhile to
    // thread this store.
    InstructionCost Cost = 0;
    InstructionCost Budget =
    // NOTE(review): the initializer of Budget is elided in this excerpt.
    for (auto &I : *BB) {
      // Consider terminator instruction to be free.
      if (I.isTerminator())
        continue;
      // If this is one of the stores that we want to speculate out of this BB,
      // then don't count its cost, consider it to be free.
      if (auto *S = dyn_cast<StoreInst>(&I))
        // NOTE(review): llvm::find returns an iterator -- here a raw pointer
        // into a std::array -- which is contextually converted to bool and is
        // always non-null, so this condition is always true.  It looks like it
        // was meant to be llvm::is_contained(FreeStores, S); confirm against
        // upstream before relying on the cost accounting below.
        if (llvm::find(FreeStores, S))
          continue;
      // Else, we have a white-list of instructions that we are ok speculating.
      // NOTE(review): the guarding condition for this early return (upstream a
      // speculation-safety check on I) is elided in this excerpt.
        return false; // Not in white-list - not worthwhile folding.
      // And finally, if this is a non-free instruction that we are okay
      // speculating, ensure that we consider the speculation budget.
      Cost +=
          TTI.getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
      if (Cost > Budget)
        return false; // Eagerly refuse to fold as soon as we're out of budget.
    }
    assert(Cost <= Budget &&
           "When we run out of budget we will eagerly return from within the "
           "per-instruction loop.");
    return true;
  };

  const std::array<StoreInst *, 2> FreeStores = {PStore, QStore};
  // NOTE(review): the first line of this condition (upstream a check of the
  // aggressive-merging command-line flag) is elided in this excerpt.
      (!IsWorthwhile(PTB, FreeStores) || !IsWorthwhile(PFB, FreeStores) ||
       !IsWorthwhile(QTB, FreeStores) || !IsWorthwhile(QFB, FreeStores)))
    return false;

  // If PostBB has more than two predecessors, we need to split it so we can
  // sink the store.
  if (std::next(pred_begin(PostBB), 2) != pred_end(PostBB)) {
    // We know that QFB's only successor is PostBB. And QFB has a single
    // predecessor. If QTB exists, then its only successor is also PostBB.
    // If QTB does not exist, then QFB's only predecessor has a conditional
    // branch to QFB and PostBB.
    BasicBlock *TruePred = QTB ? QTB : QFB->getSinglePredecessor();
    BasicBlock *NewBB =
        SplitBlockPredecessors(PostBB, {QFB, TruePred}, "condstore.split", DTU);
    if (!NewBB)
      return false;
    PostBB = NewBB;
  }

  // OK, we're going to sink the stores to PostBB. The store has to be
  // conditional though, so first create the predicate.
  CondBrInst *PBranch =
  // NOTE(review): the initializer (upstream a cast of PStore's parent-block
  // terminator) is elided in this excerpt.
  CondBrInst *QBranch =
  // NOTE(review): the initializer (upstream a cast of QStore's parent-block
  // terminator) is elided in this excerpt.
  Value *PCond = PBranch->getCondition();
  Value *QCond = QBranch->getCondition();

  // NOTE(review): the opening lines of the next two statements (upstream they
  // define PPHI and QPHI from the stored values, making them available in the
  // successor) are elided in this excerpt.
      PStore->getParent());
      QStore->getParent(), PPHI);

  BasicBlock::iterator PostBBFirst = PostBB->getFirstInsertionPt();
  IRBuilder<> QB(PostBB, PostBBFirst);
  QB.SetCurrentDebugLocation(PostBBFirst->getStableDebugLoc());

  // A store found in the *false* block means the guarding condition must be
  // inverted before it can predicate the merged store.
  InvertPCond ^= (PStore->getParent() != PTB);
  InvertQCond ^= (QStore->getParent() != QTB);
  Value *PPred = InvertPCond ? QB.CreateNot(PCond) : PCond;
  Value *QPred = InvertQCond ? QB.CreateNot(QCond) : QCond;

  // The merged store must execute if either original store would have.
  Value *CombinedPred = QB.CreateOr(PPred, QPred);

  BasicBlock::iterator InsertPt = QB.GetInsertPoint();
  auto *T = SplitBlockAndInsertIfThen(CombinedPred, InsertPt,
                                      /*Unreachable=*/false,
                                      /*BranchWeights=*/nullptr, DTU);
  if (hasBranchWeightMD(*PBranch) && hasBranchWeightMD(*QBranch) &&
  // NOTE(review): the trailing operand of this condition is elided in this
  // excerpt.
    SmallVector<uint32_t, 2> PWeights, QWeights;
    extractBranchWeights(*PBranch, PWeights);
    extractBranchWeights(*QBranch, QWeights);
    // Align weight order with the (possibly inverted) predicates combined
    // above, so index 0 is always the "store executes" direction.
    if (InvertPCond)
      std::swap(PWeights[0], PWeights[1]);
    if (InvertQCond)
      std::swap(QWeights[0], QWeights[1]);
    auto CombinedWeights = getDisjunctionWeights(PWeights, QWeights);
    // NOTE(review): the line opening this call (which attaches the combined
    // weights to the newly created branch) is elided in this excerpt.
        {CombinedWeights[0], CombinedWeights[1]},
        /*IsExpected=*/false, /*ElideAllZero=*/true);
  }

  QB.SetInsertPoint(T);
  StoreInst *SI = cast<StoreInst>(QB.CreateStore(QPHI, Address));
  SI->setAAMetadata(PStore->getAAMetadata().merge(QStore->getAAMetadata()));
  // Choose the minimum alignment. If we could prove both stores execute, we
  // could use biggest one. In this case, though, we only know that one of the
  // stores executes. And we don't know it's safe to take the alignment from a
  // store that doesn't execute.
  SI->setAlignment(std::min(PStore->getAlign(), QStore->getAlign()));

  // The original conditional stores are now redundant.
  QStore->eraseFromParent();
  PStore->eraseFromParent();

  return true;
}
4472
    DomTreeUpdater *DTU, const DataLayout &DL,
    const TargetTransformInfo &TTI) {
  // NOTE(review): the opening line of this definition (upstream:
  // `static bool mergeConditionalStores(` taking the two conditional branches
  // PBI and QBI) is elided in this excerpt, as are two interior lines flagged
  // below.
  //
  // The intention here is to find diamonds or triangles (see below) where each
  // conditional block contains a store to the same address. Both of these
  // stores are conditional, so they can't be unconditionally sunk. But it may
  // be profitable to speculatively sink the stores into one merged store at the
  // end, and predicate the merged store on the union of the two conditions of
  // PBI and QBI.
  //
  // This can reduce the number of stores executed if both of the conditions are
  // true, and can allow the blocks to become small enough to be if-converted.
  // This optimization will also chain, so that ladders of test-and-set
  // sequences can be if-converted away.
  //
  // We only deal with simple diamonds or triangles:
  //
  //     PBI       or      PBI        or a combination of the two
  //    /   \               | \
  //   PTB  PFB             |  PFB
  //    \   /               | /
  //     QBI                QBI
  //    /  \                | \
  //   QTB  QFB             |  QFB
  //    \  /                | /
  //    PostBB            PostBB
  //
  // We model triangles as a type of diamond with a nullptr "true" block.
  // Triangles are canonicalized so that the fallthrough edge is represented by
  // a true condition, as in the diagram above.
  BasicBlock *PTB = PBI->getSuccessor(0);
  BasicBlock *PFB = PBI->getSuccessor(1);
  BasicBlock *QTB = QBI->getSuccessor(0);
  BasicBlock *QFB = QBI->getSuccessor(1);
  BasicBlock *PostBB = QFB->getSingleSuccessor();

  // Make sure we have a good guess for PostBB. If QTB's only successor is
  // QFB, then QFB is a better PostBB.
  if (QTB->getSingleSuccessor() == QFB)
    PostBB = QFB;

  // If we couldn't find a good PostBB, stop.
  if (!PostBB)
    return false;

  bool InvertPCond = false, InvertQCond = false;
  // Canonicalize fallthroughs to the true branches.
  if (PFB == QBI->getParent()) {
    std::swap(PFB, PTB);
    InvertPCond = true;
  }
  if (QFB == PostBB) {
    std::swap(QFB, QTB);
    InvertQCond = true;
  }

  // From this point on we can assume PTB or QTB may be fallthroughs but PFB
  // and QFB may not. Model fallthroughs as a nullptr block.
  if (PTB == QBI->getParent())
    PTB = nullptr;
  if (QTB == PostBB)
    QTB = nullptr;

  // Legality bailouts. We must have at least the non-fallthrough blocks and
  // the post-dominating block, and the non-fallthroughs must only have one
  // predecessor.
  auto HasOnePredAndOneSucc = [](BasicBlock *BB, BasicBlock *P, BasicBlock *S) {
    return BB->getSinglePredecessor() == P && BB->getSingleSuccessor() == S;
  };
  if (!HasOnePredAndOneSucc(PFB, PBI->getParent(), QBI->getParent()) ||
      !HasOnePredAndOneSucc(QFB, QBI->getParent(), PostBB))
    return false;
  if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) ||
      (QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB)))
    return false;
  if (!QBI->getParent()->hasNUses(2))
    return false;

  // OK, this is a sequence of two diamonds or triangles.
  // Check if there are stores in PTB or PFB that are repeated in QTB or QFB.
  SmallPtrSet<Value *, 4> PStoreAddresses, QStoreAddresses;
  for (auto *BB : {PTB, PFB}) {
    if (!BB)
      continue;
    for (auto &I : *BB)
      // NOTE(review): the line matching I as a StoreInst (binding SI) is
      // elided in this excerpt.
        PStoreAddresses.insert(SI->getPointerOperand());
  }
  for (auto *BB : {QTB, QFB}) {
    if (!BB)
      continue;
    for (auto &I : *BB)
      // NOTE(review): the line matching I as a StoreInst (binding SI) is
      // elided in this excerpt.
        QStoreAddresses.insert(SI->getPointerOperand());
  }

  set_intersect(PStoreAddresses, QStoreAddresses);
  // set_intersect mutates PStoreAddresses in place. Rename it here to make it
  // clear what it contains.
  auto &CommonAddresses = PStoreAddresses;

  // Attempt the sink for each address stored on both sides; report a change if
  // any attempt succeeds.
  bool Changed = false;
  for (auto *Address : CommonAddresses)
    Changed |=
        mergeConditionalStoreToAddress(PTB, PFB, QTB, QFB, PostBB, Address,
                                       InvertPCond, InvertQCond, DTU, DL, TTI);
  return Changed;
}
4581
/// If the previous block ended with a widenable branch, determine if reusing
/// the target block is profitable and legal. This will have the effect of
/// "widening" PBI, but doesn't require us to reason about hoisting safety.
/// Returns true if one of BI's deoptimizing successors was rewired to PBI's
/// false block.
// NOTE(review): the line declaring this function (its name and leading
// parameters -- upstream the two branch instructions PBI and BI) is elided in
// this excerpt; only the trailing parameter line remains.
                                       DomTreeUpdater *DTU) {
  // TODO: This can be generalized in two important ways:
  // 1) We can allow phi nodes in IfFalseBB and simply reuse all the input
  //    values from the PBI edge.
  // 2) We can sink side effecting instructions into BI's fallthrough
  //    successor provided they don't contribute to computation of
  //    BI's condition.
  BasicBlock *IfTrueBB = PBI->getSuccessor(0);
  BasicBlock *IfFalseBB = PBI->getSuccessor(1);
  // Only handle a widenable PBI whose taken edge leads to BI's block, and only
  // when that block has no other predecessors.
  if (!isWidenableBranch(PBI) || IfTrueBB != BI->getParent() ||
      !BI->getParent()->getSinglePredecessor())
    return false;
  if (!IfFalseBB->phis().empty())
    return false; // TODO
  // This helps avoid infinite loop with SimplifyCondBranchToCondBranch which
  // may undo the transform done here.
  // TODO: There might be a more fine-grained solution to this.
  if (!llvm::succ_empty(IfFalseBB))
    return false;
  // Use lambda to lazily compute expensive condition after cheap ones.
  auto NoSideEffects = [](BasicBlock &BB) {
    return llvm::none_of(BB, [](const Instruction &I) {
      return I.mayWriteToMemory() || I.mayHaveSideEffects();
    });
  };
  // Try BI's false edge first, then its true edge: if the edge currently leads
  // to a deoptimizing block, retarget it at PBI's false block instead.
  if (BI->getSuccessor(1) != IfFalseBB && // no inf looping
      BI->getSuccessor(1)->getTerminatingDeoptimizeCall() && // profitability
      NoSideEffects(*BI->getParent())) {
    auto *OldSuccessor = BI->getSuccessor(1);
    OldSuccessor->removePredecessor(BI->getParent());
    BI->setSuccessor(1, IfFalseBB);
    if (DTU)
      DTU->applyUpdates(
          {{DominatorTree::Insert, BI->getParent(), IfFalseBB},
           {DominatorTree::Delete, BI->getParent(), OldSuccessor}});
    return true;
  }
  if (BI->getSuccessor(0) != IfFalseBB && // no inf looping
      BI->getSuccessor(0)->getTerminatingDeoptimizeCall() && // profitability
      NoSideEffects(*BI->getParent())) {
    auto *OldSuccessor = BI->getSuccessor(0);
    OldSuccessor->removePredecessor(BI->getParent());
    BI->setSuccessor(0, IfFalseBB);
    if (DTU)
      DTU->applyUpdates(
          {{DominatorTree::Insert, BI->getParent(), IfFalseBB},
           {DominatorTree::Delete, BI->getParent(), OldSuccessor}});
    return true;
  }
  return false;
}
4637
/// If we have a conditional branch as a predecessor of another block,
/// this function tries to simplify it. We know
/// that PBI and BI are both conditional branches, and BI is in one of the
/// successor blocks of PBI - PBI branches to BI.
/// Returns true if the CFG was changed.
// NOTE(review): the line declaring this function (its name and leading
// parameters -- upstream the two conditional branches PBI and BI) is elided in
// this excerpt, as are a few interior lines flagged with NOTE(review) below.
                                           DomTreeUpdater *DTU,
                                           const DataLayout &DL,
                                           const TargetTransformInfo &TTI) {
  BasicBlock *BB = BI->getParent();

  // If this block ends with a branch instruction, and if there is a
  // predecessor that ends on a branch of the same condition, make
  // this conditional branch redundant.
  if (PBI->getCondition() == BI->getCondition() &&
      PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
    // Okay, the outcome of this conditional branch is statically
    // knowable. If this block had a single pred, handle specially, otherwise
    // foldCondBranchOnValueKnownInPredecessor() will handle it.
    if (BB->getSinglePredecessor()) {
      // Turn this into a branch on constant.
      bool CondIsTrue = PBI->getSuccessor(0) == BB;
      BI->setCondition(
          ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue));
      return true; // Nuke the branch on constant.
    }
  }

  // If the previous block ended with a widenable branch, determine if reusing
  // the target block is profitable and legal. This will have the effect of
  // "widening" PBI, but doesn't require us to reason about hoisting safety.
  if (tryWidenCondBranchToCondBranch(PBI, BI, DTU))
    return true;

  // If both branches are conditional and both contain stores to the same
  // address, remove the stores from the conditionals and create a conditional
  // merged store at the end.
  if (MergeCondStores && mergeConditionalStores(PBI, BI, DTU, DL, TTI))
    return true;

  // If this is a conditional branch in an empty block, and if any
  // predecessors are a conditional branch to one of our destinations,
  // fold the conditions into logical ops and one cond br.

  // Ignore dbg intrinsics.
  if (&*BB->begin() != BI)
    return false;

  // Work out which successor of PBI is shared with a successor of BI by
  // trying all four pairings; bail out if the two branches share none.
  int PBIOp, BIOp;
  if (PBI->getSuccessor(0) == BI->getSuccessor(0)) {
    PBIOp = 0;
    BIOp = 0;
  } else if (PBI->getSuccessor(0) == BI->getSuccessor(1)) {
    PBIOp = 0;
    BIOp = 1;
  } else if (PBI->getSuccessor(1) == BI->getSuccessor(0)) {
    PBIOp = 1;
    BIOp = 0;
  } else if (PBI->getSuccessor(1) == BI->getSuccessor(1)) {
    PBIOp = 1;
    BIOp = 1;
  } else {
    return false;
  }

  // Check to make sure that the other destination of this branch
  // isn't BB itself. If so, this is an infinite loop that will
  // keep getting unwound.
  if (PBI->getSuccessor(PBIOp) == BB)
    return false;

  // If predecessor's branch probability to BB is too low don't merge branches.
  SmallVector<uint32_t, 2> PredWeights;
  if (!PBI->getMetadata(LLVMContext::MD_unpredictable) &&
      extractBranchWeights(*PBI, PredWeights) &&
      (static_cast<uint64_t>(PredWeights[0]) + PredWeights[1]) != 0) {

    // NOTE(review): the line constructing the probability of the common
    // destination (defining CommonDestProb from the operands below) is elided
    // in this excerpt.
        PredWeights[PBIOp],
        static_cast<uint64_t>(PredWeights[0]) + PredWeights[1]);

    BranchProbability Likely = TTI.getPredictableBranchThreshold();
    if (CommonDestProb >= Likely)
      return false;
  }

  // Do not perform this transformation if it would require
  // insertion of a large number of select instructions. For targets
  // without predication/cmovs, this is a big pessimization.

  BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
  BasicBlock *RemovedDest = PBI->getSuccessor(PBIOp ^ 1);
  unsigned NumPhis = 0;
  for (BasicBlock::iterator II = CommonDest->begin(); isa<PHINode>(II);
       ++II, ++NumPhis) {
    if (NumPhis > 2) // Disable this xform.
      return false;
  }

  // Finally, if everything is ok, fold the branches to logical ops.
  BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);

  LLVM_DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
                    << "AND: " << *BI->getParent());

  // NOTE(review): the declaration of the DominatorTree update list (Updates,
  // used below) is elided in this excerpt.

  // If OtherDest *is* BB, then BB is a basic block with a single conditional
  // branch in it, where one edge (OtherDest) goes back to itself but the other
  // exits. We don't *know* that the program avoids the infinite loop
  // (even though that seems likely). If we do this xform naively, we'll end up
  // recursively unpeeling the loop. Since we know that (after the xform is
  // done) that the block *is* infinite if reached, we just make it an obviously
  // infinite loop with no cond branch.
  if (OtherDest == BB) {
    // Insert it at the end of the function, because it's either code,
    // or it won't matter if it's hot. :)
    BasicBlock *InfLoopBlock =
        BasicBlock::Create(BB->getContext(), "infloop", BB->getParent());
    UncondBrInst::Create(InfLoopBlock, InfLoopBlock);
    if (DTU)
      Updates.push_back({DominatorTree::Insert, InfLoopBlock, InfLoopBlock});
    OtherDest = InfLoopBlock;
  }

  LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent());

  // BI may have other predecessors. Because of this, we leave
  // it alone, but modify PBI.

  // Make sure we get to CommonDest on True&True directions.
  Value *PBICond = PBI->getCondition();
  IRBuilder<NoFolder> Builder(PBI);
  if (PBIOp)
    PBICond = Builder.CreateNot(PBICond, PBICond->getName() + ".not");

  Value *BICond = BI->getCondition();
  if (BIOp)
    BICond = Builder.CreateNot(BICond, BICond->getName() + ".not");

  // Merge the conditions.
  Value *Cond =
      createLogicalOp(Builder, Instruction::Or, PBICond, BICond, "brmerge");

  // Modify PBI to branch on the new condition to the new dests.
  PBI->setCondition(Cond);
  PBI->setSuccessor(0, CommonDest);
  PBI->setSuccessor(1, OtherDest);

  if (DTU) {
    Updates.push_back({DominatorTree::Insert, PBI->getParent(), OtherDest});
    Updates.push_back({DominatorTree::Delete, PBI->getParent(), RemovedDest});

    DTU->applyUpdates(Updates);
  }

  // Update branch weight for PBI.
  uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
  uint64_t PredCommon, PredOther, SuccCommon, SuccOther;
  bool HasWeights =
      extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight,
                             SuccTrueWeight, SuccFalseWeight);
  if (HasWeights) {
    PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight;
    PredOther = PBIOp ? PredTrueWeight : PredFalseWeight;
    SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight;
    SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight;
    // The weight to CommonDest should be PredCommon * SuccTotal +
    //                                    PredOther * SuccCommon.
    // The weight to OtherDest should be PredOther * SuccOther.
    uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) +
                                  PredOther * SuccCommon,
                              PredOther * SuccOther};

    setFittedBranchWeights(*PBI, NewWeights, /*IsExpected=*/false,
                           /*ElideAllZero=*/true);
    // Cond may be a select instruction with the first operand set to "true", or
    // the second to "false" (see how createLogicalOp works for `and` and `or`)
    // NOTE(review): one line is elided here in this excerpt.
    if (auto *SI = dyn_cast<SelectInst>(Cond)) {
      assert(isSelectInRoleOfConjunctionOrDisjunction(SI));
      // The select is predicated on PBICond
      // NOTE(review): one line is elided here in this excerpt.
      // The corresponding probabilities are what was referred to above as
      // PredCommon and PredOther.
      setFittedBranchWeights(*SI, {PredCommon, PredOther},
                             /*IsExpected=*/false, /*ElideAllZero=*/true);
    }
  }

  // OtherDest may have phi nodes. If so, add an entry from PBI's
  // block that are identical to the entries for BI's block.
  addPredecessorToBlock(OtherDest, PBI->getParent(), BB);

  // We know that the CommonDest already had an edge from PBI to
  // it. If it has PHIs though, the PHIs may have different
  // entries for BB and PBI's BB. If so, insert a select to make
  // them agree.
  for (PHINode &PN : CommonDest->phis()) {
    Value *BIV = PN.getIncomingValueForBlock(BB);
    unsigned PBBIdx = PN.getBasicBlockIndex(PBI->getParent());
    Value *PBIV = PN.getIncomingValue(PBBIdx);
    if (BIV != PBIV) {
      // Insert a select in PBI to pick the right value.
      // NOTE(review): the line opening this statement (upstream it binds NV to
      // the created select) is elided in this excerpt.
          Builder.CreateSelect(PBICond, PBIV, BIV, PBIV->getName() + ".mux"));
      PN.setIncomingValue(PBBIdx, NV);
      // The select has the same condition as PBI, in the same BB. The
      // probabilities don't change.
      if (HasWeights) {
        uint64_t TrueWeight = PBIOp ? PredFalseWeight : PredTrueWeight;
        uint64_t FalseWeight = PBIOp ? PredTrueWeight : PredFalseWeight;
        setFittedBranchWeights(*NV, {TrueWeight, FalseWeight},
                               /*IsExpected=*/false, /*ElideAllZero=*/true);
      }
    }
  }

  LLVM_DEBUG(dbgs() << "INTO: " << *PBI->getParent());
  LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent());

  // This basic block is probably dead. We know it has at least
  // one fewer predecessor.
  return true;
}
4862
// Simplifies a terminator by replacing it with a branch to TrueBB if Cond is
// true or to FalseBB if Cond is false.
// Takes care of updating the successors and removing the old terminator.
// Also makes sure not to introduce new successors by assuming that edges to
// non-successor TrueBBs and FalseBBs aren't reachable.
// TrueWeight/FalseWeight seed the profile metadata of the new conditional
// branch when one is created.
bool SimplifyCFGOpt::simplifyTerminatorOnSelect(Instruction *OldTerm,
                                                Value *Cond, BasicBlock *TrueBB,
                                                BasicBlock *FalseBB,
                                                uint32_t TrueWeight,
                                                uint32_t FalseWeight) {
  auto *BB = OldTerm->getParent();
  // Remove any superfluous successor edges from the CFG.
  // First, figure out which successors to preserve.
  // If TrueBB and FalseBB are equal, only try to preserve one copy of that
  // successor.
  BasicBlock *KeepEdge1 = TrueBB;
  BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr;

  SmallSetVector<BasicBlock *, 2> RemovedSuccessors;

  // Then remove the rest.
  for (BasicBlock *Succ : successors(OldTerm)) {
    // Make sure only to keep exactly one copy of each edge.
    if (Succ == KeepEdge1)
      KeepEdge1 = nullptr;
    else if (Succ == KeepEdge2)
      KeepEdge2 = nullptr;
    else {
      Succ->removePredecessor(BB,
                              /*KeepOneInputPHIs=*/true);

      if (Succ != TrueBB && Succ != FalseBB)
        RemovedSuccessors.insert(Succ);
    }
  }

  IRBuilder<> Builder(OldTerm);
  Builder.SetCurrentDebugLocation(OldTerm->getDebugLoc());

  // Insert an appropriate new terminator.
  if (!KeepEdge1 && !KeepEdge2) {
    if (TrueBB == FalseBB) {
      // We were only looking for one successor, and it was present.
      // Create an unconditional branch to it.
      Builder.CreateBr(TrueBB);
    } else {
      // We found both of the successors we were looking for.
      // Create a conditional branch sharing the condition of the select.
      CondBrInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB);
      setBranchWeights(*NewBI, {TrueWeight, FalseWeight},
                       /*IsExpected=*/false, /*ElideAllZero=*/true);
    }
  } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) {
    // Neither of the selected blocks were successors, so this
    // terminator must be unreachable.
    new UnreachableInst(OldTerm->getContext(), OldTerm->getIterator());
  } else {
    // One of the selected values was a successor, but the other wasn't.
    // Insert an unconditional branch to the one that was found;
    // the edge to the one that wasn't must be unreachable.
    if (!KeepEdge1) {
      // Only TrueBB was found.
      Builder.CreateBr(TrueBB);
    } else {
      // Only FalseBB was found.
      Builder.CreateBr(FalseBB);
    }
  }

  // NOTE(review): one line is elided here in this excerpt -- presumably the
  // call that erases OldTerm (and dead-code-eliminates its condition) now that
  // the replacement terminator is in place; confirm against upstream.

  if (DTU) {
    SmallVector<DominatorTree::UpdateType, 2> Updates;
    Updates.reserve(RemovedSuccessors.size());
    for (auto *RemovedSuccessor : RemovedSuccessors)
      Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
    DTU->applyUpdates(Updates);
  }

  return true;
}
4944
4945// Replaces
4946// (switch (select cond, X, Y)) on constant X, Y
4947// with a branch - conditional if X and Y lead to distinct BBs,
4948// unconditional otherwise.
4949bool SimplifyCFGOpt::simplifySwitchOnSelect(SwitchInst *SI,
4950 SelectInst *Select) {
4951 // Check for constant integer values in the select.
4952 ConstantInt *TrueVal = dyn_cast<ConstantInt>(Select->getTrueValue());
4953 ConstantInt *FalseVal = dyn_cast<ConstantInt>(Select->getFalseValue());
4954 if (!TrueVal || !FalseVal)
4955 return false;
4956
4957 // Find the relevant condition and destinations.
4958 Value *Condition = Select->getCondition();
4959 BasicBlock *TrueBB = SI->findCaseValue(TrueVal)->getCaseSuccessor();
4960 BasicBlock *FalseBB = SI->findCaseValue(FalseVal)->getCaseSuccessor();
4961
4962 // Get weight for TrueBB and FalseBB.
4963 uint32_t TrueWeight = 0, FalseWeight = 0;
4964 SmallVector<uint64_t, 8> Weights;
4965 bool HasWeights = hasBranchWeightMD(*SI);
4966 if (HasWeights) {
4967 getBranchWeights(SI, Weights);
4968 if (Weights.size() == 1 + SI->getNumCases()) {
4969 TrueWeight =
4970 (uint32_t)Weights[SI->findCaseValue(TrueVal)->getSuccessorIndex()];
4971 FalseWeight =
4972 (uint32_t)Weights[SI->findCaseValue(FalseVal)->getSuccessorIndex()];
4973 }
4974 }
4975
4976 // Perform the actual simplification.
4977 return simplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB, TrueWeight,
4978 FalseWeight);
4979}
4980
// Replaces
//   (indirectbr (select cond, blockaddress(@fn, BlockA),
//                             blockaddress(@fn, BlockB)))
// with
//   (br cond, BlockA, BlockB).
bool SimplifyCFGOpt::simplifyIndirectBrOnSelect(IndirectBrInst *IBI,
                                                SelectInst *SI) {
  // Check that both operands of the select are block addresses.
  BlockAddress *TBA = dyn_cast<BlockAddress>(SI->getTrueValue());
  BlockAddress *FBA = dyn_cast<BlockAddress>(SI->getFalseValue());
  if (!TBA || !FBA)
    return false;

  // Extract the actual blocks.
  BasicBlock *TrueBB = TBA->getBasicBlock();
  BasicBlock *FalseBB = FBA->getBasicBlock();

  // The select's profile becomes the profile of the conditional branch that
  // replaces the indirect branch.
  SmallVector<uint32_t> SelectBranchWeights(2);
  // NOTE(review): the line guarding this call is elided in this excerpt; the
  // indentation suggests the extraction is conditional, with the weights
  // otherwise left at their zero defaults.
    extractBranchWeights(*SI, SelectBranchWeights);
  // Perform the actual simplification.
  return simplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB,
                                    SelectBranchWeights[0],
                                    SelectBranchWeights[1]);
}
5008
5009/// This is called when we find an icmp instruction
5010/// (a seteq/setne with a constant) as the only instruction in a
5011/// block that ends with an uncond branch. We are looking for a very specific
5012/// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In
5013/// this case, we merge the first two "or's of icmp" into a switch, but then the
5014/// default value goes to an uncond block with a seteq in it, we get something
5015/// like:
5016///
5017/// switch i8 %A, label %DEFAULT [ i8 1, label %end i8 2, label %end ]
5018/// DEFAULT:
5019/// %tmp = icmp eq i8 %A, 92
5020/// br label %end
5021/// end:
5022/// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ]
5023///
5024/// We prefer to split the edge to 'end' so that there is a true/false entry to
5025/// the PHI, merging the third icmp into the switch.
5026bool SimplifyCFGOpt::tryToSimplifyUncondBranchWithICmpInIt(
5027 ICmpInst *ICI, IRBuilder<> &Builder) {
5028 // Select == nullptr means we assume that there is a hidden no-op select
5029 // instruction of `_ = select %icmp, true, false` after `%icmp = icmp ...`
5030 return tryToSimplifyUncondBranchWithICmpSelectInIt(ICI, nullptr, Builder);
5031}
5032
5033/// Similar to tryToSimplifyUncondBranchWithICmpInIt, but handle a more generic
5034/// case. This is called when we find an icmp instruction (a seteq/setne with a
5035/// constant) and its following select instruction as the only TWO instructions
5036/// in a block that ends with an uncond branch. We are looking for a very
5037/// specific pattern that occurs when "
5038/// if (A == 1) return C1;
5039/// if (A == 2) return C2;
5040/// if (A < 3) return C3;
5041/// return C4;
5042/// " gets simplified. In this case, we merge the first two "branches of icmp"
5043/// into a switch, but then the default value goes to an uncond block with a lt
5044/// icmp and select in it, as InstCombine can not simplify "A < 3" as "A == 2".
5045/// After SimplifyCFG and other subsequent optimizations (e.g., SCCP), we might
5046/// get something like:
5047///
5048/// case1:
5049/// switch i8 %A, label %DEFAULT [ i8 0, label %end i8 1, label %case2 ]
5050/// case2:
5051/// br label %end
5052/// DEFAULT:
5053/// %tmp = icmp eq i8 %A, 2
5054/// %val = select i1 %tmp, i8 C3, i8 C4
5055/// br label %end
5056/// end:
5057/// _ = phi i8 [ C1, %case1 ], [ C2, %case2 ], [ %val, %DEFAULT ]
5058///
5059/// We prefer to split the edge to 'end' so that there are TWO entries of V3/V4
5060/// to the PHI, merging the icmp & select into the switch, as follows:
5061///
5062/// case1:
5063/// switch i8 %A, label %DEFAULT [
5064/// i8 0, label %end
5065/// i8 1, label %case2
5066/// i8 2, label %case3
5067/// ]
5068/// case2:
5069/// br label %end
5070/// case3:
5071/// br label %end
5072/// DEFAULT:
5073/// br label %end
5074/// end:
5075/// _ = phi i8 [ C1, %case1 ], [ C2, %case2 ], [ C3, %case2 ], [ C4, %DEFAULT]
5076bool SimplifyCFGOpt::tryToSimplifyUncondBranchWithICmpSelectInIt(
5077 ICmpInst *ICI, SelectInst *Select, IRBuilder<> &Builder) {
5078 BasicBlock *BB = ICI->getParent();
5079
5080 // If the block has any PHIs in it or the icmp/select has multiple uses, it is
5081 // too complex.
5082 /// TODO: support multi-phis in succ BB of select's BB.
5083 if (isa<PHINode>(BB->begin()) || !ICI->hasOneUse() ||
5084 (Select && !Select->hasOneUse()))
5085 return false;
5086
5087 // The pattern we're looking for is where our only predecessor is a switch on
5088 // 'V' and this block is the default case for the switch. In this case we can
5089 // fold the compared value into the switch to simplify things.
5090 BasicBlock *Pred = BB->getSinglePredecessor();
5091 if (!Pred || !isa<SwitchInst>(Pred->getTerminator()))
5092 return false;
5093
5094 Value *IcmpCond;
5095 ConstantInt *NewCaseVal;
5096 CmpPredicate Predicate;
5097
5098 // Match icmp X, C
5099 if (!match(ICI,
5100 m_ICmp(Predicate, m_Value(IcmpCond), m_ConstantInt(NewCaseVal))))
5101 return false;
5102
5103 Value *SelectCond, *SelectTrueVal, *SelectFalseVal;
5105 if (!Select) {
5106 // If Select == nullptr, we can assume that there is a hidden no-op select
5107 // just after icmp
5108 SelectCond = ICI;
5109 SelectTrueVal = Builder.getTrue();
5110 SelectFalseVal = Builder.getFalse();
5111 User = ICI->user_back();
5112 } else {
5113 SelectCond = Select->getCondition();
5114 // Check if the select condition is the same as the icmp condition.
5115 if (SelectCond != ICI)
5116 return false;
5117 SelectTrueVal = Select->getTrueValue();
5118 SelectFalseVal = Select->getFalseValue();
5119 User = Select->user_back();
5120 }
5121
5122 SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
5123 if (SI->getCondition() != IcmpCond)
5124 return false;
5125
5126 // If BB is reachable on a non-default case, then we simply know the value of
5127 // V in this block. Substitute it and constant fold the icmp instruction
5128 // away.
5129 if (SI->getDefaultDest() != BB) {
5130 ConstantInt *VVal = SI->findCaseDest(BB);
5131 assert(VVal && "Should have a unique destination value");
5132 ICI->setOperand(0, VVal);
5133
5134 if (Value *V = simplifyInstruction(ICI, {DL, ICI})) {
5135 ICI->replaceAllUsesWith(V);
5136 ICI->eraseFromParent();
5137 }
5138 // BB is now empty, so it is likely to simplify away.
5139 return requestResimplify();
5140 }
5141
5142 // Ok, the block is reachable from the default dest. If the constant we're
5143 // comparing exists in one of the other edges, then we can constant fold ICI
5144 // and zap it.
5145 if (SI->findCaseValue(NewCaseVal) != SI->case_default()) {
5146 Value *V;
5147 if (Predicate == ICmpInst::ICMP_EQ)
5149 else
5151
5152 ICI->replaceAllUsesWith(V);
5153 ICI->eraseFromParent();
5154 // BB is now empty, so it is likely to simplify away.
5155 return requestResimplify();
5156 }
5157
5158 // The use of the select has to be in the 'end' block, by the only PHI node in
5159 // the block.
5160 BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
5161 PHINode *PHIUse = dyn_cast<PHINode>(User);
5162 if (PHIUse == nullptr || PHIUse != &SuccBlock->front() ||
5164 return false;
5165
5166 // If the icmp is a SETEQ, then the default dest gets SelectFalseVal, the new
5167 // edge gets SelectTrueVal in the PHI.
5168 Value *DefaultCst = SelectFalseVal;
5169 Value *NewCst = SelectTrueVal;
5170
5171 if (ICI->getPredicate() == ICmpInst::ICMP_NE)
5172 std::swap(DefaultCst, NewCst);
5173
5174 // Replace Select (which is used by the PHI for the default value) with
5175 // SelectFalseVal or SelectTrueVal depending on if ICI is EQ or NE.
5176 if (Select) {
5177 Select->replaceAllUsesWith(DefaultCst);
5178 Select->eraseFromParent();
5179 } else {
5180 ICI->replaceAllUsesWith(DefaultCst);
5181 }
5182 ICI->eraseFromParent();
5183
5184 SmallVector<DominatorTree::UpdateType, 2> Updates;
5185
5186 // Okay, the switch goes to this block on a default value. Add an edge from
5187 // the switch to the merge point on the compared value.
5188 BasicBlock *NewBB =
5189 BasicBlock::Create(BB->getContext(), "switch.edge", BB->getParent(), BB);
5190 {
5191 SwitchInstProfUpdateWrapper SIW(*SI);
5192 auto W0 = SIW.getSuccessorWeight(0);
5194 if (W0) {
5195 NewW = ((uint64_t(*W0) + 1) >> 1);
5196 SIW.setSuccessorWeight(0, *NewW);
5197 }
5198 SIW.addCase(NewCaseVal, NewBB, NewW);
5199 if (DTU)
5200 Updates.push_back({DominatorTree::Insert, Pred, NewBB});
5201 }
5202
5203 // NewBB branches to the phi block, add the uncond branch and the phi entry.
5204 Builder.SetInsertPoint(NewBB);
5205 Builder.SetCurrentDebugLocation(SI->getDebugLoc());
5206 Builder.CreateBr(SuccBlock);
5207 PHIUse->addIncoming(NewCst, NewBB);
5208 if (DTU) {
5209 Updates.push_back({DominatorTree::Insert, NewBB, SuccBlock});
5210 DTU->applyUpdates(Updates);
5211 }
5212 return true;
5213}
5214
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
///
/// Turns e.g. `br (X == 0 | X == 1), T, F` into a switch on X (or, when the
/// compared constants form one contiguous range, into a single range-check
/// conditional branch). Any leftover non-constant comparison ("Extra") is
/// tested by an explicit branch first.
/// \returns true if the conditional branch was replaced.
bool SimplifyCFGOpt::simplifyBranchOnICmpChain(CondBrInst *BI,
                                               IRBuilder<> &Builder,
                                               const DataLayout &DL) {
  if (!Cond)
    return false;

  // Change br (X == 0 | X == 1), T, F into a switch instruction.
  // If this is a bunch of seteq's or'd together, or if it's a bunch of
  // 'setne's and'ed together, collect them.

  // Try to gather values from a chain of and/or to be turned into a switch
  ConstantComparesGatherer ConstantCompare(Cond, DL);
  // Unpack the result
  SmallVectorImpl<ConstantInt *> &Values = ConstantCompare.Vals;
  Value *CompVal = ConstantCompare.CompValue;
  unsigned UsedICmps = ConstantCompare.UsedICmps;
  Value *ExtraCase = ConstantCompare.Extra;
  // IsEq: true when the chain is or-of-eq; false when it is and-of-ne.
  bool TrueWhenEqual = ConstantCompare.IsEq;

  // If we didn't have a multiply compared value, fail.
  if (!CompVal)
    return false;

  // Avoid turning single icmps into a switch.
  if (UsedICmps <= 1)
    return false;

  // There might be duplicate constants in the list, which the switch
  // instruction can't handle, remove them now.
  array_pod_sort(Values.begin(), Values.end(), constantIntSortPredicate);
  Values.erase(llvm::unique(Values), Values.end());

  // If Extra was used, we require at least two switch values to do the
  // transformation. A switch with one value is just a conditional branch.
  if (ExtraCase && Values.size() < 2)
    return false;

  SmallVector<uint32_t> BranchWeights;
  const bool HasProfile = !ProfcheckDisableMetadataFixes &&
                          extractBranchWeights(*BI, BranchWeights);

  // Figure out which block is which destination.
  BasicBlock *DefaultBB = BI->getSuccessor(1);
  BasicBlock *EdgeBB = BI->getSuccessor(0);
  if (!TrueWhenEqual) {
    // For and-of-ne chains, a constant match means "fall to the false side",
    // so the roles (and profile weights) of the two successors are swapped.
    std::swap(DefaultBB, EdgeBB);
    if (HasProfile)
      std::swap(BranchWeights[0], BranchWeights[1]);
  }

  BasicBlock *BB = BI->getParent();

  LLVM_DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
                    << " cases into SWITCH. BB is:\n"
                    << *BB);

  SmallVector<DominatorTree::UpdateType, 2> Updates;

  // If there are any extra values that couldn't be folded into the switch
  // then we evaluate them with an explicit branch first. Split the block
  // right before the condbr to handle it.
  if (ExtraCase) {
    BasicBlock *NewBB = SplitBlock(BB, BI, DTU, /*LI=*/nullptr,
                                   /*MSSAU=*/nullptr, "switch.early.test");

    // Remove the uncond branch added to the old block.
    Instruction *OldTI = BB->getTerminator();
    Builder.SetInsertPoint(OldTI);

    // There can be an unintended UB if extra values are Poison. Before the
    // transformation, extra values may not be evaluated according to the
    // condition, and it will not raise UB. But after transformation, we are
    // evaluating extra values before checking the condition, and it will raise
    // UB. It can be solved by adding freeze instruction to extra values.
    AssumptionCache *AC = Options.AC;

    if (!isGuaranteedNotToBeUndefOrPoison(ExtraCase, AC, BI, nullptr))
      ExtraCase = Builder.CreateFreeze(ExtraCase);

    // We don't have any info about this condition.
    auto *Br = TrueWhenEqual ? Builder.CreateCondBr(ExtraCase, EdgeBB, NewBB)
                             : Builder.CreateCondBr(ExtraCase, NewBB, EdgeBB);

    OldTI->eraseFromParent();

    if (DTU)
      Updates.push_back({DominatorTree::Insert, BB, EdgeBB});

    // If there are PHI nodes in EdgeBB, then we need to add a new entry to them
    // for the edge we just added.
    addPredecessorToBlock(EdgeBB, BB, NewBB);

    LLVM_DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
                      << "\nEXTRABB = " << *BB);
    // The switch itself is now built in the split-off tail block.
    BB = NewBB;
  }

  Builder.SetInsertPoint(BI);
  // Convert pointer to int before we switch.
  if (CompVal->getType()->isPointerTy()) {
    assert(!DL.hasUnstableRepresentation(CompVal->getType()) &&
           "Should not end up here with unstable pointers");
    CompVal = Builder.CreatePtrToInt(
        CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr");
  }

  // Check if we can represent the values as a contiguous range. If so, we use a
  // range check + conditional branch instead of a switch.
  // (Values is sorted in decreasing order above, so front() is the max and
  // back() is the min.)
  if (Values.front()->getValue() - Values.back()->getValue() ==
      Values.size() - 1) {
    ConstantRange RangeToCheck = ConstantRange::getNonEmpty(
        Values.back()->getValue(), Values.front()->getValue() + 1);
    APInt Offset, RHS;
    ICmpInst::Predicate Pred;
    RangeToCheck.getEquivalentICmp(Pred, RHS, Offset);
    Value *X = CompVal;
    if (!Offset.isZero())
      X = Builder.CreateAdd(X, ConstantInt::get(CompVal->getType(), Offset));
    Value *Cond =
        Builder.CreateICmp(Pred, X, ConstantInt::get(CompVal->getType(), RHS));
    CondBrInst *NewBI = Builder.CreateCondBr(Cond, EdgeBB, DefaultBB);
    if (HasProfile)
      setBranchWeights(*NewBI, BranchWeights, /*IsExpected=*/false);
    // We don't need to update PHI nodes since we don't add any new edges.
  } else {
    // Create the new switch instruction now.
    SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size());
    if (HasProfile) {
      // We know the weight of the default case. We don't know the weight of the
      // other cases, but rather than completely lose profiling info, we split
      // the remaining probability equally over them.
      SmallVector<uint32_t> NewWeights(Values.size() + 1);
      NewWeights[0] = BranchWeights[1]; // this is the default, and we swapped
                                        // if TrueWhenEqual.
      for (auto &V : drop_begin(NewWeights))
        V = BranchWeights[0] / Values.size();
      setBranchWeights(*New, NewWeights, /*IsExpected=*/false);
    }

    // Add all of the 'cases' to the switch instruction.
    for (ConstantInt *Val : Values)
      New->addCase(Val, EdgeBB);

    // We added edges from PI to the EdgeBB. As such, if there were any
    // PHI nodes in EdgeBB, they need entries to be added corresponding to
    // the number of edges added.
    for (BasicBlock::iterator BBI = EdgeBB->begin(); isa<PHINode>(BBI); ++BBI) {
      PHINode *PN = cast<PHINode>(BBI);
      Value *InVal = PN->getIncomingValueForBlock(BB);
      // One BB->EdgeBB entry already exists in the PHI; add the remaining
      // Values.size()-1 entries with the same incoming value.
      for (unsigned i = 0, e = Values.size() - 1; i != e; ++i)
        PN->addIncoming(InVal, BB);
    }
  }

  // Erase the old branch instruction.
  if (DTU)
    DTU->applyUpdates(Updates);

  LLVM_DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
  return true;
}
5381
5382bool SimplifyCFGOpt::simplifyResume(ResumeInst *RI, IRBuilder<> &Builder) {
5383 if (isa<PHINode>(RI->getValue()))
5384 return simplifyCommonResume(RI);
5385 else if (isa<LandingPadInst>(RI->getParent()->getFirstNonPHIIt()) &&
5386 RI->getValue() == &*RI->getParent()->getFirstNonPHIIt())
5387 // The resume must unwind the exception that caused control to branch here.
5388 return simplifySingleResume(RI);
5389
5390 return false;
5391}
5392
// Check if cleanup block is empty
// "Empty" means the instruction range contains nothing but intrinsics with no
// cleanup semantics: debug-info intrinsics (declare/value/label) and
// lifetime.end markers. Any other instruction — including lifetime.start or a
// non-intrinsic call — makes the range non-empty.
  for (Instruction &I : R) {
    // Anything that is not an intrinsic call disqualifies the range.
    auto *II = dyn_cast<IntrinsicInst>(&I);
    if (!II)
      return false;

    Intrinsic::ID IntrinsicID = II->getIntrinsicID();
    switch (IntrinsicID) {
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::lifetime_end:
      break;
    default:
      return false;
    }
  }
  return true;
}
5413
// Simplify resume that is shared by several landing pads (phi of landing pad).
// For each "trivial" predecessor (a block containing only a landingpad plus
// benign intrinsics, branching solely to this resume block), rewrite the
// invokes unwinding to it into plain calls and disconnect it from this block.
bool SimplifyCFGOpt::simplifyCommonResume(ResumeInst *RI) {
  BasicBlock *BB = RI->getParent();

  // Check that there are no other instructions except for debug and lifetime
  // intrinsics between the phi's and resume instruction.
  if (!isCleanupBlockEmpty(make_range(RI->getParent()->getFirstNonPHIIt(),
                                      BB->getTerminator()->getIterator())))
    return false;

  SmallSetVector<BasicBlock *, 4> TrivialUnwindBlocks;
  // The caller (simplifyResume) only dispatches here when the resumed value
  // is a PHI, so this cast cannot fail.
  auto *PhiLPInst = cast<PHINode>(RI->getValue());

  // Check incoming blocks to see if any of them are trivial.
  for (unsigned Idx = 0, End = PhiLPInst->getNumIncomingValues(); Idx != End;
       Idx++) {
    auto *IncomingBB = PhiLPInst->getIncomingBlock(Idx);
    auto *IncomingValue = PhiLPInst->getIncomingValue(Idx);

    // If the block has other successors, we can not delete it because
    // it has other dependents.
    if (IncomingBB->getUniqueSuccessor() != BB)
      continue;

    auto *LandingPad = dyn_cast<LandingPadInst>(IncomingBB->getFirstNonPHIIt());
    // Not the landing pad that caused the control to branch here.
    if (IncomingValue != LandingPad)
      continue;

        make_range(LandingPad->getNextNode(), IncomingBB->getTerminator())))
      TrivialUnwindBlocks.insert(IncomingBB);
  }

  // If no trivial unwind blocks, don't do any simplifications.
  if (TrivialUnwindBlocks.empty())
    return false;

  // Turn all invokes that unwind here into calls.
  for (auto *TrivialBB : TrivialUnwindBlocks) {
    // Blocks that will be simplified should be removed from the phi node.
    // Note there could be multiple edges to the resume block, and we need
    // to remove them all.
    // NOTE(review): the second argument presumably asks removePredecessor to
    // keep one-input PHIs intact — confirm against the BasicBlock API.
    while (PhiLPInst->getBasicBlockIndex(TrivialBB) != -1)
      BB->removePredecessor(TrivialBB, true);

    for (BasicBlock *Pred :
      removeUnwindEdge(Pred, DTU);
      ++NumInvokes;
    }

    // In each SimplifyCFG run, only the current processed block can be erased.
    // Otherwise, it will break the iteration of SimplifyCFG pass. So instead
    // of erasing TrivialBB, we only remove the branch to the common resume
    // block so that we can later erase the resume block since it has no
    // predecessors.
    TrivialBB->getTerminator()->eraseFromParent();
    new UnreachableInst(RI->getContext(), TrivialBB);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Delete, TrivialBB, BB}});
  }

  // Delete the resume block if all its predecessors have been removed.
  if (pred_empty(BB))
    DeleteDeadBlock(BB, DTU);

  // Non-empty set here implies at least one CFG change was made above.
  return !TrivialUnwindBlocks.empty();
}
5483
// Simplify resume that is only used by a single (non-phi) landing pad.
// The block must contain nothing but the landingpad, benign intrinsics, and
// the resume; all invokes unwinding here become calls and the block dies.
bool SimplifyCFGOpt::simplifySingleResume(ResumeInst *RI) {
  BasicBlock *BB = RI->getParent();
  auto *LPInst = cast<LandingPadInst>(BB->getFirstNonPHIIt());
  assert(RI->getValue() == LPInst &&
         "Resume must unwind the exception that caused control to here");

  // Check that there are no other instructions except for debug intrinsics.
         make_range<Instruction *>(LPInst->getNextNode(), RI)))
    return false;

  // Turn all invokes that unwind here into calls and delete the basic block.
  // make_early_inc_range: removeUnwindEdge mutates the predecessor list.
  for (BasicBlock *Pred : llvm::make_early_inc_range(predecessors(BB))) {
    removeUnwindEdge(Pred, DTU);
    ++NumInvokes;
  }

  // The landingpad is now unreachable. Zap it.
  DeleteDeadBlock(BB, DTU);
  return true;
}
5506
  // If this is a trivial cleanup pad that executes no instructions, it can be
  // eliminated. If the cleanup pad continues to the caller, any predecessor
  // that is an EH pad will be updated to continue to the caller and any
  // predecessor that terminates with an invoke instruction will have its invoke
  // instruction converted to a call instruction. If the cleanup pad being
  // simplified does not continue to the caller, each predecessor will be
  // updated to continue to the unwind destination of the cleanup pad being
  // simplified.
  BasicBlock *BB = RI->getParent();
  CleanupPadInst *CPInst = RI->getCleanupPad();
  if (CPInst->getParent() != BB)
    // This isn't an empty cleanup.
    return false;

  // We cannot kill the pad if it has multiple uses. This typically arises
  // from unreachable basic blocks.
  if (!CPInst->hasOneUse())
    return false;

  // Check that there are no other instructions except for benign intrinsics.
          make_range<Instruction *>(CPInst->getNextNode(), RI)))
    return false;

  // If the cleanup return we are simplifying unwinds to the caller, this will
  // set UnwindDest to nullptr.
  BasicBlock *UnwindDest = RI->getUnwindDest();

  // We're about to remove BB from the control flow. Before we do, sink any
  // PHINodes into the unwind destination. Doing this before changing the
  // control flow avoids some potentially slow checks, since we can currently
  // be certain that UnwindDest and BB have no common predecessors (since they
  // are both EH pads).
  if (UnwindDest) {
    // First, go through the PHI nodes in UnwindDest and update any nodes that
    // reference the block we are removing
    for (PHINode &DestPN : UnwindDest->phis()) {
      int Idx = DestPN.getBasicBlockIndex(BB);
      // Since BB unwinds to UnwindDest, it has to be in the PHI node.
      assert(Idx != -1);
      // This PHI node has an incoming value that corresponds to a control
      // path through the cleanup pad we are removing. If the incoming
      // value is in the cleanup pad, it must be a PHINode (because we
      // verified above that the block is otherwise empty). Otherwise, the
      // value is either a constant or a value that dominates the cleanup
      // pad being removed.
      //
      // Because BB and UnwindDest are both EH pads, all of their
      // predecessors must unwind to these blocks, and since no instruction
      // can have multiple unwind destinations, there will be no overlap in
      // incoming blocks between SrcPN and DestPN.
      Value *SrcVal = DestPN.getIncomingValue(Idx);
      PHINode *SrcPN = dyn_cast<PHINode>(SrcVal);

      // Re-route the incoming value per predecessor of BB, translating
      // through BB's own PHI when the value was defined there.
      bool NeedPHITranslation = SrcPN && SrcPN->getParent() == BB;
      for (auto *Pred : predecessors(BB)) {
        Value *Incoming =
            NeedPHITranslation ? SrcPN->getIncomingValueForBlock(Pred) : SrcVal;
        DestPN.addIncoming(Incoming, Pred);
      }
    }

    // Sink any remaining PHI nodes directly into UnwindDest.
    BasicBlock::iterator InsertPt = UnwindDest->getFirstNonPHIIt();
    for (PHINode &PN : make_early_inc_range(BB->phis())) {
      if (PN.use_empty() || !PN.isUsedOutsideOfBlock(BB))
        // If the PHI node has no uses or all of its uses are in this basic
        // block (meaning they are debug or lifetime intrinsics), just leave
        // it. It will be erased when we erase BB below.
        continue;

      // Otherwise, sink this PHI node into UnwindDest.
      // Any predecessors to UnwindDest which are not already represented
      // must be back edges which inherit the value from the path through
      // BB. In this case, the PHI value must reference itself.
      for (auto *pred : predecessors(UnwindDest))
        if (pred != BB)
          PN.addIncoming(&PN, pred);
      PN.moveBefore(InsertPt);
      // Also, add a dummy incoming value for the original BB itself,
      // so that the PHI is well-formed until we drop said predecessor.
      PN.addIncoming(PoisonValue::get(PN.getType()), BB);
    }
  }

  std::vector<DominatorTree::UpdateType> Updates;

  // We use make_early_inc_range here because we will remove all predecessors.
    if (UnwindDest == nullptr) {
      // Unwinding to the caller: turn each predecessor's unwind edge into a
      // call (flushing pending DT updates first, since removeUnwindEdge may
      // consult/alter the CFG through DTU).
      if (DTU) {
        DTU->applyUpdates(Updates);
        Updates.clear();
      }
      removeUnwindEdge(PredBB, DTU);
      ++NumInvokes;
    } else {
      // Redirect the predecessor's terminator straight to UnwindDest,
      // bypassing the empty cleanup block.
      BB->removePredecessor(PredBB);
      Instruction *TI = PredBB->getTerminator();
      TI->replaceUsesOfWith(BB, UnwindDest);
      if (DTU) {
        Updates.push_back({DominatorTree::Insert, PredBB, UnwindDest});
        Updates.push_back({DominatorTree::Delete, PredBB, BB});
      }
    }
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  DeleteDeadBlock(BB, DTU);

  return true;
}
5622
// Try to merge two cleanuppads together.
// When this cleanupret is the sole predecessor of its unwind destination and
// that destination starts with another cleanuppad, fold the successor pad
// into the predecessor pad and replace the cleanupret with a plain branch.
  // Skip any cleanuprets which unwind to caller, there is nothing to merge
  // with.
  BasicBlock *UnwindDest = RI->getUnwindDest();
  if (!UnwindDest)
    return false;

  // This cleanupret isn't the only predecessor of this cleanuppad, it wouldn't
  // be safe to merge without code duplication.
  if (UnwindDest->getSinglePredecessor() != RI->getParent())
    return false;

  // Verify that our cleanuppad's unwind destination is another cleanuppad.
  auto *SuccessorCleanupPad = dyn_cast<CleanupPadInst>(&UnwindDest->front());
  if (!SuccessorCleanupPad)
    return false;

  CleanupPadInst *PredecessorCleanupPad = RI->getCleanupPad();
  // Replace any uses of the successor cleanuppad with the predecessor pad.
  // The only cleanuppad uses should be this cleanupret, its cleanupret and
  // funclet bundle operands.
  SuccessorCleanupPad->replaceAllUsesWith(PredecessorCleanupPad);
  // Remove the old cleanuppad.
  SuccessorCleanupPad->eraseFromParent();
  // Now, we simply replace the cleanupret with a branch to the unwind
  // destination.
  UncondBrInst::Create(UnwindDest, RI->getParent());
  RI->eraseFromParent();

  return true;
}
5655
5656bool SimplifyCFGOpt::simplifyCleanupReturn(CleanupReturnInst *RI) {
5657 // It is possible to transiantly have an undef cleanuppad operand because we
5658 // have deleted some, but not all, dead blocks.
5659 // Eventually, this block will be deleted.
5660 if (isa<UndefValue>(RI->getOperand(0)))
5661 return false;
5662
5663 if (mergeCleanupPad(RI))
5664 return true;
5665
5666 if (removeEmptyCleanup(RI, DTU))
5667 return true;
5668
5669 return false;
5670}
5671
// WARNING: keep in sync with InstCombinerImpl::visitUnreachableInst()!
// Simplify around an 'unreachable' terminator: delete droppable instructions
// preceding it, then — once the block is reduced to just the unreachable —
// rewrite every predecessor terminator so it no longer targets this block.
// Returns true if any change was made.
bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
  BasicBlock *BB = UI->getParent();

  bool Changed = false;

  // Ensure that any debug-info records that used to occur after the Unreachable
  // are moved to in front of it -- otherwise they'll "dangle" at the end of
  // the block.

  // Debug-info records on the unreachable inst itself should be deleted, as
  // below we delete everything past the final executable instruction.
  UI->dropDbgRecords();

  // If there are any instructions immediately before the unreachable that can
  // be removed, do so.
  while (UI->getIterator() != BB->begin()) {
    --BBI;

      break; // Can not drop any more instructions. We're done here.
    // Otherwise, this instruction can be freely erased,
    // even if it is not side-effect free.

    // Note that deleting EH's here is in fact okay, although it involves a bit
    // of subtle reasoning. If this inst is an EH, all the predecessors of this
    // block will be the unwind edges of Invoke/CatchSwitch/CleanupReturn,
    // and we can therefore guarantee this block will be erased.

    // If we're deleting this, we're deleting any subsequent debug info, so
    // delete DbgRecords.
    BBI->dropDbgRecords();

    // Delete this instruction (any uses are guaranteed to be dead)
    BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
    BBI->eraseFromParent();
    Changed = true;
  }

  // If the unreachable instruction is the first in the block, take a gander
  // at all of the predecessors of this instruction, and simplify them.
  if (&BB->front() != UI)
    return Changed;

  std::vector<DominatorTree::UpdateType> Updates;

  // Snapshot the predecessors first: the loop below mutates the CFG.
  SmallSetVector<BasicBlock *, 8> Preds(pred_begin(BB), pred_end(BB));
  for (BasicBlock *Predecessor : Preds) {
    Instruction *TI = Predecessor->getTerminator();
    IRBuilder<> Builder(TI);
    if (isa<UncondBrInst>(TI)) {
      // An unconditional branch into unreachable code: the predecessor
      // itself can never complete, so it becomes unreachable too.
      new UnreachableInst(TI->getContext(), TI->getIterator());
      TI->eraseFromParent();
      Changed = true;
      if (DTU)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    } else if (auto *BI = dyn_cast<CondBrInst>(TI)) {
      // We could either have a proper unconditional branch,
      // or a degenerate conditional branch with matching destinations.
      if (BI->getSuccessor(0) == BI->getSuccessor(1)) {
        new UnreachableInst(TI->getContext(), TI->getIterator());
        TI->eraseFromParent();
        Changed = true;
      } else {
        // One arm leads here: the branch must take the other arm, so record
        // the implied condition value as an assumption and make the branch
        // unconditional.
        Value* Cond = BI->getCondition();
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "The destinations are guaranteed to be different here.");
        CallInst *Assumption;
        if (BI->getSuccessor(0) == BB) {
          Assumption = Builder.CreateAssumption(Builder.CreateNot(Cond));
          Builder.CreateBr(BI->getSuccessor(1));
        } else {
          assert(BI->getSuccessor(1) == BB && "Incorrect CFG");
          Assumption = Builder.CreateAssumption(Cond);
          Builder.CreateBr(BI->getSuccessor(0));
        }
        if (Options.AC)
          Options.AC->registerAssumption(cast<AssumeInst>(Assumption));

        Changed = true;
      }
      if (DTU)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
      // Drop every switch case targeting this block (profile-aware wrapper
      // keeps branch weights consistent).
      SwitchInstProfUpdateWrapper SU(*SI);
      for (auto i = SU->case_begin(), e = SU->case_end(); i != e;) {
        if (i->getCaseSuccessor() != BB) {
          ++i;
          continue;
        }
        BB->removePredecessor(SU->getParent());
        i = SU.removeCase(i);
        e = SU->case_end();
        Changed = true;
      }
      // Note that the default destination can't be removed!
      if (DTU && SI->getDefaultDest() != BB)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    } else if (auto *II = dyn_cast<InvokeInst>(TI)) {
      if (II->getUnwindDest() == BB) {
        if (DTU) {
          DTU->applyUpdates(Updates);
          Updates.clear();
        }
        // Unwinding leads nowhere, so the invoke can become a plain call
        // marked nounwind.
        auto *CI = cast<CallInst>(removeUnwindEdge(TI->getParent(), DTU));
        if (!CI->doesNotThrow())
          CI->setDoesNotThrow();
        Changed = true;
      }
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      if (CSI->getUnwindDest() == BB) {
        if (DTU) {
          DTU->applyUpdates(Updates);
          Updates.clear();
        }
        removeUnwindEdge(TI->getParent(), DTU);
        Changed = true;
        continue;
      }

      // Remove every handler entry that targets this block.
      for (CatchSwitchInst::handler_iterator I = CSI->handler_begin(),
                                             E = CSI->handler_end();
           I != E; ++I) {
        if (*I == BB) {
          CSI->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
      if (DTU)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
      if (CSI->getNumHandlers() == 0) {
        if (CSI->hasUnwindDest()) {
          // Redirect all predecessors of the block containing CatchSwitchInst
          // to instead branch to the CatchSwitchInst's unwind destination.
          if (DTU) {
            for (auto *PredecessorOfPredecessor : predecessors(Predecessor)) {
              Updates.push_back({DominatorTree::Insert,
                                 PredecessorOfPredecessor,
                                 CSI->getUnwindDest()});
              Updates.push_back({DominatorTree::Delete,
                                 PredecessorOfPredecessor, Predecessor});
            }
          }
          Predecessor->replaceAllUsesWith(CSI->getUnwindDest());
        } else {
          // Rewrite all preds to unwind to caller (or from invoke to call).
          if (DTU) {
            DTU->applyUpdates(Updates);
            Updates.clear();
          }
          SmallVector<BasicBlock *, 8> EHPreds(predecessors(Predecessor));
          for (BasicBlock *EHPred : EHPreds)
            removeUnwindEdge(EHPred, DTU);
        }
        // The catchswitch is no longer reachable.
        new UnreachableInst(CSI->getContext(), CSI->getIterator());
        CSI->eraseFromParent();
        Changed = true;
      }
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      // (void) silences the unused-variable warning in NDEBUG builds where
      // the assert below compiles away.
      (void)CRI;
      assert(CRI->hasUnwindDest() && CRI->getUnwindDest() == BB &&
             "Expected to always have an unwind to BB.");
      if (DTU)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
      new UnreachableInst(TI->getContext(), TI->getIterator());
      TI->eraseFromParent();
      Changed = true;
    }
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  // If this block is now dead, remove it.
  if (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) {
    DeleteDeadBlock(BB, DTU);
    return true;
  }

  return Changed;
}
5859
5868
5869static std::optional<ContiguousCasesResult>
5872 BasicBlock *Dest, BasicBlock *OtherDest) {
5873 assert(Cases.size() >= 1);
5874
5876 const APInt &Min = Cases.back()->getValue();
5877 const APInt &Max = Cases.front()->getValue();
5878 APInt Offset = Max - Min;
5879 size_t ContiguousOffset = Cases.size() - 1;
5880 if (Offset == ContiguousOffset) {
5881 return ContiguousCasesResult{
5882 /*Min=*/Cases.back(),
5883 /*Max=*/Cases.front(),
5884 /*Dest=*/Dest,
5885 /*OtherDest=*/OtherDest,
5886 /*Cases=*/&Cases,
5887 /*OtherCases=*/&OtherCases,
5888 };
5889 }
5890 ConstantRange CR = computeConstantRange(Condition, /*ForSigned=*/false);
5891 // If this is a wrapping contiguous range, that is, [Min, OtherMin] +
5892 // [OtherMax, Max] (also [OtherMax, OtherMin]), [OtherMin+1, OtherMax-1] is a
5893 // contiguous range for the other destination. N.B. If CR is not a full range,
5894 // Max+1 is not equal to Min. It's not continuous in arithmetic.
5895 if (Max == CR.getUnsignedMax() && Min == CR.getUnsignedMin()) {
5896 assert(Cases.size() >= 2);
5897 auto *It =
5898 std::adjacent_find(Cases.begin(), Cases.end(), [](auto L, auto R) {
5899 return L->getValue() != R->getValue() + 1;
5900 });
5901 if (It == Cases.end())
5902 return std::nullopt;
5903 auto [OtherMax, OtherMin] = std::make_pair(*It, *std::next(It));
5904 if ((Max - OtherMax->getValue()) + (OtherMin->getValue() - Min) ==
5905 Cases.size() - 2) {
5906 return ContiguousCasesResult{
5907 /*Min=*/cast<ConstantInt>(
5908 ConstantInt::get(OtherMin->getType(), OtherMin->getValue() + 1)),
5909 /*Max=*/
5911 ConstantInt::get(OtherMax->getType(), OtherMax->getValue() - 1)),
5912 /*Dest=*/OtherDest,
5913 /*OtherDest=*/Dest,
5914 /*Cases=*/&OtherCases,
5915 /*OtherCases=*/&Cases,
5916 };
5917 }
5918 }
5919 return std::nullopt;
5920}
5921
                                           DomTreeUpdater *DTU,
                                           bool RemoveOrigDefaultBlock = true) {
  LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n");
  auto *BB = Switch->getParent();
  auto *OrigDefaultBlock = Switch->getDefaultDest();
  if (RemoveOrigDefaultBlock)
    OrigDefaultBlock->removePredecessor(BB);
  // Insert a fresh block containing only 'unreachable' and make it the
  // switch's default destination, replacing the (proven dead) original.
  BasicBlock *NewDefaultBlock = BasicBlock::Create(
      BB->getContext(), BB->getName() + ".unreachabledefault", BB->getParent(),
      OrigDefaultBlock);
  auto *UI = new UnreachableInst(Switch->getContext(), NewDefaultBlock);
  Switch->setDefaultDest(&*NewDefaultBlock);
  if (DTU) {
    // Record the new BB->NewDefaultBlock edge; only delete BB->OrigDefault
    // if no remaining case still targets the original default block.
    Updates.push_back({DominatorTree::Insert, BB, &*NewDefaultBlock});
    if (RemoveOrigDefaultBlock &&
        !is_contained(successors(BB), OrigDefaultBlock))
      Updates.push_back({DominatorTree::Delete, BB, &*OrigDefaultBlock});
    DTU->applyUpdates(Updates);
  }
}
5945
5946/// Turn a switch into an integer range comparison and branch.
5947/// Switches with more than 2 destinations are ignored.
5948/// Switches with 1 destination are also ignored.
5949bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
5950 IRBuilder<> &Builder) {
5951 assert(SI->getNumCases() > 1 && "Degenerate switch?");
5952
5953 bool HasDefault = !SI->defaultDestUnreachable();
5954
5955 auto *BB = SI->getParent();
5956 // Partition the cases into two sets with different destinations.
5957 BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr;
5958 BasicBlock *DestB = nullptr;
5961
5962 for (auto Case : SI->cases()) {
5963 BasicBlock *Dest = Case.getCaseSuccessor();
5964 if (!DestA)
5965 DestA = Dest;
5966 if (Dest == DestA) {
5967 CasesA.push_back(Case.getCaseValue());
5968 continue;
5969 }
5970 if (!DestB)
5971 DestB = Dest;
5972 if (Dest == DestB) {
5973 CasesB.push_back(Case.getCaseValue());
5974 continue;
5975 }
5976 return false; // More than two destinations.
5977 }
5978 if (!DestB)
5979 return false; // All destinations are the same and the default is unreachable
5980
5981 assert(DestA && DestB &&
5982 "Single-destination switch should have been folded.");
5983 assert(DestA != DestB);
5984 assert(DestB != SI->getDefaultDest());
5985 assert(!CasesB.empty() && "There must be non-default cases.");
5986 assert(!CasesA.empty() || HasDefault);
5987
5988 // Figure out if one of the sets of cases form a contiguous range.
5989 std::optional<ContiguousCasesResult> ContiguousCases;
5990
5991 // Only one icmp is needed when there is only one case.
5992 if (!HasDefault && CasesA.size() == 1)
5993 ContiguousCases = ContiguousCasesResult{
5994 /*Min=*/CasesA[0],
5995 /*Max=*/CasesA[0],
5996 /*Dest=*/DestA,
5997 /*OtherDest=*/DestB,
5998 /*Cases=*/&CasesA,
5999 /*OtherCases=*/&CasesB,
6000 };
6001 else if (CasesB.size() == 1)
6002 ContiguousCases = ContiguousCasesResult{
6003 /*Min=*/CasesB[0],
6004 /*Max=*/CasesB[0],
6005 /*Dest=*/DestB,
6006 /*OtherDest=*/DestA,
6007 /*Cases=*/&CasesB,
6008 /*OtherCases=*/&CasesA,
6009 };
6010 // Correctness: Cases to the default destination cannot be contiguous cases.
6011 else if (!HasDefault)
6012 ContiguousCases =
6013 findContiguousCases(SI->getCondition(), CasesA, CasesB, DestA, DestB);
6014
6015 if (!ContiguousCases)
6016 ContiguousCases =
6017 findContiguousCases(SI->getCondition(), CasesB, CasesA, DestB, DestA);
6018
6019 if (!ContiguousCases)
6020 return false;
6021
6022 auto [Min, Max, Dest, OtherDest, Cases, OtherCases] = *ContiguousCases;
6023
6024 // Start building the compare and branch.
6025
6027 Constant *NumCases = ConstantInt::get(Offset->getType(),
6028 Max->getValue() - Min->getValue() + 1);
6029 Instruction *NewBI;
6030 if (NumCases->isOneValue()) {
6031 assert(Max->getValue() == Min->getValue());
6032 Value *Cmp = Builder.CreateICmpEQ(SI->getCondition(), Min);
6033 NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest);
6034 }
6035 // If NumCases overflowed, then all possible values jump to the successor.
6036 else if (NumCases->isNullValue() && !Cases->empty()) {
6037 NewBI = Builder.CreateBr(Dest);
6038 } else {
6039 Value *Sub = SI->getCondition();
6040 if (!Offset->isNullValue())
6041 Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off");
6042 Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
6043 NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest);
6044 }
6045
6046 // Update weight for the newly-created conditional branch.
6047 if (hasBranchWeightMD(*SI) && isa<CondBrInst>(NewBI)) {
6048 SmallVector<uint64_t, 8> Weights;
6049 getBranchWeights(SI, Weights);
6050 if (Weights.size() == 1 + SI->getNumCases()) {
6051 uint64_t TrueWeight = 0;
6052 uint64_t FalseWeight = 0;
6053 for (size_t I = 0, E = Weights.size(); I != E; ++I) {
6054 if (SI->getSuccessor(I) == Dest)
6055 TrueWeight += Weights[I];
6056 else
6057 FalseWeight += Weights[I];
6058 }
6059 while (TrueWeight > UINT32_MAX || FalseWeight > UINT32_MAX) {
6060 TrueWeight /= 2;
6061 FalseWeight /= 2;
6062 }
6063 setFittedBranchWeights(*NewBI, {TrueWeight, FalseWeight},
6064 /*IsExpected=*/false, /*ElideAllZero=*/true);
6065 }
6066 }
6067
6068 // Prune obsolete incoming values off the successors' PHI nodes.
6069 for (auto &PHI : make_early_inc_range(Dest->phis())) {
6070 unsigned PreviousEdges = Cases->size();
6071 if (Dest == SI->getDefaultDest())
6072 ++PreviousEdges;
6073 for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I)
6074 PHI.removeIncomingValue(SI->getParent());
6075 }
6076 for (auto &PHI : make_early_inc_range(OtherDest->phis())) {
6077 unsigned PreviousEdges = OtherCases->size();
6078 if (OtherDest == SI->getDefaultDest())
6079 ++PreviousEdges;
6080 unsigned E = PreviousEdges - 1;
6081 // Remove all incoming values from OtherDest if OtherDest is unreachable.
6082 if (isa<UncondBrInst>(NewBI))
6083 ++E;
6084 for (unsigned I = 0; I != E; ++I)
6085 PHI.removeIncomingValue(SI->getParent());
6086 }
6087
6088 // Clean up the default block - it may have phis or other instructions before
6089 // the unreachable terminator.
6090 if (!HasDefault)
6092
6093 auto *UnreachableDefault = SI->getDefaultDest();
6094
6095 // Drop the switch.
6096 SI->eraseFromParent();
6097
6098 if (!HasDefault && DTU)
6099 DTU->applyUpdates({{DominatorTree::Delete, BB, UnreachableDefault}});
6100
6101 return true;
6102}
6103
6104/// Compute masked bits for the condition of a switch
6105/// and use it to remove dead cases.
// NOTE(review): the first line of the signature (file line 6106, taking at
// least the SwitchInst *SI and a DomTreeUpdater *DTU) is missing from this
// extracted listing; AC and DL below are the trailing parameters.
6107 AssumptionCache *AC,
6108 const DataLayout &DL) {
6109 Value *Cond = SI->getCondition();
// Bits of the condition proven zero/one let us reject impossible case values.
6110 KnownBits Known = computeKnownBits(Cond, DL, AC, SI);
// KnownValues is declared on the dropped file line 6111; when
// collectPossibleValues succeeds it enumerates every value Cond may take
// (bounded here at 4 candidates).
6112 bool IsKnownValuesValid = collectPossibleValues(Cond, KnownValues, 4);
6113
6114 // We can also eliminate cases by determining that their values are outside of
6115 // the limited range of the condition based on how many significant (non-sign)
6116 // bits are in the condition value.
6117 unsigned MaxSignificantBitsInCond =
// (initializer on the dropped file line 6118)
6119
6120 // Gather dead cases.
// DeadCases is declared on the dropped file line 6121.
6122 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
6123 SmallVector<BasicBlock *, 8> UniqueSuccessors;
6124 for (const auto &Case : SI->cases()) {
6125 auto *Successor = Case.getCaseSuccessor();
// Per-successor case counts are only needed to update the dominator tree.
6126 if (DTU) {
6127 auto [It, Inserted] = NumPerSuccessorCases.try_emplace(Successor);
6128 if (Inserted)
6129 UniqueSuccessors.push_back(Successor);
6130 ++It->second;
6131 }
6132 ConstantInt *CaseC = Case.getCaseValue();
6133 const APInt &CaseVal = CaseC->getValue();
// A case is dead if it conflicts with the known bits, exceeds the
// significant-bit bound, or is absent from the enumerated possible values.
6134 if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) ||
6135 (CaseVal.getSignificantBits() > MaxSignificantBitsInCond) ||
6136 (IsKnownValuesValid && !KnownValues.contains(CaseC))) {
6137 DeadCases.push_back(CaseC);
6138 if (DTU)
6139 --NumPerSuccessorCases[Successor];
6140 LLVM_DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal
6141 << " is dead.\n");
6142 } else if (IsKnownValuesValid)
// This possible value is handled by a case; what remains in KnownValues
// afterwards are possible values with no matching case (i.e. the values
// that can reach the default destination).
6143 KnownValues.erase(CaseC);
6144 }
6145
6146 // If we can prove that the cases must cover all possible values, the
6147 // default destination becomes dead and we can remove it. If we know some
6148 // of the bits in the value, we can use that to more precisely compute the
6149 // number of possible unique case values.
6150 bool HasDefault = !SI->defaultDestUnreachable();
6151 const unsigned NumUnknownBits =
6152 Known.getBitWidth() - (Known.Zero | Known.One).popcount();
6153 assert(NumUnknownBits <= Known.getBitWidth());
6154 if (HasDefault && DeadCases.empty()) {
// Every value that could reach the default is undef, so the default can be
// removed. (The call doing the removal sits on the dropped file line 6156.)
6155 if (IsKnownValuesValid && all_of(KnownValues, IsaPred<UndefValue>)) {
6157 return true;
6158 }
6159
6160 if (NumUnknownBits < 64 /* avoid overflow */) {
6161 uint64_t AllNumCases = 1ULL << NumUnknownBits;
// Cases cover the whole value domain: the default is unreachable.
// (Default-removal call on the dropped file line 6163.)
6162 if (SI->getNumCases() == AllNumCases) {
6164 return true;
6165 }
6166 // When only one case value is missing, replace default with that case.
6167 // Eliminating the default branch will provide more opportunities for
6168 // optimization, such as lookup tables.
6169 if (SI->getNumCases() == AllNumCases - 1) {
6170 assert(NumUnknownBits > 1 && "Should be canonicalized to a branch");
6171 IntegerType *CondTy = cast<IntegerType>(Cond->getType());
6172 if (CondTy->getIntegerBitWidth() > 64 ||
6173 !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth()))
6174 return false;
6175
// XOR of the present case values yields the single missing value: the XOR
// over the entire value domain is 0 when more than one bit is unknown
// (guaranteed by the assert above).
6176 uint64_t MissingCaseVal = 0;
6177 for (const auto &Case : SI->cases())
6178 MissingCaseVal ^= Case.getCaseValue()->getValue().getLimitedValue();
6179 auto *MissingCase = cast<ConstantInt>(
6180 ConstantInt::get(Cond->getType(), MissingCaseVal));
// SIW — declared on the dropped file line 6181, presumably a
// SwitchInstProfUpdateWrapper — keeps branch weights consistent while the
// case list is edited; the default-rewrite call head is on dropped 6184.
6182 SIW.addCase(MissingCase, SI->getDefaultDest(),
6183 SIW.getSuccessorWeight(0));
6185 /*RemoveOrigDefaultBlock*/ false);
6186 SIW.setSuccessorWeight(0, 0);
6187 return true;
6188 }
6189 }
6190 }
6191
6192 if (DeadCases.empty())
6193 return false;
6194
// (Wrapper declaration for SIW on the dropped file line 6195.)
6196 for (ConstantInt *DeadCase : DeadCases) {
6197 SwitchInst::CaseIt CaseI = SI->findCaseValue(DeadCase);
6198 assert(CaseI != SI->case_default() &&
6199 "Case was not found. Probably mistake in DeadCases forming.");
6200 // Prune unused values from PHI nodes.
6201 CaseI->getCaseSuccessor()->removePredecessor(SI->getParent());
6202 SIW.removeCase(CaseI);
6203 }
6204
// Delete CFG edges from the dominator tree for any successor whose every
// case was removed above.
6205 if (DTU) {
6206 std::vector<DominatorTree::UpdateType> Updates;
6207 for (auto *Successor : UniqueSuccessors)
6208 if (NumPerSuccessorCases[Successor] == 0)
6209 Updates.push_back({DominatorTree::Delete, SI->getParent(), Successor});
6210 DTU->applyUpdates(Updates);
6211 }
6212
6213 return true;
6214}
6215
6216/// If BB would be eligible for simplification by
6217/// TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated
6218/// by an unconditional branch), look at the phi node for BB in the successor
6219/// block and see if the incoming value is equal to CaseValue. If so, return
6220/// the phi node, and set PhiIndex to BB's index in the phi node.
// NOTE(review): the first signature line (file line 6221, naming the function
// and taking the CaseValue parameter) is missing from this extracted listing.
6222 BasicBlock *BB, int *PhiIndex) {
6223 if (&*BB->getFirstNonPHIIt() != BB->getTerminator())
6224 return nullptr; // BB must be empty to be a candidate for simplification.
6225 if (!BB->getSinglePredecessor())
6226 return nullptr; // BB must be dominated by the switch.
6227
// Branch is initialized on the dropped file line 6228 — presumably a cast of
// BB's terminator to a branch; verify against the upstream source.
6229 if (!Branch)
6230 return nullptr; // Terminator must be unconditional branch.
6231
6232 BasicBlock *Succ = Branch->getSuccessor();
6233
// Scan the successor's PHIs for one whose incoming value along the BB edge
// is exactly the case constant we want to forward.
6234 for (PHINode &PHI : Succ->phis()) {
6235 int Idx = PHI.getBasicBlockIndex(BB);
6236 assert(Idx >= 0 && "PHI has no entry for predecessor?");
6237
6238 Value *InValue = PHI.getIncomingValue(Idx);
6239 if (InValue != CaseValue)
6240 continue;
6241
// Report the matching slot to the caller.
6242 *PhiIndex = Idx;
6243 return &PHI;
6244 }
6245
6246 return nullptr;
6247}
6248
6249/// Try to forward the condition of a switch instruction to a phi node
6250/// dominated by the switch, if that would mean that some of the destination
6251/// blocks of the switch can be folded away. Return true if a change is made.
// NOTE(review): the signature line (file line 6252, taking the SwitchInst) is
// missing from this extracted listing.
6253 using ForwardingNodesMap = DenseMap<PHINode *, SmallVector<int, 4>>;
6254
// Maps a candidate phi to the incoming-value slots that could be replaced by
// the switch condition.
6255 ForwardingNodesMap ForwardingNodes;
6256 BasicBlock *SwitchBlock = SI->getParent();
6257 bool Changed = false;
6258 for (const auto &Case : SI->cases()) {
6259 ConstantInt *CaseValue = Case.getCaseValue();
6260 BasicBlock *CaseDest = Case.getCaseSuccessor();
6261
6262 // Replace phi operands in successor blocks that are using the constant case
6263 // value rather than the switch condition variable:
6264 // switchbb:
6265 // switch i32 %x, label %default [
6266 // i32 17, label %succ
6267 // ...
6268 // succ:
6269 // %r = phi i32 ... [ 17, %switchbb ] ...
6270 // -->
6271 // %r = phi i32 ... [ %x, %switchbb ] ...
6272
6273 for (PHINode &Phi : CaseDest->phis()) {
6274 // This only works if there is exactly 1 incoming edge from the switch to
6275 // a phi. If there is >1, that means multiple cases of the switch map to 1
6276 // value in the phi, and that phi value is not the switch condition. Thus,
6277 // this transform would not make sense (the phi would be invalid because
6278 // a phi can't have different incoming values from the same block).
6279 int SwitchBBIdx = Phi.getBasicBlockIndex(SwitchBlock);
6280 if (Phi.getIncomingValue(SwitchBBIdx) == CaseValue &&
6281 count(Phi.blocks(), SwitchBlock) == 1) {
6282 Phi.setIncomingValue(SwitchBBIdx, SI->getCondition());
6283 Changed = true;
6284 }
6285 }
6286
6287 // Collect phi nodes that are indirectly using this switch's case constants.
6288 int PhiIdx;
6289 if (auto *Phi = findPHIForConditionForwarding(CaseValue, CaseDest, &PhiIdx))
6290 ForwardingNodes[Phi].push_back(PhiIdx);
6291 }
6292
6293 for (auto &ForwardingNode : ForwardingNodes) {
6294 PHINode *Phi = ForwardingNode.first;
6295 SmallVectorImpl<int> &Indexes = ForwardingNode.second;
6296 // Check if it helps to fold PHI.
// Forwarding pays off only if at least two slots collapse to the condition,
// or the condition already appears among the phi's incoming values.
6297 if (Indexes.size() < 2 && !llvm::is_contained(Phi->incoming_values(), SI->getCondition()))
6298 continue;
6299
6300 for (int Index : Indexes)
6301 Phi->setIncomingValue(Index, SI->getCondition());
6302 Changed = true;
6303 }
6304
6305 return Changed;
6306}
6307
6308/// Return true if the backend will be able to handle
6309/// initializing an array of constants like C.
// NOTE(review): the signature line (file line 6310, taking Constant *C and
// the TargetTransformInfo &TTI) is missing from this extracted listing.
6311 if (C->isThreadDependent())
6312 return false;
6313 if (C->isDLLImportDependent())
6314 return false;
6315
// The condition on the dropped file lines 6316-6317 appears to reject
// constant kinds other than the simple scalar ones — TODO confirm against
// the upstream source.
6318 return false;
6319
6320 // Globals cannot contain scalable types.
6321 if (C->getType()->isScalableTy())
6322 return false;
6323
// Dropped file line 6324 presumably begins a ConstantExpr check binding CE.
6325 // Pointer casts and in-bounds GEPs will not prohibit the backend from
6326 // materializing the array of constants.
6327 Constant *StrippedC = cast<Constant>(CE->stripInBoundsConstantOffsets());
6328 if (StrippedC == C || !validLookupTableConstant(StrippedC, TTI))
6329 return false;
6330 }
6331
// Let the target veto constants it cannot cheaply materialize in a table.
6332 if (!TTI.shouldBuildLookupTablesForConstant(C))
6333 return false;
6334
6335 return true;
6336}
6337
6338/// If V is a Constant, return it. Otherwise, try to look up
6339/// its constant value in ConstantPool, returning 0 if it's not there.
6340static Constant *
// NOTE(review): the parameter lines (file lines 6341-6342: V and the
// Value-to-Constant ConstantPool map) are missing from this extracted
// listing.
6343 if (Constant *C = dyn_cast<Constant>(V))
6344 return C;
// lookup() yields nullptr when V has no entry in the pool.
6345 return ConstantPool.lookup(V);
6346}
6347
6348/// Try to fold instruction I into a constant. This works for
6349/// simple instructions such as binary operations where both operands are
6350/// constant or can be replaced by constants from the ConstantPool. Returns the
6351/// resulting constant on success, 0 otherwise.
6352static Constant *
// NOTE(review): the parameter lines (file lines 6353-6355: I, DL, and the
// ConstantPool map) and the dyn_cast binding Select are missing from this
// extracted listing.
6356 Constant *A = lookupConstant(Select->getCondition(), ConstantPool);
6357 if (!A)
6358 return nullptr;
// A select folds to one of its arms once the condition is a known constant.
6359 if (A->isAllOnesValue())
6360 return lookupConstant(Select->getTrueValue(), ConstantPool);
6361 if (A->isNullValue())
6362 return lookupConstant(Select->getFalseValue(), ConstantPool);
6363 return nullptr;
6364 }
6365
// COps (declared on the dropped file line 6366) collects the constant value
// of each operand; give up if any operand has no known constant.
6367 for (unsigned N = 0, E = I->getNumOperands(); N != E; ++N) {
6368 if (Constant *A = lookupConstant(I->getOperand(N), ConstantPool))
6369 COps.push_back(A);
6370 else
6371 return nullptr;
6372 }
6373
// Delegate the actual folding to the generic constant folder.
6374 return ConstantFoldInstOperands(I, COps, DL);
6375}
6376
6377/// Try to determine the resulting constant values in phi nodes
6378/// at the common destination basic block, *CommonDest, for one of the case
6379/// destinations CaseDest corresponding to value CaseVal (nullptr for the
6380/// default case), of a switch instruction SI.
6381static bool
// NOTE(review): the line naming the function and its leading parameters
// (file line 6382) is missing from this extracted listing.
6383 BasicBlock **CommonDest,
6384 SmallVectorImpl<std::pair<PHINode *, Constant *>> &Res,
6385 const DataLayout &DL, const TargetTransformInfo &TTI) {
6386 // The block from which we enter the common destination.
6387 BasicBlock *Pred = SI->getParent();
6388
6389 // If CaseDest is empty except for some side-effect free instructions through
6390 // which we can constant-propagate the CaseVal, continue to its successor.
// ConstantPool (declared on the dropped file line 6391) maps values to the
// constants they are known to hold along this case's path; seed it with the
// switch condition itself.
6392 ConstantPool.insert(std::make_pair(SI->getCondition(), CaseVal));
6393 for (Instruction &I : *CaseDest) {
6394 if (I.isTerminator()) {
6395 // If the terminator is a simple branch, continue to the next block.
6396 if (I.getNumSuccessors() != 1 || I.isSpecialTerminator())
6397 return false;
6398 Pred = CaseDest;
6399 CaseDest = I.getSuccessor(0);
6400 } else if (Constant *C = constantFold(&I, DL, ConstantPool)) {
6401 // Instruction is side-effect free and constant.
6402
6403 // If the instruction has uses outside this block or a phi node slot for
6404 // the block, it is not safe to bypass the instruction since it would then
6405 // no longer dominate all its uses.
6406 for (auto &Use : I.uses()) {
6407 User *User = Use.getUser();
// The dropped file line 6408 presumably rebinds I to the using instruction
// — TODO confirm; uses inside CaseDest itself stay dominated and are fine.
6409 if (I->getParent() == CaseDest)
6410 continue;
6411 if (PHINode *Phi = dyn_cast<PHINode>(User))
6412 if (Phi->getIncomingBlock(Use) == CaseDest)
6413 continue;
6414 return false;
6415 }
6416
6417 ConstantPool.insert(std::make_pair(&I, C));
6418 } else {
// Not foldable: stop propagating, but the block may still end in a simple
// branch to the common destination.
6419 break;
6420 }
6421 }
6422
6423 // If we did not have a CommonDest before, use the current one.
6424 if (!*CommonDest)
6425 *CommonDest = CaseDest;
6426 // If the destination isn't the common one, abort.
6427 if (CaseDest != *CommonDest)
6428 return false;
6429
6430 // Get the values for this case from phi nodes in the destination block.
6431 for (PHINode &PHI : (*CommonDest)->phis()) {
6432 int Idx = PHI.getBasicBlockIndex(Pred);
6433 if (Idx == -1)
6434 continue;
6435
6436 Constant *ConstVal =
6437 lookupConstant(PHI.getIncomingValue(Idx), ConstantPool);
6438 if (!ConstVal)
6439 return false;
6440
6441 // Be conservative about which kinds of constants we support.
6442 if (!validLookupTableConstant(ConstVal, TTI))
6443 return false;
6444
6445 Res.push_back(std::make_pair(&PHI, ConstVal));
6446 }
6447
// Success only if at least one phi result was collected.
6448 return Res.size() > 0;
6449}
6450
6451// Helper function used to add CaseVal to the list of cases that generate
6452// Result. Returns the updated number of cases that generate this result.
6453static size_t mapCaseToResult(ConstantInt *CaseVal,
6454 SwitchCaseResultVectorTy &UniqueResults,
6455 Constant *Result) {
6456 for (auto &I : UniqueResults) {
6457 if (I.first == Result) {
6458 I.second.push_back(CaseVal);
6459 return I.second.size();
6460 }
6461 }
6462 UniqueResults.push_back(
6463 std::make_pair(Result, SmallVector<ConstantInt *, 4>(1, CaseVal)));
6464 return 1;
6465}
6466
6467// Helper function that initializes a map containing
6468// results for the PHI node of the common destination block for a switch
6469// instruction. Returns false if multiple PHI nodes have been found or if
6470// there is not a common destination block for the switch.
// NOTE(review): the first signature line (file line 6471, taking the
// SwitchInst and the PHINode *&PHI out-parameter) is missing from this
// extracted listing.
6472 BasicBlock *&CommonDest,
6473 SwitchCaseResultVectorTy &UniqueResults,
6474 Constant *&DefaultResult,
6475 const DataLayout &DL,
6476 const TargetTransformInfo &TTI,
6477 uintptr_t MaxUniqueResults) {
6478 for (const auto &I : SI->cases()) {
6479 ConstantInt *CaseVal = I.getCaseValue();
6480
6481 // Resulting value at phi nodes for this case value.
6482 SwitchCaseResultsTy Results;
6483 if (!getCaseResults(SI, CaseVal, I.getCaseSuccessor(), &CommonDest, Results,
6484 DL, TTI))
6485 return false;
6486
6487 // Only one value per case is permitted.
6488 if (Results.size() > 1)
6489 return false;
6490
6491 // Add the case->result mapping to UniqueResults.
6492 const size_t NumCasesForResult =
6493 mapCaseToResult(CaseVal, UniqueResults, Results.begin()->second);
6494
6495 // Early out if there are too many cases for this result.
6496 if (NumCasesForResult > MaxSwitchCasesPerResult)
6497 return false;
6498
6499 // Early out if there are too many unique results.
6500 if (UniqueResults.size() > MaxUniqueResults)
6501 return false;
6502
6503 // Check the PHI consistency.
// All cases must feed the same single phi node.
6504 if (!PHI)
6505 PHI = Results[0].first;
6506 else if (PHI != Results[0].first)
6507 return false;
6508 }
6509 // Find the default result value.
// DefaultResults (declared on the dropped file line 6510) receives the phi
// results reachable from the default destination.
6511 getCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest, DefaultResults,
6512 DL, TTI);
6513 // If the default value is not found abort unless the default destination
6514 // is unreachable.
6515 DefaultResult =
6516 DefaultResults.size() == 1 ? DefaultResults.begin()->second : nullptr;
6517
6518 return DefaultResult || SI->defaultDestUnreachable();
6519}
6520
6521// Helper function that checks if it is possible to transform a switch with only
6522// two cases (or two cases + default) that produces a result into a select.
6523// TODO: Handle switches with more than 2 cases that map to the same result.
6524// The branch weights correspond to the provided Condition (i.e. if Condition is
6525// modified from the original SwitchInst, the caller must adjust the weights)
6526static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
6527 Constant *DefaultResult, Value *Condition,
6528 IRBuilder<> &Builder, const DataLayout &DL,
6529 ArrayRef<uint32_t> BranchWeights) {
6530 // If we are selecting between only two cases transform into a simple
6531 // select or a two-way select if default is possible.
6532 // Example:
6533 // switch (a) { %0 = icmp eq i32 %a, 10
6534 // case 10: return 42; %1 = select i1 %0, i32 42, i32 4
6535 // case 20: return 2; ----> %2 = icmp eq i32 %a, 20
6536 // default: return 4; %3 = select i1 %2, i32 2, i32 %1
6537 // }
6538
6539 const bool HasBranchWeights =
6540 !BranchWeights.empty() && !ProfcheckDisableMetadataFixes;
6541
// Two distinct results, one case each: build one or two selects.
6542 if (ResultVector.size() == 2 && ResultVector[0].second.size() == 1 &&
6543 ResultVector[1].second.size() == 1) {
6544 ConstantInt *FirstCase = ResultVector[0].second[0];
6545 ConstantInt *SecondCase = ResultVector[1].second[0];
6546 Value *SelectValue = ResultVector[1].first;
6547 if (DefaultResult) {
6548 Value *ValueCompare =
6549 Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp");
6550 SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first,
6551 DefaultResult, "switch.select");
6552 if (auto *SI = dyn_cast<SelectInst>(SelectValue);
6553 SI && HasBranchWeights) {
6554 // We start with 3 probabilities, where the numerator is the
6555 // corresponding BranchWeights[i], and the denominator is the sum over
6556 // BranchWeights. We want the probability and negative probability of
6557 // Condition == SecondCase.
6558 assert(BranchWeights.size() == 3);
// NOTE(review): the call head applying these weights (file line 6559) is
// missing from this extracted listing.
6560 *SI, {BranchWeights[2], BranchWeights[0] + BranchWeights[1]},
6561 /*IsExpected=*/false, /*ElideAllZero=*/true);
6562 }
6563 }
6564 Value *ValueCompare =
6565 Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp");
6566 Value *Ret = Builder.CreateSelect(ValueCompare, ResultVector[0].first,
6567 SelectValue, "switch.select");
6568 if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
6569 // We may have had a DefaultResult. Base the position of the first and
6570 // second's branch weights accordingly. Also the probability that Condition
6571 // != FirstCase needs to take that into account.
6572 assert(BranchWeights.size() >= 2);
6573 size_t FirstCasePos = (Condition != nullptr);
6574 size_t SecondCasePos = FirstCasePos + 1;
6575 uint32_t DefaultCase = (Condition != nullptr) ? BranchWeights[0] : 0;
// (Weight-applying call head on the dropped file line 6576.)
6577 {BranchWeights[FirstCasePos],
6578 DefaultCase + BranchWeights[SecondCasePos]},
6579 /*IsExpected=*/false, /*ElideAllZero=*/true);
6580 }
6581 return Ret;
6582 }
6583
6584 // Handle the degenerate case where two cases have the same result value.
6585 if (ResultVector.size() == 1 && DefaultResult) {
6586 ArrayRef<ConstantInt *> CaseValues = ResultVector[0].second;
6587 unsigned CaseCount = CaseValues.size();
6588 // n bits group cases map to the same result:
6589 // case 0,4 -> Cond & 0b1..1011 == 0 ? result : default
6590 // case 0,2,4,6 -> Cond & 0b1..1001 == 0 ? result : default
6591 // case 0,2,8,10 -> Cond & 0b1..0101 == 0 ? result : default
6592 if (isPowerOf2_32(CaseCount)) {
6593 ConstantInt *MinCaseVal = CaseValues[0];
6594 // If there are bits that are set exclusively by CaseValues, we
6595 // can transform the switch into a select if the conjunction of
6596 // all the values uniquely identify CaseValues.
6597 APInt AndMask = APInt::getAllOnes(MinCaseVal->getBitWidth());
6598
6599 // Find the minimum value and compute the and of all the case values.
6600 for (auto *Case : CaseValues) {
6601 if (Case->getValue().slt(MinCaseVal->getValue()))
6602 MinCaseVal = Case;
6603 AndMask &= Case->getValue();
6604 }
6605 KnownBits Known = computeKnownBits(Condition, DL);
6606
6607 if (!AndMask.isZero() && Known.getMaxValue().uge(AndMask)) {
6608 // Compute the number of bits that are free to vary.
6609 unsigned FreeBits = Known.countMaxActiveBits() - AndMask.popcount();
6610
6611 // Check if the number of values covered by the mask is equal
6612 // to the number of cases.
6613 if (FreeBits == Log2_32(CaseCount)) {
6614 Value *And = Builder.CreateAnd(Condition, AndMask);
6615 Value *Cmp = Builder.CreateICmpEQ(
6616 And, Constant::getIntegerValue(And->getType(), AndMask));
6617 Value *Ret =
6618 Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult);
6619 if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
6620 // We know there's a Default case. We base the resulting branch
6621 // weights off its probability.
6622 assert(BranchWeights.size() >= 2);
// (Weight-applying call head on the dropped file line 6623.)
6624 *SI,
6625 {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
6626 /*IsExpected=*/false, /*ElideAllZero=*/true);
6627 }
6628 return Ret;
6629 }
6630 }
6631
6632 // Mark the bits case number touched.
6633 APInt BitMask = APInt::getZero(MinCaseVal->getBitWidth());
6634 for (auto *Case : CaseValues)
6635 BitMask |= (Case->getValue() - MinCaseVal->getValue());
6636
6637 // Check if cases with the same result can cover all number
6638 // in touched bits.
6639 if (BitMask.popcount() == Log2_32(CaseCount)) {
6640 if (!MinCaseVal->isNullValue())
6641 Condition = Builder.CreateSub(Condition, MinCaseVal);
6642 Value *And = Builder.CreateAnd(Condition, ~BitMask, "switch.and");
6643 Value *Cmp = Builder.CreateICmpEQ(
6644 And, Constant::getNullValue(And->getType()), "switch.selectcmp");
6645 Value *Ret =
6646 Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult);
6647 if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
6648 assert(BranchWeights.size() >= 2);
// (Weight-applying call head on the dropped file line 6649.)
6650 *SI,
6651 {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
6652 /*IsExpected=*/false, /*ElideAllZero=*/true);
6653 }
6654 return Ret;
6655 }
6656 }
6657
6658 // Handle the degenerate case where two cases have the same value.
6659 if (CaseValues.size() == 2) {
6660 Value *Cmp1 = Builder.CreateICmpEQ(Condition, CaseValues[0],
6661 "switch.selectcmp.case1");
6662 Value *Cmp2 = Builder.CreateICmpEQ(Condition, CaseValues[1],
6663 "switch.selectcmp.case2");
6664 Value *Cmp = Builder.CreateOr(Cmp1, Cmp2, "switch.selectcmp");
6665 Value *Ret =
6666 Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult);
6667 if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
6668 assert(BranchWeights.size() >= 2);
// (Weight-applying call head on the dropped file line 6669.)
6670 *SI, {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
6671 /*IsExpected=*/false, /*ElideAllZero=*/true);
6672 }
6673 return Ret;
6674 }
6675 }
6676
// No profitable select form was found.
6677 return nullptr;
6678}
6679
6680// Helper function to cleanup a switch instruction that has been converted into
6681// a select, fixing up PHI nodes and basic blocks.
// NOTE(review): the first signature line (file line 6682, naming the function
// and taking the SwitchInst and the PHINode) is missing from this extracted
// listing.
6683 Value *SelectValue,
6684 IRBuilder<> &Builder,
6685 DomTreeUpdater *DTU) {
6686 std::vector<DominatorTree::UpdateType> Updates;
6687
6688 BasicBlock *SelectBB = SI->getParent();
6689 BasicBlock *DestBB = PHI->getParent();
6690
// Record the new unconditional edge unless the CFG already has it.
6691 if (DTU && !is_contained(predecessors(DestBB), SelectBB))
6692 Updates.push_back({DominatorTree::Insert, SelectBB, DestBB});
6693 Builder.CreateBr(DestBB);
6694
6695 // Remove the switch.
6696
// Collapse all of the switch's incoming phi slots into a single entry
// carrying the select's value.
6697 PHI->removeIncomingValueIf(
6698 [&](unsigned Idx) { return PHI->getIncomingBlock(Idx) == SelectBB; });
6699 PHI->addIncoming(SelectValue, SelectBB);
6700
6701 SmallPtrSet<BasicBlock *, 4> RemovedSuccessors;
6702 for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) {
6703 BasicBlock *Succ = SI->getSuccessor(i);
6704
6705 if (Succ == DestBB)
6706 continue;
6707 Succ->removePredecessor(SelectBB);
// Deduplicate dominator-tree deletions when several cases share a successor.
6708 if (DTU && RemovedSuccessors.insert(Succ).second)
6709 Updates.push_back({DominatorTree::Delete, SelectBB, Succ});
6710 }
6711 SI->eraseFromParent();
6712 if (DTU)
6713 DTU->applyUpdates(Updates);
6714}
6715
6716/// If a switch is only used to initialize one or more phi nodes in a common
6717/// successor block with only two different constant values, try to replace the
6718/// switch with a select. Returns true if the fold was made.
// NOTE(review): the first signature line (file line 6719, naming the function
// and taking the SwitchInst and the IRBuilder) is missing from this extracted
// listing.
6720 DomTreeUpdater *DTU, const DataLayout &DL,
6721 const TargetTransformInfo &TTI) {
6722 Value *const Cond = SI->getCondition();
6723 PHINode *PHI = nullptr;
6724 BasicBlock *CommonDest = nullptr;
6725 Constant *DefaultResult;
6726 SwitchCaseResultVectorTy UniqueResults;
6727 // Collect all the cases that will deliver the same value from the switch.
6728 if (!initializeUniqueCases(SI, PHI, CommonDest, UniqueResults, DefaultResult,
6729 DL, TTI, /*MaxUniqueResults*/ 2))
6730 return false;
6731
6732 assert(PHI != nullptr && "PHI for value select not found");
6733 Builder.SetInsertPoint(SI);
6734 SmallVector<uint32_t, 4> BranchWeights;
// The dropped file lines 6735 and 6737 appear to guard and perform the
// extraction of branch weights from the switch's profile metadata — TODO
// confirm against the upstream source.
6736 [[maybe_unused]] auto HasWeights =
6738 assert(!HasWeights == (BranchWeights.empty()));
6739 }
6740 assert(BranchWeights.empty() ||
6741 (BranchWeights.size() >=
6742 UniqueResults.size() + (DefaultResult != nullptr)));
6743
6744 Value *SelectValue = foldSwitchToSelect(UniqueResults, DefaultResult, Cond,
6745 Builder, DL, BranchWeights);
6746 if (!SelectValue)
6747 return false;
6748
// The fold succeeded: rewire PHIs/CFG and erase the switch.
6749 removeSwitchAfterSelectFold(SI, PHI, SelectValue, Builder, DTU);
6750 return true;
6751}
6752
6753namespace {
6754
6755/// This class finds alternatives for switches to ultimately
6756/// replace the switch.
6757class SwitchReplacement {
6758public:
6759 /// Create a helper for optimizations to use as a switch replacement.
6760 /// Find a better representation for the content of Values,
6761 /// using DefaultValue to fill any holes in the table.
6762 SwitchReplacement(
6763 Module &M, uint64_t TableSize, ConstantInt *Offset,
6764 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
6765 Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName);
6766
6767 /// Build instructions with Builder to retrieve values using Index
6768 /// and replace the switch.
6769 Value *replaceSwitch(Value *Index, IRBuilder<> &Builder, const DataLayout &DL,
6770 Function *Func);
6771
6772 /// Return true if a table with TableSize elements of
6773 /// type ElementType would fit in a target-legal register.
6774 static bool wouldFitInRegister(const DataLayout &DL, uint64_t TableSize,
6775 Type *ElementType);
6776
6777 /// Return the default value of the switch.
6778 Constant *getDefaultValue();
6779
6780 /// Return true if the replacement is a lookup table.
6781 bool isLookupTable();
6782
6783 /// Return true if the replacement is a bit map.
6784 bool isBitMap();
6785
6786private:
6787 // Depending on the switch, there are different alternatives.
6788 enum {
6789 // For switches where each case contains the same value, we just have to
6790 // store that single value and return it for each lookup.
6791 SingleValueKind,
6792
6793 // For switches where there is a linear relationship between table index
6794 // and values. We calculate the result with a simple multiplication
6795 // and addition instead of a table lookup.
6796 LinearMapKind,
6797
6798 // For small tables with integer elements, we can pack them into a bitmap
6799 // that fits into a target-legal register. Values are retrieved by
6800 // shift and mask operations.
6801 BitMapKind,
6802
6803 // The table is stored as an array of values. Values are retrieved by load
6804 // instructions from the table.
6805 LookupTableKind
// Which of the strategies above applies; selected by the constructor.
6806 } Kind;
6807
6808 // The default value of the switch.
6809 Constant *DefaultValue;
6810
6811 // The type of the output values.
6812 Type *ValueType;
6813
6814 // For SingleValueKind, this is the single value.
6815 Constant *SingleValue = nullptr;
6816
6817 // For BitMapKind, this is the bitmap.
6818 ConstantInt *BitMap = nullptr;
// For BitMapKind, the integer element type packed into the bitmap.
6819 IntegerType *BitMapElementTy = nullptr;
6820
6821 // For LinearMapKind, these are the constants used to derive the value.
6822 ConstantInt *LinearOffset = nullptr;
6823 ConstantInt *LinearMultiplier = nullptr;
// For LinearMapKind, whether the linear map may wrap (disables nsw flags).
6824 bool LinearMapValWrapped = false;
6825
6826 // For LookupTableKind, this is the table.
6827 Constant *Initializer = nullptr;
6828};
6829
6830} // end anonymous namespace
6831
6832SwitchReplacement::SwitchReplacement(
6833 Module &M, uint64_t TableSize, ConstantInt *Offset,
6834 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
6835 Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName)
6836 : DefaultValue(DefaultValue) {
6837 assert(Values.size() && "Can't build lookup table without values!");
6838 assert(TableSize >= Values.size() && "Can't fit values in table!");
6839
6840 // If all values in the table are equal, this is that value.
6841 SingleValue = Values.begin()->second;
6842
6843 ValueType = Values.begin()->second->getType();
6844
6845 // Build up the table contents.
6846 SmallVector<Constant *, 64> TableContents(TableSize);
6847 for (const auto &[CaseVal, CaseRes] : Values) {
6848 assert(CaseRes->getType() == ValueType);
6849
6850 uint64_t Idx = (CaseVal->getValue() - Offset->getValue()).getLimitedValue();
6851 TableContents[Idx] = CaseRes;
6852
6853 if (SingleValue && !isa<PoisonValue>(CaseRes) && CaseRes != SingleValue)
6854 SingleValue = isa<PoisonValue>(SingleValue) ? CaseRes : nullptr;
6855 }
6856
6857 // Fill in any holes in the table with the default result.
6858 if (Values.size() < TableSize) {
6859 assert(DefaultValue &&
6860 "Need a default value to fill the lookup table holes.");
6861 assert(DefaultValue->getType() == ValueType);
6862 for (uint64_t I = 0; I < TableSize; ++I) {
6863 if (!TableContents[I])
6864 TableContents[I] = DefaultValue;
6865 }
6866
6867 // If the default value is poison, all the holes are poison.
6868 bool DefaultValueIsPoison = isa<PoisonValue>(DefaultValue);
6869
6870 if (DefaultValue != SingleValue && !DefaultValueIsPoison)
6871 SingleValue = nullptr;
6872 }
6873
6874 // If each element in the table contains the same value, we only need to store
6875 // that single value.
6876 if (SingleValue) {
6877 Kind = SingleValueKind;
6878 return;
6879 }
6880
6881 // Check if we can derive the value with a linear transformation from the
6882 // table index.
6884 bool LinearMappingPossible = true;
6885 APInt PrevVal;
6886 APInt DistToPrev;
6887 // When linear map is monotonic and signed overflow doesn't happen on
6888 // maximum index, we can attach nsw on Add and Mul.
6889 bool NonMonotonic = false;
6890 assert(TableSize >= 2 && "Should be a SingleValue table.");
6891 // Check if there is the same distance between two consecutive values.
6892 for (uint64_t I = 0; I < TableSize; ++I) {
6893 ConstantInt *ConstVal = dyn_cast<ConstantInt>(TableContents[I]);
6894
6895 if (!ConstVal && isa<PoisonValue>(TableContents[I])) {
 6896   // This is a poison, so it's (probably) a lookup table hole.
6897 // To prevent any regressions from before we switched to using poison as
6898 // the default value, holes will fall back to using the first value.
6899 // This can be removed once we add proper handling for poisons in lookup
6900 // tables.
6901 ConstVal = dyn_cast<ConstantInt>(Values[0].second);
6902 }
6903
6904 if (!ConstVal) {
6905 // This is an undef. We could deal with it, but undefs in lookup tables
6906 // are very seldom. It's probably not worth the additional complexity.
6907 LinearMappingPossible = false;
6908 break;
6909 }
6910 const APInt &Val = ConstVal->getValue();
6911 if (I != 0) {
6912 APInt Dist = Val - PrevVal;
6913 if (I == 1) {
6914 DistToPrev = Dist;
6915 } else if (Dist != DistToPrev) {
6916 LinearMappingPossible = false;
6917 break;
6918 }
6919 NonMonotonic |=
6920 Dist.isStrictlyPositive() ? Val.sle(PrevVal) : Val.sgt(PrevVal);
6921 }
6922 PrevVal = Val;
6923 }
6924 if (LinearMappingPossible) {
6925 LinearOffset = cast<ConstantInt>(TableContents[0]);
6926 LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev);
6927 APInt M = LinearMultiplier->getValue();
6928 bool MayWrap = true;
6929 if (isIntN(M.getBitWidth(), TableSize - 1))
6930 (void)M.smul_ov(APInt(M.getBitWidth(), TableSize - 1), MayWrap);
6931 LinearMapValWrapped = NonMonotonic || MayWrap;
6932 Kind = LinearMapKind;
6933 return;
6934 }
6935 }
6936
6937 // If the type is integer and the table fits in a register, build a bitmap.
6938 if (wouldFitInRegister(DL, TableSize, ValueType)) {
6940 APInt TableInt(TableSize * IT->getBitWidth(), 0);
6941 for (uint64_t I = TableSize; I > 0; --I) {
6942 TableInt <<= IT->getBitWidth();
6943 // Insert values into the bitmap. Undef values are set to zero.
6944 if (!isa<UndefValue>(TableContents[I - 1])) {
6945 ConstantInt *Val = cast<ConstantInt>(TableContents[I - 1]);
6946 TableInt |= Val->getValue().zext(TableInt.getBitWidth());
6947 }
6948 }
6949 BitMap = ConstantInt::get(M.getContext(), TableInt);
6950 BitMapElementTy = IT;
6951 Kind = BitMapKind;
6952 return;
6953 }
6954
6955 // Store the table in an array.
6956 auto *TableTy = ArrayType::get(ValueType, TableSize);
6957 Initializer = ConstantArray::get(TableTy, TableContents);
6958
6959 Kind = LookupTableKind;
6960}
6961
/// Emit IR at \p Builder's current insertion point that computes the value
/// this replacement produces for \p Index (the already offset-adjusted table
/// index). The emitted form depends on Kind: the single constant itself, a
/// linear function of the index, a shift/mask of an integer bitmap, or a load
/// from a global constant array created in \p Func's module.
Value *SwitchReplacement::replaceSwitch(Value *Index, IRBuilder<> &Builder,
                                        const DataLayout &DL, Function *Func) {
  switch (Kind) {
  case SingleValueKind:
    // Every table entry is the same constant; no IR is needed at all.
    return SingleValue;
  case LinearMapKind: {
    ++NumLinearMaps;
    // Derive the result value from the input value.
    Value *Result = Builder.CreateIntCast(Index, LinearMultiplier->getType(),
                                          false, "switch.idx.cast");
    // Elide the multiply when the multiplier is 1 and the add when the
    // offset is 0; NSW is attached unless the map was found to wrap.
    if (!LinearMultiplier->isOne())
      Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult",
                                 /*HasNUW = */ false,
                                 /*HasNSW = */ !LinearMapValWrapped);

    if (!LinearOffset->isZero())
      Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset",
                                 /*HasNUW = */ false,
                                 /*HasNSW = */ !LinearMapValWrapped);
    return Result;
  }
  case BitMapKind: {
    ++NumBitMaps;
    // Type of the bitmap (e.g. i59).
    IntegerType *MapTy = BitMap->getIntegerType();

    // Cast Index to the same type as the bitmap.
    // Note: The Index is <= the number of elements in the table, so
    // truncating it to the width of the bitmask is safe.
    Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast");

    // Multiply the shift amount by the element width. NUW/NSW can always be
    // set, because wouldFitInRegister guarantees Index * ShiftAmt is in
    // BitMap's bit width.
    ShiftAmt = Builder.CreateMul(
        ShiftAmt, ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()),
        "switch.shiftamt",/*HasNUW =*/true,/*HasNSW =*/true);

    // Shift down.
    Value *DownShifted =
        Builder.CreateLShr(BitMap, ShiftAmt, "switch.downshift");
    // Mask off.
    return Builder.CreateTrunc(DownShifted, BitMapElementTy, "switch.masked");
  }
  case LookupTableKind: {
    ++NumLookupTables;
    // Materialize the table as a private constant global in Func's module.
    auto *Table =
        new GlobalVariable(*Func->getParent(), Initializer->getType(),
                           /*isConstant=*/true, GlobalVariable::PrivateLinkage,
                           Initializer, "switch.table." + Func->getName());
    Table->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
    // Set the alignment to that of an array items. We will be only loading one
    // value out of it.
    Table->setAlignment(DL.getPrefTypeAlign(ValueType));
    Type *IndexTy = DL.getIndexType(Table->getType());
    auto *ArrayTy = cast<ArrayType>(Table->getValueType());

    if (Index->getType() != IndexTy) {
      unsigned OldBitWidth = Index->getType()->getIntegerBitWidth();
      Index = Builder.CreateZExtOrTrunc(Index, IndexTy);
      // The zext source is known non-negative (so the zext can be marked
      // nneg) when the largest valid array index fits in OldBitWidth - 1
      // bits, i.e. in the non-negative range of the old type.
      if (auto *Zext = dyn_cast<ZExtInst>(Index))
        Zext->setNonNeg(
            isUIntN(OldBitWidth - 1, ArrayTy->getNumElements() - 1));
    }

    Value *GEPIndices[] = {ConstantInt::get(IndexTy, 0), Index};
    Value *GEP =
        Builder.CreateInBoundsGEP(ArrayTy, Table, GEPIndices, "switch.gep");
    return Builder.CreateLoad(ArrayTy->getElementType(), GEP, "switch.load");
  }
  }
  llvm_unreachable("Unknown helper kind!");
}
7035
7036bool SwitchReplacement::wouldFitInRegister(const DataLayout &DL,
7037 uint64_t TableSize,
7038 Type *ElementType) {
7039 auto *IT = dyn_cast<IntegerType>(ElementType);
7040 if (!IT)
7041 return false;
7042 // FIXME: If the type is wider than it needs to be, e.g. i8 but all values
7043 // are <= 15, we could try to narrow the type.
7044
7045 // Avoid overflow, fitsInLegalInteger uses unsigned int for the width.
7046 if (TableSize >= UINT_MAX / IT->getBitWidth())
7047 return false;
7048 return DL.fitsInLegalInteger(TableSize * IT->getBitWidth());
7049}
7050
7052 const DataLayout &DL) {
7053 // Allow any legal type.
7054 if (TTI.isTypeLegal(Ty))
7055 return true;
7056
7057 auto *IT = dyn_cast<IntegerType>(Ty);
7058 if (!IT)
7059 return false;
7060
7061 // Also allow power of 2 integer types that have at least 8 bits and fit in
7062 // a register. These types are common in frontend languages and targets
7063 // usually support loads of these types.
7064 // TODO: We could relax this to any integer that fits in a register and rely
7065 // on ABI alignment and padding in the table to allow the load to be widened.
7066 // Or we could widen the constants and truncate the load.
7067 unsigned BitWidth = IT->getBitWidth();
7068 return BitWidth >= 8 && isPowerOf2_32(BitWidth) &&
7069 DL.fitsInLegalInteger(IT->getBitWidth());
7070}
7071
/// Return the constant recorded for the switch's default destination (used
/// to fill lookup-table holes); may be null if none was provided.
Constant *SwitchReplacement::getDefaultValue() { return DefaultValue; }
7073
/// True when this replacement will be emitted as a load from a global
/// constant array (LookupTableKind).
bool SwitchReplacement::isLookupTable() { return Kind == LookupTableKind; }
7075
/// True when this replacement will be emitted as shift/mask operations on a
/// register-sized integer bitmap (BitMapKind).
bool SwitchReplacement::isBitMap() { return Kind == BitMapKind; }
7077
/// Return true when \p NumCases cases covering a value range of \p CaseRange
/// are dense enough to be worth lowering as a table.
static bool isSwitchDense(uint64_t NumCases, uint64_t CaseRange) {
  // Require at least 40% of the range to be covered by cases. This is the
  // default density for building a jump table in optsize/minsize mode; see
  // also TargetLoweringBase::isSuitableForJumpTable(), on which this
  // heuristic was based.
  constexpr uint64_t MinDensity = 40;

  // Bail out when either product below could overflow a uint64_t.
  if (CaseRange >= UINT64_MAX / 100)
    return false;

  return CaseRange * MinDensity <= NumCases * 100;
}
7089
7091 uint64_t Diff = (uint64_t)Values.back() - (uint64_t)Values.front();
7092 uint64_t Range = Diff + 1;
7093 if (Range < Diff)
7094 return false; // Overflow.
7095
7096 return isSwitchDense(Values.size(), Range);
7097}
7098
7099/// Determine whether a lookup table should be built for this switch, based on
7100/// the number of cases, size of the table, and the types of the results.
7101// TODO: We could support larger than legal types by limiting based on the
7102// number of loads required and/or table size. If the constants are small we
7103// could use smaller table entries and extend after the load.
7105 const TargetTransformInfo &TTI,
7106 const DataLayout &DL,
7107 const SmallVector<Type *> &ResultTypes) {
7108 if (SI->getNumCases() > TableSize)
7109 return false; // TableSize overflowed.
7110
7111 bool AllTablesFitInRegister = true;
7112 bool HasIllegalType = false;
7113 for (const auto &Ty : ResultTypes) {
7114 // Saturate this flag to true.
7115 HasIllegalType = HasIllegalType || !isTypeLegalForLookupTable(Ty, TTI, DL);
7116
7117 // Saturate this flag to false.
7118 AllTablesFitInRegister =
7119 AllTablesFitInRegister &&
7120 SwitchReplacement::wouldFitInRegister(DL, TableSize, Ty);
7121
7122 // If both flags saturate, we're done. NOTE: This *only* works with
7123 // saturating flags, and all flags have to saturate first due to the
7124 // non-deterministic behavior of iterating over a dense map.
7125 if (HasIllegalType && !AllTablesFitInRegister)
7126 break;
7127 }
7128
7129 // If each table would fit in a register, we should build it anyway.
7130 if (AllTablesFitInRegister)
7131 return true;
7132
7133 // Don't build a table that doesn't fit in-register if it has illegal types.
7134 if (HasIllegalType)
7135 return false;
7136
7137 return isSwitchDense(SI->getNumCases(), TableSize);
7138}
7139
7141 ConstantInt &MinCaseVal, const ConstantInt &MaxCaseVal,
7142 bool HasDefaultResults, const SmallVector<Type *> &ResultTypes,
7143 const DataLayout &DL, const TargetTransformInfo &TTI) {
7144 if (MinCaseVal.isNullValue())
7145 return true;
7146 if (MinCaseVal.isNegative() ||
7147 MaxCaseVal.getLimitedValue() == std::numeric_limits<uint64_t>::max() ||
7148 !HasDefaultResults)
7149 return false;
7150 return all_of(ResultTypes, [&](const auto &ResultType) {
7151 return SwitchReplacement::wouldFitInRegister(
7152 DL, MaxCaseVal.getLimitedValue() + 1 /* TableSize */, ResultType);
7153 });
7154}
7155
7156/// Try to reuse the switch table index compare. Following pattern:
7157/// \code
7158/// if (idx < tablesize)
7159/// r = table[idx]; // table does not contain default_value
7160/// else
7161/// r = default_value;
7162/// if (r != default_value)
7163/// ...
7164/// \endcode
7165/// Is optimized to:
7166/// \code
7167/// cond = idx < tablesize;
7168/// if (cond)
7169/// r = table[idx];
7170/// else
7171/// r = default_value;
7172/// if (cond)
7173/// ...
7174/// \endcode
7175/// Jump threading will then eliminate the second if(cond).
7177 User *PhiUser, BasicBlock *PhiBlock, CondBrInst *RangeCheckBranch,
7178 Constant *DefaultValue,
7179 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values) {
7181 if (!CmpInst)
7182 return;
7183
7184 // We require that the compare is in the same block as the phi so that jump
7185 // threading can do its work afterwards.
7186 if (CmpInst->getParent() != PhiBlock)
7187 return;
7188
7190 if (!CmpOp1)
7191 return;
7192
7193 Value *RangeCmp = RangeCheckBranch->getCondition();
7194 Constant *TrueConst = ConstantInt::getTrue(RangeCmp->getType());
7195 Constant *FalseConst = ConstantInt::getFalse(RangeCmp->getType());
7196
7197 // Check if the compare with the default value is constant true or false.
7198 const DataLayout &DL = PhiBlock->getDataLayout();
7200 CmpInst->getPredicate(), DefaultValue, CmpOp1, DL);
7201 if (DefaultConst != TrueConst && DefaultConst != FalseConst)
7202 return;
7203
7204 // Check if the compare with the case values is distinct from the default
7205 // compare result.
7206 for (auto ValuePair : Values) {
7208 CmpInst->getPredicate(), ValuePair.second, CmpOp1, DL);
7209 if (!CaseConst || CaseConst == DefaultConst ||
7210 (CaseConst != TrueConst && CaseConst != FalseConst))
7211 return;
7212 }
7213
7214 // Check if the branch instruction dominates the phi node. It's a simple
7215 // dominance check, but sufficient for our needs.
7216 // Although this check is invariant in the calling loops, it's better to do it
7217 // at this late stage. Practically we do it at most once for a switch.
7218 BasicBlock *BranchBlock = RangeCheckBranch->getParent();
7219 for (BasicBlock *Pred : predecessors(PhiBlock)) {
7220 if (Pred != BranchBlock && Pred->getUniquePredecessor() != BranchBlock)
7221 return;
7222 }
7223
7224 if (DefaultConst == FalseConst) {
7225 // The compare yields the same result. We can replace it.
7226 CmpInst->replaceAllUsesWith(RangeCmp);
7227 ++NumTableCmpReuses;
7228 } else {
7229 // The compare yields the same result, just inverted. We can replace it.
7230 Value *InvertedTableCmp = BinaryOperator::CreateXor(
7231 RangeCmp, ConstantInt::get(RangeCmp->getType(), 1), "inverted.cmp",
7232 RangeCheckBranch->getIterator());
7233 CmpInst->replaceAllUsesWith(InvertedTableCmp);
7234 ++NumTableCmpReuses;
7235 }
7236}
7237
7238/// If the switch is only used to initialize one or more phi nodes in a common
7239/// successor block with different constant values, replace the switch with
7240/// lookup tables.
7242 DomTreeUpdater *DTU, const DataLayout &DL,
7243 const TargetTransformInfo &TTI,
7244 bool ConvertSwitchToLookupTable) {
7245 assert(SI->getNumCases() > 1 && "Degenerate switch?");
7246
7247 BasicBlock *BB = SI->getParent();
7248 Function *Fn = BB->getParent();
7249
7250 // FIXME: If the switch is too sparse for a lookup table, perhaps we could
7251 // split off a dense part and build a lookup table for that.
7252
7253 // FIXME: This creates arrays of GEPs to constant strings, which means each
7254 // GEP needs a runtime relocation in PIC code. We should just build one big
7255 // string and lookup indices into that.
7256
7257 // Ignore switches with less than three cases. Lookup tables will not make
7258 // them faster, so we don't analyze them.
7259 if (SI->getNumCases() < 3)
7260 return false;
7261
7262 // Figure out the corresponding result for each case value and phi node in the
7263 // common destination, as well as the min and max case values.
7264 assert(!SI->cases().empty());
7265 SwitchInst::CaseIt CI = SI->case_begin();
7266 ConstantInt *MinCaseVal = CI->getCaseValue();
7267 ConstantInt *MaxCaseVal = CI->getCaseValue();
7268
7269 BasicBlock *CommonDest = nullptr;
7270
7271 using ResultListTy = SmallVector<std::pair<ConstantInt *, Constant *>, 4>;
7273
7275 SmallVector<Type *> ResultTypes;
7277
7278 for (SwitchInst::CaseIt E = SI->case_end(); CI != E; ++CI) {
7279 ConstantInt *CaseVal = CI->getCaseValue();
7280 if (CaseVal->getValue().slt(MinCaseVal->getValue()))
7281 MinCaseVal = CaseVal;
7282 if (CaseVal->getValue().sgt(MaxCaseVal->getValue()))
7283 MaxCaseVal = CaseVal;
7284
7285 // Resulting value at phi nodes for this case value.
7287 ResultsTy Results;
7288 if (!getCaseResults(SI, CaseVal, CI->getCaseSuccessor(), &CommonDest,
7289 Results, DL, TTI))
7290 return false;
7291
7292 // Append the result and result types from this case to the list for each
7293 // phi.
7294 for (const auto &I : Results) {
7295 PHINode *PHI = I.first;
7296 Constant *Value = I.second;
7297 auto [It, Inserted] = ResultLists.try_emplace(PHI);
7298 if (Inserted)
7299 PHIs.push_back(PHI);
7300 It->second.push_back(std::make_pair(CaseVal, Value));
7301 ResultTypes.push_back(PHI->getType());
7302 }
7303 }
7304
7305 // If the table has holes, we need a constant result for the default case
7306 // or a bitmask that fits in a register.
7307 SmallVector<std::pair<PHINode *, Constant *>, 4> DefaultResultsList;
7308 bool HasDefaultResults =
7309 getCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest,
7310 DefaultResultsList, DL, TTI);
7311 for (const auto &I : DefaultResultsList) {
7312 PHINode *PHI = I.first;
7313 Constant *Result = I.second;
7314 DefaultResults[PHI] = Result;
7315 }
7316
7317 bool UseSwitchConditionAsTableIndex = shouldUseSwitchConditionAsTableIndex(
7318 *MinCaseVal, *MaxCaseVal, HasDefaultResults, ResultTypes, DL, TTI);
7319 uint64_t TableSize;
7320 ConstantInt *TableIndexOffset;
7321 if (UseSwitchConditionAsTableIndex) {
7322 TableSize = MaxCaseVal->getLimitedValue() + 1;
7323 TableIndexOffset = ConstantInt::get(MaxCaseVal->getIntegerType(), 0);
7324 } else {
7325 TableSize =
7326 (MaxCaseVal->getValue() - MinCaseVal->getValue()).getLimitedValue() + 1;
7327
7328 TableIndexOffset = MinCaseVal;
7329 }
7330
7331 // If the default destination is unreachable, or if the lookup table covers
7332 // all values of the conditional variable, branch directly to the lookup table
7333 // BB. Otherwise, check that the condition is within the case range.
7334 uint64_t NumResults = ResultLists[PHIs[0]].size();
7335 bool DefaultIsReachable = !SI->defaultDestUnreachable();
7336
7337 bool TableHasHoles = (NumResults < TableSize);
7338
7339 // If the table has holes but the default destination doesn't produce any
7340 // constant results, the lookup table entries corresponding to the holes will
7341 // contain poison.
7342 bool AllHolesArePoison = TableHasHoles && !HasDefaultResults;
7343
7344 // If the default destination doesn't produce a constant result but is still
7345 // reachable, and the lookup table has holes, we need to use a mask to
7346 // determine if the current index should load from the lookup table or jump
7347 // to the default case.
7348 // The mask is unnecessary if the table has holes but the default destination
7349 // is unreachable, as in that case the holes must also be unreachable.
7350 bool NeedMask = AllHolesArePoison && DefaultIsReachable;
7351 if (NeedMask) {
7352 // As an extra penalty for the validity test we require more cases.
7353 if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark).
7354 return false;
7355 if (!DL.fitsInLegalInteger(TableSize))
7356 return false;
7357 }
7358
7359 if (!shouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))
7360 return false;
7361
7362 // Compute the table index value.
7363 Value *TableIndex;
7364 if (UseSwitchConditionAsTableIndex) {
7365 TableIndex = SI->getCondition();
7366 if (HasDefaultResults) {
7367 // Grow the table to cover all possible index values to avoid the range
7368 // check. It will use the default result to fill in the table hole later,
 7369   // so make sure it exists.
7370 ConstantRange CR =
7371 computeConstantRange(TableIndex, /* ForSigned */ false);
7372 // Grow the table shouldn't have any size impact by checking
7373 // wouldFitInRegister.
7374 // TODO: Consider growing the table also when it doesn't fit in a register
7375 // if no optsize is specified.
7376 const uint64_t UpperBound = CR.getUpper().getLimitedValue();
7377 if (!CR.isUpperWrapped() &&
7378 all_of(ResultTypes, [&](const auto &ResultType) {
7379 return SwitchReplacement::wouldFitInRegister(DL, UpperBound,
7380 ResultType);
7381 })) {
7382 // There may be some case index larger than the UpperBound (unreachable
7383 // case), so make sure the table size does not get smaller.
7384 TableSize = std::max(UpperBound, TableSize);
7385 // The default branch is unreachable after we enlarge the lookup table.
7386 // Adjust DefaultIsReachable to reuse code path.
7387 DefaultIsReachable = false;
7388 }
7389 }
7390 }
7391
7392 // Keep track of the switch replacement for each phi
7394 for (PHINode *PHI : PHIs) {
7395 const auto &ResultList = ResultLists[PHI];
7396
7397 Type *ResultType = ResultList.begin()->second->getType();
7398 // Use any value to fill the lookup table holes.
7399 Constant *DefaultVal =
7400 AllHolesArePoison ? PoisonValue::get(ResultType) : DefaultResults[PHI];
7401 StringRef FuncName = Fn->getName();
7402 SwitchReplacement Replacement(*Fn->getParent(), TableSize, TableIndexOffset,
7403 ResultList, DefaultVal, DL, FuncName);
7404 PhiToReplacementMap.insert({PHI, Replacement});
7405 }
7406
7407 bool AnyLookupTables = any_of(
7408 PhiToReplacementMap, [](auto &KV) { return KV.second.isLookupTable(); });
7409 bool AnyBitMaps = any_of(PhiToReplacementMap,
7410 [](auto &KV) { return KV.second.isBitMap(); });
7411
7412 // A few conditions prevent the generation of lookup tables:
7413 // 1. The target does not support lookup tables.
7414 // 2. The "no-jump-tables" function attribute is set.
7415 // However, these objections do not apply to other switch replacements, like
7416 // the bitmap, so we only stop here if any of these conditions are met and we
7417 // want to create a LUT. Otherwise, continue with the switch replacement.
7418 if (AnyLookupTables &&
7419 (!TTI.shouldBuildLookupTables() ||
7420 Fn->getFnAttribute("no-jump-tables").getValueAsBool()))
7421 return false;
7422
7423 // In the early optimization pipeline, disable formation of lookup tables,
7424 // bit maps and mask checks, as they may inhibit further optimization.
7425 if (!ConvertSwitchToLookupTable &&
7426 (AnyLookupTables || AnyBitMaps || NeedMask))
7427 return false;
7428
7429 Builder.SetInsertPoint(SI);
7430 // TableIndex is the switch condition - TableIndexOffset if we don't
7431 // use the condition directly
7432 if (!UseSwitchConditionAsTableIndex) {
7433 // If the default is unreachable, all case values are s>= MinCaseVal. Then
7434 // we can try to attach nsw.
7435 bool MayWrap = true;
7436 if (!DefaultIsReachable) {
7437 APInt Res =
7438 MaxCaseVal->getValue().ssub_ov(MinCaseVal->getValue(), MayWrap);
7439 (void)Res;
7440 }
7441 TableIndex = Builder.CreateSub(SI->getCondition(), TableIndexOffset,
7442 "switch.tableidx", /*HasNUW =*/false,
7443 /*HasNSW =*/!MayWrap);
7444 }
7445
7446 std::vector<DominatorTree::UpdateType> Updates;
7447
7448 // Compute the maximum table size representable by the integer type we are
7449 // switching upon.
7450 unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits();
7451 uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX : 1ULL << CaseSize;
7452 assert(MaxTableSize >= TableSize &&
7453 "It is impossible for a switch to have more entries than the max "
7454 "representable value of its input integer type's size.");
7455
7456 // Create the BB that does the lookups.
7457 Module &Mod = *CommonDest->getParent()->getParent();
7458 BasicBlock *LookupBB = BasicBlock::Create(
7459 Mod.getContext(), "switch.lookup", CommonDest->getParent(), CommonDest);
7460
7461 CondBrInst *RangeCheckBranch = nullptr;
7462 CondBrInst *CondBranch = nullptr;
7463
7464 Builder.SetInsertPoint(SI);
7465 const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize);
7466 if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
7467 Builder.CreateBr(LookupBB);
7468 if (DTU)
7469 Updates.push_back({DominatorTree::Insert, BB, LookupBB});
7470 // Note: We call removeProdecessor later since we need to be able to get the
7471 // PHI value for the default case in case we're using a bit mask.
7472 } else {
7473 Value *Cmp = Builder.CreateICmpULT(
7474 TableIndex, ConstantInt::get(MinCaseVal->getType(), TableSize));
7475 RangeCheckBranch =
7476 Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
7477 CondBranch = RangeCheckBranch;
7478 if (DTU)
7479 Updates.push_back({DominatorTree::Insert, BB, LookupBB});
7480 }
7481
7482 // Populate the BB that does the lookups.
7483 Builder.SetInsertPoint(LookupBB);
7484
7485 if (NeedMask) {
7486 // Before doing the lookup, we do the hole check. The LookupBB is therefore
7487 // re-purposed to do the hole check, and we create a new LookupBB.
7488 BasicBlock *MaskBB = LookupBB;
7489 MaskBB->setName("switch.hole_check");
7490 LookupBB = BasicBlock::Create(Mod.getContext(), "switch.lookup",
7491 CommonDest->getParent(), CommonDest);
7492
7493 // Make the mask's bitwidth at least 8-bit and a power-of-2 to avoid
7494 // unnecessary illegal types.
7495 uint64_t TableSizePowOf2 = NextPowerOf2(std::max(7ULL, TableSize - 1ULL));
7496 APInt MaskInt(TableSizePowOf2, 0);
7497 APInt One(TableSizePowOf2, 1);
7498 // Build bitmask; fill in a 1 bit for every case.
7499 const ResultListTy &ResultList = ResultLists[PHIs[0]];
7500 for (const auto &Result : ResultList) {
7501 uint64_t Idx = (Result.first->getValue() - TableIndexOffset->getValue())
7502 .getLimitedValue();
7503 MaskInt |= One << Idx;
7504 }
7505 ConstantInt *TableMask = ConstantInt::get(Mod.getContext(), MaskInt);
7506
7507 // Get the TableIndex'th bit of the bitmask.
7508 // If this bit is 0 (meaning hole) jump to the default destination,
7509 // else continue with table lookup.
7510 IntegerType *MapTy = TableMask->getIntegerType();
7511 Value *MaskIndex =
7512 Builder.CreateZExtOrTrunc(TableIndex, MapTy, "switch.maskindex");
7513 Value *Shifted = Builder.CreateLShr(TableMask, MaskIndex, "switch.shifted");
7514 Value *LoBit = Builder.CreateTrunc(
7515 Shifted, Type::getInt1Ty(Mod.getContext()), "switch.lobit");
7516 CondBranch = Builder.CreateCondBr(LoBit, LookupBB, SI->getDefaultDest());
7517 if (DTU) {
7518 Updates.push_back({DominatorTree::Insert, MaskBB, LookupBB});
7519 Updates.push_back({DominatorTree::Insert, MaskBB, SI->getDefaultDest()});
7520 }
7521 Builder.SetInsertPoint(LookupBB);
7522 addPredecessorToBlock(SI->getDefaultDest(), MaskBB, BB);
7523 }
7524
7525 if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
7526 // We cached PHINodes in PHIs. To avoid accessing deleted PHINodes later,
7527 // do not delete PHINodes here.
7528 SI->getDefaultDest()->removePredecessor(BB,
7529 /*KeepOneInputPHIs=*/true);
7530 if (DTU)
7531 Updates.push_back({DominatorTree::Delete, BB, SI->getDefaultDest()});
7532 }
7533
7534 for (PHINode *PHI : PHIs) {
7535 const ResultListTy &ResultList = ResultLists[PHI];
7536 auto Replacement = PhiToReplacementMap.at(PHI);
7537 auto *Result = Replacement.replaceSwitch(TableIndex, Builder, DL, Fn);
7538 // Do a small peephole optimization: re-use the switch table compare if
7539 // possible.
7540 if (!TableHasHoles && HasDefaultResults && RangeCheckBranch) {
7541 BasicBlock *PhiBlock = PHI->getParent();
7542 // Search for compare instructions which use the phi.
7543 for (auto *User : PHI->users()) {
7544 reuseTableCompare(User, PhiBlock, RangeCheckBranch,
7545 Replacement.getDefaultValue(), ResultList);
7546 }
7547 }
7548
7549 PHI->addIncoming(Result, LookupBB);
7550 }
7551
7552 Builder.CreateBr(CommonDest);
7553 if (DTU)
7554 Updates.push_back({DominatorTree::Insert, LookupBB, CommonDest});
7555
7556 SmallVector<uint32_t> BranchWeights;
7557 const bool HasBranchWeights = CondBranch && !ProfcheckDisableMetadataFixes &&
7558 extractBranchWeights(*SI, BranchWeights);
7559 uint64_t ToLookupWeight = 0;
7560 uint64_t ToDefaultWeight = 0;
7561
7562 // Remove the switch.
7563 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
7564 for (unsigned I = 0, E = SI->getNumSuccessors(); I < E; ++I) {
7565 BasicBlock *Succ = SI->getSuccessor(I);
7566
7567 if (Succ == SI->getDefaultDest()) {
7568 if (HasBranchWeights)
7569 ToDefaultWeight += BranchWeights[I];
7570 continue;
7571 }
7572 Succ->removePredecessor(BB);
7573 if (DTU && RemovedSuccessors.insert(Succ).second)
7574 Updates.push_back({DominatorTree::Delete, BB, Succ});
7575 if (HasBranchWeights)
7576 ToLookupWeight += BranchWeights[I];
7577 }
7578 SI->eraseFromParent();
7579 if (HasBranchWeights)
7580 setFittedBranchWeights(*CondBranch, {ToLookupWeight, ToDefaultWeight},
7581 /*IsExpected=*/false);
7582 if (DTU)
7583 DTU->applyUpdates(Updates);
7584
7585 if (NeedMask)
7586 ++NumLookupTablesHoles;
7587 return true;
7588}
7589
7590/// Try to transform a switch that has "holes" in it to a contiguous sequence
7591/// of cases.
7592///
7593/// A switch such as: switch(i) {case 5: case 9: case 13: case 17:} can be
7594/// range-reduced to: switch ((i-5) / 4) {case 0: case 1: case 2: case 3:}.
7595///
7596/// This converts a sparse switch into a dense switch which allows better
7597/// lowering and could also allow transforming into a lookup table.
7599 const DataLayout &DL,
7600 const TargetTransformInfo &TTI) {
7601 auto *CondTy = cast<IntegerType>(SI->getCondition()->getType());
7602 if (CondTy->getIntegerBitWidth() > 64 ||
7603 !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth()))
7604 return false;
7605 // Only bother with this optimization if there are more than 3 switch cases;
7606 // SDAG will only bother creating jump tables for 4 or more cases.
7607 if (SI->getNumCases() < 4)
7608 return false;
7609
7610 // This transform is agnostic to the signedness of the input or case values. We
7611 // can treat the case values as signed or unsigned. We can optimize more common
7612 // cases such as a sequence crossing zero {-4,0,4,8} if we interpret case values
7613 // as signed.
7615 for (const auto &C : SI->cases())
7616 Values.push_back(C.getCaseValue()->getValue().getSExtValue());
7617 llvm::sort(Values);
7618
7619 // If the switch is already dense, there's nothing useful to do here.
7620 if (isSwitchDense(Values))
7621 return false;
7622
7623 // First, transform the values such that they start at zero and ascend.
7624 int64_t Base = Values[0];
7625 for (auto &V : Values)
7626 V -= (uint64_t)(Base);
7627
7628 // Now we have signed numbers that have been shifted so that, given enough
7629 // precision, there are no negative values. Since the rest of the transform
7630 // is bitwise only, we switch now to an unsigned representation.
7631
7632 // This transform can be done speculatively because it is so cheap - it
7633 // results in a single rotate operation being inserted.
7634
7635 // countTrailingZeros(0) returns 64. As Values is guaranteed to have more than
7636 // one element and LLVM disallows duplicate cases, Shift is guaranteed to be
7637 // less than 64.
7638 unsigned Shift = 64;
7639 for (auto &V : Values)
7640 Shift = std::min(Shift, (unsigned)llvm::countr_zero((uint64_t)V));
7641 assert(Shift < 64);
7642 if (Shift > 0)
7643 for (auto &V : Values)
7644 V = (int64_t)((uint64_t)V >> Shift);
7645
7646 if (!isSwitchDense(Values))
7647 // Transform didn't create a dense switch.
7648 return false;
7649
7650 // The obvious transform is to shift the switch condition right and emit a
7651 // check that the condition actually cleanly divided by GCD, i.e.
7652 // C & (1 << Shift - 1) == 0
7653 // inserting a new CFG edge to handle the case where it didn't divide cleanly.
7654 //
7655 // A cheaper way of doing this is a simple ROTR(C, Shift). This performs the
7656 // shift and puts the shifted-off bits in the uppermost bits. If any of these
7657 // are nonzero then the switch condition will be very large and will hit the
7658 // default case.
7659
7660 auto *Ty = cast<IntegerType>(SI->getCondition()->getType());
7661 Builder.SetInsertPoint(SI);
7662 Value *Sub =
7663 Builder.CreateSub(SI->getCondition(), ConstantInt::getSigned(Ty, Base));
7664 Value *Rot = Builder.CreateIntrinsic(
7665 Ty, Intrinsic::fshl,
7666 {Sub, Sub, ConstantInt::get(Ty, Ty->getBitWidth() - Shift)});
7667 SI->replaceUsesOfWith(SI->getCondition(), Rot);
7668
7669 for (auto Case : SI->cases()) {
7670 auto *Orig = Case.getCaseValue();
7671 auto Sub = Orig->getValue() - APInt(Ty->getBitWidth(), Base, true);
7672 Case.setValue(cast<ConstantInt>(ConstantInt::get(Ty, Sub.lshr(Shift))));
7673 }
7674 return true;
7675}
7676
/// Tries to transform the switch when the condition is umin with a constant.
/// In that case, the default branch can be replaced by the constant's branch.
/// This method also removes dead cases when the simplification cannot replace
/// the default branch.
///
/// For example:
/// switch(umin(a, 3)) {
/// case 0:
/// case 1:
/// case 2:
/// case 3:
/// case 4:
/// // ...
/// default:
/// unreachable
/// }
///
/// Transforms into:
///
/// switch(a) {
/// case 0:
/// case 1:
/// case 2:
/// default:
/// // This is case 3
/// }
  Value *A;

  // Only handle conditions of the exact shape umin(A, Constant).
  if (!match(SI->getCondition(), m_UMin(m_Value(A), m_ConstantInt(Constant))))
    return false;

  BasicBlock *BB = SIW->getParent();

  // Dead cases are removed even when the simplification fails.
  // A case is dead when its value is higher than the Constant, because
  // umin(A, Constant) can never exceed Constant.
  for (auto I = SI->case_begin(), E = SI->case_end(); I != E;) {
    if (!I->getCaseValue()->getValue().ugt(Constant->getValue())) {
      ++I;
      continue;
    }
    BasicBlock *DeadCaseBB = I->getCaseSuccessor();
    DeadCaseBB->removePredecessor(BB);
    Updates.push_back({DominatorTree::Delete, BB, DeadCaseBB});
    // removeCase shrinks the case list; take the returned iterator and
    // refresh the end iterator.
    I = SIW.removeCase(I);
    E = SIW->case_end();
  }

  auto Case = SI->findCaseValue(Constant);
  // If the case value is not found, `findCaseValue` returns the default case.
  // In this scenario, since there is no explicit `case 3:`, the simplification
  // fails. The simplification also fails when the switch’s default destination
  // is reachable.
  if (!SI->defaultDestUnreachable() || Case == SI->case_default()) {
    if (DTU)
      DTU->applyUpdates(Updates);
    // Report a change iff the dead-case loop above deleted something.
    return !Updates.empty();
  }

  // umin(A, C) == C for every A >= C, so Constant's destination can absorb
  // the (unreachable) default once the condition is replaced by A itself.
  BasicBlock *Unreachable = SI->getDefaultDest();
  SIW.replaceDefaultDest(Case);
  SIW.removeCase(Case);
  SIW->setCondition(A);

  Updates.push_back({DominatorTree::Delete, BB, Unreachable});

  if (DTU)
    DTU->applyUpdates(Updates);

  return true;
}
7751
/// Tries to transform switch of powers of two to reduce switch range.
/// For example, switch like:
/// switch (C) { case 1: case 2: case 64: case 128: }
/// will be transformed to:
/// switch (count_trailing_zeros(C)) { case 0: case 1: case 6: case 7: }
///
/// This transformation allows better lowering and may transform the switch
/// instruction into a sequence of bit manipulation and a smaller
/// log2(C)-indexed value table (instead of traditionally emitting a load of the
/// address of the jump target, and indirectly jump to it).
                                        DomTreeUpdater *DTU,
                                        const DataLayout &DL,
                                        const TargetTransformInfo &TTI) {
  Value *Condition = SI->getCondition();
  LLVMContext &Context = SI->getContext();
  auto *CondTy = cast<IntegerType>(Condition->getType());

  // Only worthwhile for condition types the target handles natively.
  if (CondTy->getIntegerBitWidth() > 64 ||
      !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth()))
    return false;

  // Ensure trailing zeroes count intrinsic emission is not too expensive.
  IntrinsicCostAttributes Attrs(Intrinsic::cttz, CondTy,
                                {Condition, ConstantInt::getTrue(Context)});
  if (TTI.getIntrinsicInstrCost(Attrs, TTI::TCK_SizeAndLatency) >
      TTI::TCC_Basic * 2)
    return false;

  // Only bother with this optimization if there are more than 3 switch cases.
  // SDAG will start emitting jump tables for 4 or more cases.
  if (SI->getNumCases() < 4)
    return false;

  // Check that switch cases are powers of two.
  for (const auto &Case : SI->cases()) {
    uint64_t CaseValue = Case.getCaseValue()->getValue().getZExtValue();
    if (llvm::has_single_bit(CaseValue))
      Values.push_back(CaseValue);
    else
      return false;
  }

  // isSwitchDense requires case values to be sorted.
  llvm::sort(Values);
  if (!isSwitchDense(Values.size(), llvm::countr_zero(Values.back()) -
                                        llvm::countr_zero(Values.front()) + 1))
    // Transform is unable to generate dense switch.
    return false;

  Builder.SetInsertPoint(SI);

  if (!SI->defaultDestUnreachable()) {
    // Let non-power-of-two inputs jump to the default case, when the latter is
    // reachable. ctpop(C) == 1 exactly when C is a power of two.
    auto *PopC = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, Condition);
    auto *IsPow2 = Builder.CreateICmpEQ(PopC, ConstantInt::get(CondTy, 1));

    auto *OrigBB = SI->getParent();
    auto *DefaultCaseBB = SI->getDefaultDest();
    BasicBlock *SplitBB = SplitBlock(OrigBB, SI, DTU);
    auto It = OrigBB->getTerminator()->getIterator();
    SmallVector<uint32_t> Weights;
    auto HasWeights =
    auto *BI = CondBrInst::Create(IsPow2, SplitBB, DefaultCaseBB, It);
    if (HasWeights && any_of(Weights, not_equal_to(0))) {
      // IsPow2 covers a subset of the cases in which we'd go to the default
      // label. The other is those powers of 2 that don't appear in the case
      // statement. We don't know the distribution of the values coming in, so
      // the safest is to split 50-50 the original probability to `default`.
      uint64_t OrigDenominator =
      SmallVector<uint64_t> NewWeights(2);
      NewWeights[1] = Weights[0] / 2;
      NewWeights[0] = OrigDenominator - NewWeights[1];
      setFittedBranchWeights(*BI, NewWeights, /*IsExpected=*/false);
      // The probability of executing the default block stays constant. It was
      // p_d = Weights[0] / OrigDenominator
      // we rewrite as W/D
      // We want to find the probability of the default branch of the switch
      // statement. Let's call it X. We have W/D = W/2D + X * (1-W/2D)
      // i.e. the original probability is the probability we go to the default
      // branch from the BI branch, or we take the default branch on the SI.
      // Meaning X = W / (2D - W), or (W/2) / (D - W/2)
      // This matches using W/2 for the default branch probability numerator and
      // D-W/2 as the denominator.
      Weights[0] = NewWeights[1];
      uint64_t CasesDenominator = OrigDenominator - Weights[0];
      // Rescale the per-case weights so they sum against the new denominator.
      for (auto &W : drop_begin(Weights))
        W = NewWeights[0] * static_cast<double>(W) / CasesDenominator;

      setBranchWeights(*SI, Weights, /*IsExpected=*/false);
    }
    // BI is handling the default case for SI, and so should share its DebugLoc.
    BI->setDebugLoc(SI->getDebugLoc());
    It->eraseFromParent();

    addPredecessorToBlock(DefaultCaseBB, OrigBB, SplitBB);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, OrigBB, DefaultCaseBB}});
  }

  // Replace each case with its trailing zeros number.
  for (auto &Case : SI->cases()) {
    auto *OrigValue = Case.getCaseValue();
    Case.setValue(ConstantInt::get(OrigValue->getIntegerType(),
                                   OrigValue->getValue().countr_zero()));
  }

  // Replace condition with its trailing zeros number.
  auto *ConditionTrailingZeros = Builder.CreateIntrinsic(
      Intrinsic::cttz, {CondTy}, {Condition, ConstantInt::getTrue(Context)});

  SI->setCondition(ConditionTrailingZeros);

  return true;
}
7871
/// Fold switch over ucmp/scmp intrinsic to br if two of the switch arms have
/// the same destination.
                                         DomTreeUpdater *DTU) {
  auto *Cmp = dyn_cast<CmpIntrinsic>(SI->getCondition());
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  bool HasWeights = extractBranchWeights(getBranchWeightMDNode(*SI), Weights);
  if (!HasWeights)
    Weights.resize(4); // Avoid checking HasWeights everywhere.

  // Normalize to [us]cmp == Res ? Succ : OtherSucc.
  int64_t Res;
  BasicBlock *Succ, *OtherSucc;
  uint32_t SuccWeight = 0, OtherSuccWeight = 0;
  BasicBlock *Unreachable = nullptr;

  if (SI->getNumCases() == 2) {
    // Find which of 1, 0 or -1 is missing (handled by default dest).
    SmallSet<int64_t, 3> Missing;
    Missing.insert(1);
    Missing.insert(0);
    Missing.insert(-1);

    // The default destination stands in for the single missing cmp result.
    Succ = SI->getDefaultDest();
    SuccWeight = Weights[0];
    OtherSucc = nullptr;
    for (auto &Case : SI->cases()) {
      std::optional<int64_t> Val =
          Case.getCaseValue()->getValue().trySExtValue();
      if (!Val)
        return false;
      // Each of {-1, 0, 1} may appear at most once.
      if (!Missing.erase(*Val))
        return false;
      // Both explicit cases must share one destination for this to become a br.
      if (OtherSucc && OtherSucc != Case.getCaseSuccessor())
        return false;
      OtherSucc = Case.getCaseSuccessor();
      OtherSuccWeight += Weights[Case.getSuccessorIndex()];
    }

    assert(Missing.size() == 1 && "Should have one case left");
    Res = *Missing.begin();
  } else if (SI->getNumCases() == 3 && SI->defaultDestUnreachable()) {
    // Normalize so that Succ is taken once and OtherSucc twice.
    Unreachable = SI->getDefaultDest();
    Succ = OtherSucc = nullptr;
    for (auto &Case : SI->cases()) {
      BasicBlock *NewSucc = Case.getCaseSuccessor();
      uint32_t Weight = Weights[Case.getSuccessorIndex()];
      if (!OtherSucc || OtherSucc == NewSucc) {
        OtherSucc = NewSucc;
        OtherSuccWeight += Weight;
      } else if (!Succ) {
        Succ = NewSucc;
        SuccWeight = Weight;
      } else if (Succ == NewSucc) {
        // Succ turned out to be the duplicated destination; swap roles.
        std::swap(Succ, OtherSucc);
        std::swap(SuccWeight, OtherSuccWeight);
      } else
        return false;
    }
    // All case values must be cmp results, and Res is the one routed to Succ.
    for (auto &Case : SI->cases()) {
      std::optional<int64_t> Val =
          Case.getCaseValue()->getValue().trySExtValue();
      if (!Val || (Val != 1 && Val != 0 && Val != -1))
        return false;
      if (Case.getCaseSuccessor() == Succ) {
        Res = *Val;
        break;
      }
    }
  } else {
    return false;
  }

  // Determine predicate for the missing case.
  switch (Res) {
  case 1:
    Pred = ICmpInst::ICMP_UGT;
    break;
  case 0:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case -1:
    Pred = ICmpInst::ICMP_ULT;
    break;
  }
  if (Cmp->isSigned())
    Pred = ICmpInst::getSignedPredicate(Pred);

  MDNode *NewWeights = nullptr;
  if (HasWeights)
    NewWeights = MDBuilder(SI->getContext())
                     .createBranchWeights(SuccWeight, OtherSuccWeight);

  // Replace the switch with `br (icmp pred lhs, rhs), Succ, OtherSucc`.
  BasicBlock *BB = SI->getParent();
  Builder.SetInsertPoint(SI->getIterator());
  Value *ICmp = Builder.CreateICmp(Pred, Cmp->getLHS(), Cmp->getRHS());
  Builder.CreateCondBr(ICmp, Succ, OtherSucc, NewWeights,
                       SI->getMetadata(LLVMContext::MD_unpredictable));
  OtherSucc->removePredecessor(BB);
  if (Unreachable)
    Unreachable->removePredecessor(BB);
  SI->eraseFromParent();
  // Cmp had one use (the switch), so it is dead now.
  Cmp->eraseFromParent();
  if (DTU && Unreachable)
    DTU->applyUpdates({{DominatorTree::Delete, BB, Unreachable}});
  return true;
}
7984
/// Checking whether two BBs are equal depends on the contents of the
/// BasicBlock and the incoming values of their successor PHINodes.
/// PHINode::getIncomingValueForBlock is O(|Preds|), so we'd like to avoid
/// calling this function on each BasicBlock every time isEqual is called,
/// especially since the same BasicBlock may be passed as an argument multiple
/// times. To do this, we can precompute a map of PHINode -> Pred BasicBlock ->
/// IncomingValue and add it in the Wrapper so isEqual can do O(1) checking
/// of the incoming values.

  // One Phi usually has < 8 incoming values.

  // We only merge the identical non-entry BBs with
  // - terminator unconditional br to Succ (pending relaxation),
  // - does not have address taken / weird control.
  static bool canBeMerged(const BasicBlock *BB) {
    assert(BB && "Expected non-null BB");
    // Entry block cannot be eliminated or have predecessors.
    if (BB->isEntryBlock())
      return false;

    // Single successor and must be Succ.
    // FIXME: Relax that the terminator is a BranchInst by checking for equality
    // on other kinds of terminators. We decide to only support unconditional
    // branches for now for compile time reasons.
    auto *BI = dyn_cast<UncondBrInst>(BB->getTerminator());
    if (!BI)
      return false;

    // Avoid blocks that are "address-taken" (blockaddress) or have unusual
    // uses.
    if (BB->hasAddressTaken() || BB->isEHPad())
      return false;

    // TODO: relax this condition to merge equal blocks with >1 instructions?
    // Here, we use a O(1) form of the O(n) comparison of `size() != 1`.
    // A block whose front is also its back holds exactly one instruction.
    if (&BB->front() != &BB->back())
      return false;

    // The BB must have at least one predecessor.
    if (pred_empty(BB))
      return false;

    return true;
  }
};
8035
  // Empty/tombstone sentinels reuse DenseMapInfo<void *>'s reserved pointers;
  // they never point at a real wrapper and must be filtered before deref.
  static const EqualBBWrapper *getEmptyKey() {
    return static_cast<EqualBBWrapper *>(DenseMapInfo<void *>::getEmptyKey());
  }
    return static_cast<EqualBBWrapper *>(
  }
  static unsigned getHashValue(const EqualBBWrapper *EBW) {
    BasicBlock *BB = EBW->BB;
    assert(BB->size() == 1 && "Expected just a single branch in the BB");

    // Since we assume the BB is just a single UncondBrInst with a single
    // successor, we hash as the BB and the incoming Values of its successor
    // PHIs. Initially, we tried to just use the successor BB as the hash, but
    // including the incoming PHI values leads to better performance.
    // We also tried to build a map from BB -> Succs.IncomingValues ahead of
    // time and passing it in EqualBBWrapper, but this slowed down the average
    // compile time without having any impact on the worst case compile time.
    BasicBlock *Succ = BI->getSuccessor();
    auto PhiValsForBB = map_range(Succ->phis(), [&](PHINode &Phi) {
      return (*EBW->PhiPredIVs)[&Phi][BB];
    });
    return hash_combine(Succ, hash_combine_range(PhiValsForBB));
  }
  static bool isEqual(const EqualBBWrapper *LHS, const EqualBBWrapper *RHS) {
    // Sentinel keys carry no payload; compare them by identity only.
    if (LHS == EKey || RHS == EKey || LHS == TKey || RHS == TKey)
      return LHS == RHS;

    BasicBlock *A = LHS->BB;
    BasicBlock *B = RHS->BB;

    // FIXME: we checked that the size of A and B are both 1 in
    // mergeIdenticalUncondBBs to make the Case list smaller to
    // improve performance. If we decide to support BasicBlocks with more
    // than just a single instruction, we need to check that A.size() ==
    // B.size() here, and we need to check more than just the BranchInsts
    // for equality.

    UncondBrInst *ABI = cast<UncondBrInst>(A->getTerminator());
    UncondBrInst *BBI = cast<UncondBrInst>(B->getTerminator());
    if (ABI->getSuccessor() != BBI->getSuccessor())
      return false;

    // Need to check that PHIs in successor have matching values.
    BasicBlock *Succ = ABI->getSuccessor();
    auto IfPhiIVMatch = [&](PHINode &Phi) {
      // Replace O(|Pred|) Phi.getIncomingValueForBlock with this O(1) hashmap
      // query.
      auto &PredIVs = (*LHS->PhiPredIVs)[&Phi];
      return PredIVs[A] == PredIVs[B];
    };
    return all_of(Succ->phis(), IfPhiIVMatch);
  }
};
8094
// Merge identical BBs into one of them. Candidates are single-instruction
// blocks ending in an unconditional branch; duplicates are detected via
// hashing (see EqualBBWrapper) and their incoming edges are redirected to
// one canonical survivor. Returns true if any block was merged.
                             DomTreeUpdater *DTU) {
  if (Candidates.size() < 2)
    return false;

  // Build Cases. Skip BBs that are not candidates for simplification. Mark
  // PHINodes which need to be processed into PhiPredIVs. We decide to process
  // an entire PHI at once after the loop, opposed to calling
  // getIncomingValueForBlock inside this loop, since each call to
  // getIncomingValueForBlock is O(|Preds|).
  EqualBBWrapper::Phi2IVsMap PhiPredIVs;
  BBs2Merge.reserve(Candidates.size());

  for (BasicBlock *BB : Candidates) {
    BasicBlock *Succ = BB->getSingleSuccessor();
    assert(Succ && "Expected unconditional BB");
    BBs2Merge.emplace_back(EqualBBWrapper{BB, &PhiPredIVs});
    Phis.insert_range(make_pointer_range(Succ->phis()));
  }

  // Precompute a data structure to improve performance of isEqual for
  // EqualBBWrapper.
  PhiPredIVs.reserve(Phis.size());
  for (PHINode *Phi : Phis) {
    auto &IVs =
        PhiPredIVs.try_emplace(Phi, Phi->getNumIncomingValues()).first->second;
    // Pre-fill all incoming for O(1) lookup as Phi.getIncomingValueForBlock is
    // O(|Pred|).
    for (auto &IV : Phi->incoming_values())
      IVs.insert({Phi->getIncomingBlock(IV), IV.get()});
  }

  // Group duplicates using DenseSet with custom equality/hashing.
  // Build a set such that if the EqualBBWrapper exists in the set and another
  // EqualBBWrapper isEqual, then the equivalent EqualBBWrapper which is not in
  // the set should be replaced with the one in the set. If the EqualBBWrapper
  // is not in the set, then it should be added to the set so other
  // EqualBBWrapper can check against it in the same manner. We use
  // EqualBBWrapper instead of just BasicBlock because we'd like to pass around
  // information to isEquality, getHashValue, and when doing the replacement
  // with better performance.
  Keep.reserve(BBs2Merge.size());

  Updates.reserve(BBs2Merge.size() * 2);

  bool MadeChange = false;

  // Helper: redirect all edges X -> DeadPred to X -> LivePred.
  auto RedirectIncomingEdges = [&](BasicBlock *Dead, BasicBlock *Live) {
    if (DTU) {
      // All predecessors of DeadPred (except the common predecessor) will be
      // moved to LivePred.
      Updates.reserve(Updates.size() + DeadPreds.size() * 2);
                      predecessors(Live));
      for (BasicBlock *PredOfDead : DeadPreds) {
        // Do not modify those common predecessors of DeadPred and LivePred.
        if (!LivePreds.contains(PredOfDead))
          Updates.push_back({DominatorTree::Insert, PredOfDead, Live});
        Updates.push_back({DominatorTree::Delete, PredOfDead, Dead});
      }
    }
    LLVM_DEBUG(dbgs() << "Replacing duplicate pred BB ";
               Dead->printAsOperand(dbgs()); dbgs() << " with pred ";
               Live->printAsOperand(dbgs()); dbgs() << " for ";
               Live->getSingleSuccessor()->printAsOperand(dbgs());
               dbgs() << "\n");
    // Replace successors in all predecessors of DeadPred.
    for (BasicBlock *PredOfDead : DeadPreds) {
      Instruction *T = PredOfDead->getTerminator();
      T->replaceSuccessorWith(Dead, Live);
    }
  };

  // Try to eliminate duplicate predecessors.
  for (const auto &EBW : BBs2Merge) {
    // EBW is a candidate for simplification. If we find a duplicate BB,
    // replace it.
    const auto &[It, Inserted] = Keep.insert(&EBW);
    if (Inserted)
      continue;

    // Found duplicate: merge P into canonical predecessor It->Pred.
    BasicBlock *KeepBB = (*It)->BB;
    BasicBlock *DeadBB = EBW.BB;

    // Avoid merging a BB with itself.
    if (KeepBB == DeadBB)
      continue;

    // Redirect all edges into DeadPred to KeepPred.
    RedirectIncomingEdges(DeadBB, KeepBB);

    // Now DeadBB should become unreachable; leave DCE to later,
    // but we can try to simplify it if it only branches to Succ.
    // (We won't erase here to keep the routine simple and DT-safe.)
    assert(pred_empty(DeadBB) && "DeadBB should be unreachable.");
    MadeChange = true;
  }

  if (DTU && !Updates.empty())
    DTU->applyUpdates(Updates);

  return MadeChange;
}
8207
/// Merge switch arms of \p SI that are identical single-branch blocks.
/// Candidate arms are filtered, then handed to mergeIdenticalBBs.
bool SimplifyCFGOpt::simplifyDuplicateSwitchArms(SwitchInst *SI,
                                                 DomTreeUpdater *DTU) {
  // Collect candidate switch-arms top-down.
  SmallSetVector<BasicBlock *, 16> FilteredArms(
  return mergeIdenticalBBs(FilteredArms.getArrayRef(), DTU);
}
8216
/// Merge identical single-branch predecessors of \p BB into one block.
/// Returns true if any predecessors were merged.
bool SimplifyCFGOpt::simplifyDuplicatePredecessors(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  // Need at least 2 predecessors to do anything.
  if (!BB || !BB->hasNPredecessorsOrMore(2))
    return false;

  // Compilation time consideration: retain the canonical loop, otherwise, we
  // require more time in the later loop canonicalization.
  if (Options.NeedCanonicalLoop && is_contained(LoopHeaders, BB))
    return false;

  // Collect candidate predecessors bottom-up.
  SmallSetVector<BasicBlock *, 8> FilteredPreds(
  return mergeIdenticalBBs(FilteredPreds.getArrayRef(), DTU);
}
8234
8235bool SimplifyCFGOpt::simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
8236 BasicBlock *BB = SI->getParent();
8237
8238 if (isValueEqualityComparison(SI)) {
8239 // If we only have one predecessor, and if it is a branch on this value,
8240 // see if that predecessor totally determines the outcome of this switch.
8241 if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
8242 if (simplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
8243 return requestResimplify();
8244
8245 Value *Cond = SI->getCondition();
8246 if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
8247 if (simplifySwitchOnSelect(SI, Select))
8248 return requestResimplify();
8249
8250 // If the block only contains the switch, see if we can fold the block
8251 // away into any preds.
8252 if (SI == &*BB->begin())
8253 if (foldValueComparisonIntoPredecessors(SI, Builder))
8254 return requestResimplify();
8255 }
8256
8257 // Try to transform the switch into an icmp and a branch.
8258 // The conversion from switch to comparison may lose information on
8259 // impossible switch values, so disable it early in the pipeline.
8260 if (Options.ConvertSwitchRangeToICmp && turnSwitchRangeIntoICmp(SI, Builder))
8261 return requestResimplify();
8262
8263 // Remove unreachable cases.
8264 if (eliminateDeadSwitchCases(SI, DTU, Options.AC, DL))
8265 return requestResimplify();
8266
8267 if (simplifySwitchOfCmpIntrinsic(SI, Builder, DTU))
8268 return requestResimplify();
8269
8270 if (trySwitchToSelect(SI, Builder, DTU, DL, TTI))
8271 return requestResimplify();
8272
8273 if (Options.ForwardSwitchCondToPhi && forwardSwitchConditionToPHI(SI))
8274 return requestResimplify();
8275
8276 // The conversion of switches to arithmetic or lookup table is disabled in
8277 // the early optimization pipeline, as it may lose information or make the
8278 // resulting code harder to analyze.
8279 if (Options.ConvertSwitchToArithmetic || Options.ConvertSwitchToLookupTable)
8280 if (simplifySwitchLookup(SI, Builder, DTU, DL, TTI,
8281 Options.ConvertSwitchToLookupTable))
8282 return requestResimplify();
8283
8284 if (simplifySwitchOfPowersOfTwo(SI, Builder, DTU, DL, TTI))
8285 return requestResimplify();
8286
8287 if (reduceSwitchRange(SI, Builder, DL, TTI))
8288 return requestResimplify();
8289
8290 if (HoistCommon &&
8291 hoistCommonCodeFromSuccessors(SI, !Options.HoistCommonInsts))
8292 return requestResimplify();
8293
8294 // We can merge identical switch arms early to enhance more aggressive
8295 // optimization on switch.
8296 if (simplifyDuplicateSwitchArms(SI, DTU))
8297 return requestResimplify();
8298
8299 if (simplifySwitchWhenUMin(SI, DTU))
8300 return requestResimplify();
8301
8302 return false;
8303}
8304
/// Simplify an indirectbr: drop duplicate or never-address-taken destinations,
/// then degenerate to unreachable (0 targets) or handle the remaining cases.
/// Branch weights, when present, are re-aggregated per surviving destination.
bool SimplifyCFGOpt::simplifyIndirectBr(IndirectBrInst *IBI) {
  BasicBlock *BB = IBI->getParent();
  bool Changed = false;
  SmallVector<uint32_t> BranchWeights;
  const bool HasBranchWeights = !ProfcheckDisableMetadataFixes &&
                                extractBranchWeights(*IBI, BranchWeights);

  // Sum the original weight per destination, since one block may appear as
  // several destinations before deduplication.
  DenseMap<const BasicBlock *, uint64_t> TargetWeight;
  if (HasBranchWeights)
    for (size_t I = 0, E = IBI->getNumDestinations(); I < E; ++I)
      TargetWeight[IBI->getDestination(I)] += BranchWeights[I];

  // Eliminate redundant destinations.
  SmallPtrSet<Value *, 8> Succs;
  SmallSetVector<BasicBlock *, 8> RemovedSuccs;
  for (unsigned I = 0, E = IBI->getNumDestinations(); I != E; ++I) {
    BasicBlock *Dest = IBI->getDestination(I);
    // A destination is removable when its address is never taken (it can
    // never actually be targeted) or it already appeared earlier in the list.
    if (!Dest->hasAddressTaken() || !Succs.insert(Dest).second) {
      if (!Dest->hasAddressTaken())
        RemovedSuccs.insert(Dest);
      Dest->removePredecessor(BB);
      IBI->removeDestination(I);
      // Re-examine the slot that just shifted into position I.
      --I;
      --E;
      Changed = true;
    }
  }

  if (DTU) {
    std::vector<DominatorTree::UpdateType> Updates;
    Updates.reserve(RemovedSuccs.size());
    for (auto *RemovedSucc : RemovedSuccs)
      Updates.push_back({DominatorTree::Delete, BB, RemovedSucc});
    DTU->applyUpdates(Updates);
  }

  if (IBI->getNumDestinations() == 0) {
    // If the indirectbr has no successors, change it to unreachable.
    new UnreachableInst(IBI->getContext(), IBI->getIterator());
    return true;
  }

  if (IBI->getNumDestinations() == 1) {
    // If the indirectbr has one successor, change it to a direct branch.
    return true;
  }
  if (HasBranchWeights) {
    // Rebuild the weight list to match the deduplicated destination order.
    SmallVector<uint64_t> NewBranchWeights(IBI->getNumDestinations());
    for (size_t I = 0, E = IBI->getNumDestinations(); I < E; ++I)
      NewBranchWeights[I] += TargetWeight.find(IBI->getDestination(I))->second;
    setFittedBranchWeights(*IBI, NewBranchWeights, /*IsExpected=*/false);
  }
  if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
    if (simplifyIndirectBrOnSelect(IBI, SI))
      return requestResimplify();
  }
  return Changed;
}
8366
/// Given a block with only a single landing pad and an unconditional branch
/// try to find another basic block which this one can be merged with. This
/// handles cases where we have multiple invokes with unique landing pads, but
/// a shared handler.
///
/// We specifically choose to not worry about merging non-empty blocks
/// here. That is a PRE/scheduling problem and is best solved elsewhere. In
/// practice, the optimizer produces empty landing pad blocks quite frequently
/// when dealing with exception dense code. (see: instcombine, gvn, if-else
/// sinking in this file)
///
/// This is primarily a code size optimization. We need to avoid performing
/// any transform which might inhibit optimization (such as our ability to
/// specialize a particular handler via tail commoning). We do this by not
/// merging any blocks which require us to introduce a phi. Since the same
/// values are flowing through both blocks, we don't lose any ability to
/// specialize. If anything, we make such specialization more likely.
///
/// TODO - This transformation could remove entries from a phi in the target
/// block when the inputs in the phi are the same for the two blocks being
/// merged. In some cases, this could result in removal of the PHI entirely.
    BasicBlock *BB, DomTreeUpdater *DTU) {
  auto Succ = BB->getUniqueSuccessor();
  assert(Succ);
  // If there's a phi in the successor block, we'd likely have to introduce
  // a phi into the merged landing pad block.
  if (isa<PHINode>(*Succ->begin()))
    return false;

  // Scan the other predecessors of our successor for a block with an
  // identical landing pad and branch.
  for (BasicBlock *OtherPred : predecessors(Succ)) {
    if (BB == OtherPred)
      continue;
    BasicBlock::iterator I = OtherPred->begin();
    if (!LPad2 || !LPad2->isIdenticalTo(LPad))
      continue;
    ++I;
    if (!BI2 || !BI2->isIdenticalTo(BI))
      continue;

    std::vector<DominatorTree::UpdateType> Updates;

    // We've found an identical block. Update our predecessors to take that
    // path instead and make ourselves dead.
    for (BasicBlock *Pred : UniquePreds) {
      InvokeInst *II = cast<InvokeInst>(Pred->getTerminator());
      assert(II->getNormalDest() != BB && II->getUnwindDest() == BB &&
             "unexpected successor");
      II->setUnwindDest(OtherPred);
      if (DTU) {
        Updates.push_back({DominatorTree::Insert, Pred, OtherPred});
        Updates.push_back({DominatorTree::Delete, Pred, BB});
      }
    }

    for (BasicBlock *Succ : UniqueSuccs) {
      Succ->removePredecessor(BB);
      if (DTU)
        Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    // BB is now dead: cap it with unreachable in place of its branch.
    IRBuilder<> Builder(BI);
    Builder.CreateUnreachable();
    BI->eraseFromParent();
    if (DTU)
      DTU->applyUpdates(Updates);
    return true;
  }
  return false;
}
8441
/// Simplify a block ending in an unconditional branch: fold an empty block
/// into its successor, simplify an icmp/select-only block, or merge an empty
/// landing pad with an equivalent one.
bool SimplifyCFGOpt::simplifyUncondBranch(UncondBrInst *BI,
                                          IRBuilder<> &Builder) {
  BasicBlock *BB = BI->getParent();
  BasicBlock *Succ = BI->getSuccessor(0);

  // If the Terminator is the only non-phi instruction, simplify the block.
  // If LoopHeader is provided, check if the block or its successor is a loop
  // header. (This is for early invocations before loop simplify and
  // vectorization to keep canonical loop forms for nested loops. These blocks
  // can be eliminated when the pass is invoked later in the back-end.)
  // Note that if BB has only one predecessor then we do not introduce new
  // backedge, so we can eliminate BB.
  bool NeedCanonicalLoop =
      Options.NeedCanonicalLoop &&
      (!LoopHeaders.empty() && BB->hasNPredecessorsOrMore(2) &&
       (is_contained(LoopHeaders, BB) || is_contained(LoopHeaders, Succ)));
  if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() &&
      !NeedCanonicalLoop && TryToSimplifyUncondBranchFromEmptyBlock(BB, DTU))
    return true;

  // If the only instruction in the block is a seteq/setne comparison against a
  // constant, try to simplify the block.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
    if (ICI->isEquality() && isa<ConstantInt>(ICI->getOperand(1))) {
      ++I;
      if (I->isTerminator() &&
          tryToSimplifyUncondBranchWithICmpInIt(ICI, Builder))
        return true;
      // Also handle the icmp-then-select pair immediately before the branch.
      if (isa<SelectInst>(I) && I->getNextNode()->isTerminator() &&
          tryToSimplifyUncondBranchWithICmpSelectInIt(ICI, cast<SelectInst>(I),
                                                      Builder))
        return true;
    }
  }

  // See if we can merge an empty landing pad block with another which is
  // equivalent.
  if (LandingPadInst *LPad = dyn_cast<LandingPadInst>(I)) {
    ++I;
    if (I->isTerminator() && tryToMergeLandingPad(LPad, BI, BB, DTU))
      return true;
  }

  return false;
}
8488
8490 BasicBlock *PredPred = nullptr;
8491 for (auto *P : predecessors(BB)) {
8492 BasicBlock *PPred = P->getSinglePredecessor();
8493 if (!PPred || (PredPred && PredPred != PPred))
8494 return nullptr;
8495 PredPred = PPred;
8496 }
8497 return PredPred;
8498}
8499
/// Fold the following pattern:
/// bb0:
///   br i1 %cond1, label %bb1, label %bb2
/// bb1:
///   br i1 %cond2, label %bb3, label %bb4
/// bb2:
///   br i1 %cond2, label %bb4, label %bb3
/// bb3:
///   ...
/// bb4:
///   ...
/// into
/// bb0:
///   %cond = xor i1 %cond1, %cond2
///   br i1 %cond, label %bb4, label %bb3
/// bb3:
///   ...
/// bb4:
///   ...
/// NOTE: %cond2 always dominates the terminator of bb0.
  BasicBlock *BB = BI->getParent();
  BasicBlock *BB1 = BI->getSuccessor(0);
  BasicBlock *BB2 = BI->getSuccessor(1);
  // A "simple" successor holds only a conditional branch, does not loop back
  // to itself or BB, and its targets start with no PHIs (so retargeting edges
  // needs no incoming-value surgery).
  auto IsSimpleSuccessor = [BB](BasicBlock *Succ, CondBrInst *&SuccBI) {
    if (Succ == BB)
      return false;
    if (&Succ->front() != Succ->getTerminator())
      return false;
    SuccBI = dyn_cast<CondBrInst>(Succ->getTerminator());
    if (!SuccBI)
      return false;
    BasicBlock *Succ1 = SuccBI->getSuccessor(0);
    BasicBlock *Succ2 = SuccBI->getSuccessor(1);
    return Succ1 != Succ && Succ2 != Succ && Succ1 != BB && Succ2 != BB &&
           !isa<PHINode>(Succ1->front()) && !isa<PHINode>(Succ2->front());
  };
  CondBrInst *BB1BI, *BB2BI;
  if (!IsSimpleSuccessor(BB1, BB1BI) || !IsSimpleSuccessor(BB2, BB2BI))
    return false;

  // Both inner branches must test the same condition with mirrored targets.
  if (BB1BI->getCondition() != BB2BI->getCondition() ||
      BB1BI->getSuccessor(0) != BB2BI->getSuccessor(1) ||
      BB1BI->getSuccessor(1) != BB2BI->getSuccessor(0))
    return false;

  BasicBlock *BB3 = BB1BI->getSuccessor(0);
  BasicBlock *BB4 = BB1BI->getSuccessor(1);
  IRBuilder<> Builder(BI);
  // Per the pattern above, execution reaches bb4 exactly when cond1 and cond2
  // disagree, i.e. when xor(cond1, cond2) is true.
  BI->setCondition(
      Builder.CreateXor(BI->getCondition(), BB1BI->getCondition()));
  BB1->removePredecessor(BB);
  BI->setSuccessor(0, BB4);
  BB2->removePredecessor(BB);
  BI->setSuccessor(1, BB3);
  if (DTU) {
    Updates.push_back({DominatorTree::Delete, BB, BB1});
    Updates.push_back({DominatorTree::Insert, BB, BB4});
    Updates.push_back({DominatorTree::Delete, BB, BB2});
    Updates.push_back({DominatorTree::Insert, BB, BB3});

    DTU->applyUpdates(Updates);
  }
  // Combine profile data: missing metadata on any branch defaults to 1:1.
  bool HasWeight = false;
  uint64_t BBTWeight, BBFWeight;
  if (extractBranchWeights(*BI, BBTWeight, BBFWeight))
    HasWeight = true;
  else
    BBTWeight = BBFWeight = 1;
  uint64_t BB1TWeight, BB1FWeight;
  if (extractBranchWeights(*BB1BI, BB1TWeight, BB1FWeight))
    HasWeight = true;
  else
    BB1TWeight = BB1FWeight = 1;
  uint64_t BB2TWeight, BB2FWeight;
  if (extractBranchWeights(*BB2BI, BB2TWeight, BB2FWeight))
    HasWeight = true;
  else
    BB2TWeight = BB2FWeight = 1;
  if (HasWeight) {
    // Weight of bb4 is the two paths that reach it: bb1-false plus bb2-true;
    // bb3 gets the complementary pair.
    uint64_t Weights[2] = {BBTWeight * BB1FWeight + BBFWeight * BB2TWeight,
                           BBTWeight * BB1TWeight + BBFWeight * BB2FWeight};
    setFittedBranchWeights(*BI, Weights, /*IsExpected=*/false,
                           /*ElideAllZero=*/true);
  }
  return true;
}
8588
// Driver for all conditional-branch peephole transforms. Tries, in order:
// value-equality folding into predecessors, icmp-chain -> switch conversion,
// dominating-condition implication, folding into a common destination,
// hoisting/sinking common code from successors, speculative execution of a
// successor, jump threading on values known in predecessors, cond-br ->
// cond-br simplification, conditional-store merging, and nested cond-br
// merging. Returns true if the CFG changed; most transforms return via
// requestResimplify() so the caller reruns the whole simplification loop.
// NOTE(review): this excerpt was extracted with some physical lines dropped
// (e.g. 8591, 8635, 8669); notes below mark each hole — confirm against the
// upstream file before editing.
8589bool SimplifyCFGOpt::simplifyCondBranch(CondBrInst *BI, IRBuilder<> &Builder) {
// NOTE(review): one conjunct of this assert (source line 8591) is missing
// from the excerpt.
8590 assert(
8592 BI->getSuccessor(0) != BI->getSuccessor(1) &&
8593 "Tautological conditional branch should have been eliminated already.");
8594
8595 BasicBlock *BB = BI->getParent();
8596 if (!Options.SimplifyCondBranch ||
8597 BI->getFunction()->hasFnAttribute(Attribute::OptForFuzzing))
8598 return false;
8599
8600 // Conditional branch
8601 if (isValueEqualityComparison(BI)) {
8602 // If we only have one predecessor, and if it is a branch on this value,
8603 // see if that predecessor totally determines the outcome of this
8604 // switch.
8605 if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
8606 if (simplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))
8607 return requestResimplify();
8608
8609 // This block must be empty, except for the setcond inst, if it exists.
8610 // Ignore pseudo intrinsics.
8611 for (auto &I : *BB) {
8612 if (isa<PseudoProbeInst>(I) ||
8613 &I == cast<Instruction>(BI->getCondition()))
8614 continue;
// Folding is only attempted when the branch is the first "real"
// instruction after the condition and pseudo probes.
8615 if (&I == BI)
8616 if (foldValueComparisonIntoPredecessors(BI, Builder))
8617 return requestResimplify();
8618 break;
8619 }
8620 }
8621
8622 // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
8623 if (simplifyBranchOnICmpChain(BI, Builder, DL))
8624 return true;
8625
8626 // If this basic block has dominating predecessor blocks and the dominating
8627 // blocks' conditions imply BI's condition, we know the direction of BI.
8628 std::optional<bool> Imp = isImpliedByDomCondition(BI->getCondition(), BI, DL);
8629 if (Imp) {
8630 // Turn this into a branch on constant.
8631 auto *OldCond = BI->getCondition();
8632 ConstantInt *TorF = *Imp ? ConstantInt::getTrue(BB->getContext())
8633 : ConstantInt::getFalse(BB->getContext());
8634 BI->setCondition(TorF);
// NOTE(review): source line 8635 is missing from this excerpt; it is the
// only plausible use of OldCond (likely dead-code cleanup of the old
// condition) — confirm upstream.
8636 return requestResimplify();
8637 }
8638
8639 // If this basic block is ONLY a compare and a branch, and if a predecessor
8640 // branches to us and one of our successors, fold the comparison into the
8641 // predecessor and use logical operations to pick the right destination.
8642 if (Options.SpeculateBlocks &&
8643 foldBranchToCommonDest(BI, DTU, /*MSSAU=*/nullptr, &TTI,
8644 Options.BonusInstThreshold))
8645 return requestResimplify();
8646
8647 // We have a conditional branch to two blocks that are only reachable
8648 // from BI. We know that the condbr dominates the two blocks, so see if
8649 // there is any identical code in the "then" and "else" blocks. If so, we
8650 // can hoist it up to the branching block.
8651 if (BI->getSuccessor(0)->getSinglePredecessor()) {
8652 if (BI->getSuccessor(1)->getSinglePredecessor()) {
8653 if (HoistCommon &&
8654 hoistCommonCodeFromSuccessors(BI, !Options.HoistCommonInsts))
8655 return requestResimplify();
8656
// Conditional-faulting targets can hoist loads/stores guarded by the
// branch; gather candidates in both successors first.
8657 if (BI && Options.HoistLoadsStoresWithCondFaulting &&
8658 isProfitableToSpeculate(BI, std::nullopt, TTI)) {
8659 SmallVector<Instruction *, 2> SpeculatedConditionalLoadsStores;
8660 auto CanSpeculateConditionalLoadsStores = [&]() {
8661 for (auto *Succ : successors(BB)) {
8662 for (Instruction &I : *Succ) {
8663 if (I.isTerminator()) {
8664 if (I.getNumSuccessors() > 1)
8665 return false;
8666 continue;
// NOTE(review): the right-hand side of this '==' (the speculation-count
// limit constant, source line 8669) is missing from this excerpt.
8667 } else if (!isSafeCheapLoadStore(&I, TTI) ||
8668 SpeculatedConditionalLoadsStores.size() ==
8670 return false;
8671 }
8672 SpeculatedConditionalLoadsStores.push_back(&I);
8673 }
8674 }
8675 return !SpeculatedConditionalLoadsStores.empty();
8676 };
8677
8678 if (CanSpeculateConditionalLoadsStores()) {
8679 hoistConditionalLoadsStores(BI, SpeculatedConditionalLoadsStores,
8680 std::nullopt, nullptr);
8681 return requestResimplify();
8682 }
8683 }
8684 } else {
8685 // If Successor #1 has multiple preds, we may be able to conditionally
8686 // execute Successor #0 if it branches to Successor #1.
8687 Instruction *Succ0TI = BI->getSuccessor(0)->getTerminator();
8688 if (Succ0TI->getNumSuccessors() == 1 &&
8689 Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
8690 if (speculativelyExecuteBB(BI, BI->getSuccessor(0)))
8691 return requestResimplify();
8692 }
8693 } else if (BI->getSuccessor(1)->getSinglePredecessor()) {
8694 // If Successor #0 has multiple preds, we may be able to conditionally
8695 // execute Successor #1 if it branches to Successor #0.
8696 Instruction *Succ1TI = BI->getSuccessor(1)->getTerminator();
8697 if (Succ1TI->getNumSuccessors() == 1 &&
8698 Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
8699 if (speculativelyExecuteBB(BI, BI->getSuccessor(1)))
8700 return requestResimplify();
8701 }
8702
8703 // If this is a branch on something for which we know the constant value in
8704 // predecessors (e.g. a phi node in the current block), thread control
8705 // through this block.
8706 if (foldCondBranchOnValueKnownInPredecessor(BI))
8707 return requestResimplify();
8708
8709 // Scan predecessor blocks for conditional branches.
8710 for (BasicBlock *Pred : predecessors(BB))
8711 if (CondBrInst *PBI = dyn_cast<CondBrInst>(Pred->getTerminator()))
8712 if (PBI != BI)
8713 if (SimplifyCondBranchToCondBranch(PBI, BI, DTU, DL, TTI))
8714 return requestResimplify();
8715
8716 // Look for diamond patterns.
8717 if (MergeCondStores)
8718 if (BasicBlock *PrevBB = allPredecessorsComeFromSameSource(BB))
8719 if (CondBrInst *PBI = dyn_cast<CondBrInst>(PrevBB->getTerminator()))
8720 if (PBI != BI)
8721 if (mergeConditionalStores(PBI, BI, DTU, DL, TTI))
8722 return requestResimplify();
8723
8724 // Look for nested conditional branches.
8725 if (mergeNestedCondBranch(BI, DTU))
8726 return requestResimplify();
8727
8728 return false;
8729}
8730
8731/// Check if passing a value to an instruction will cause undefined behavior.
///
/// V is the constant (null or undef) incoming value, I is the instruction
/// (initially a PHI) it flows into. Only the first handled use of I is
/// inspected, the analysis recurses through GEPs (with PtrValueMayBeModified
/// tracking whether the null base may have been offset), and it recognizes
/// UB-triggering uses: loads/stores through null, calls of null, null/undef
/// passed to nonnull/noundef parameters or returns, assume(false/undef), and
/// integer div/rem by zero.
/// NOTE(review): several physical lines were dropped by extraction (8734,
/// 8784, 8789, 8830, 8853); see notes at each hole below.
8732static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified) {
8733 assert(V->getType() == I->getType() && "Mismatched types");
// NOTE(review): the declaration of C (source line 8734, presumably a
// dyn_cast of V to a Constant subclass) is missing from this excerpt.
8735 if (!C)
8736 return false;
8737
8738 if (I->use_empty())
8739 return false;
8740
8741 if (C->isNullValue() || isa<UndefValue>(C)) {
8742 // Only look at the first use we can handle, avoid hurting compile time with
8743 // long uselists
8744 auto FindUse = llvm::find_if(I->uses(), [](auto &U) {
8745 auto *Use = cast<Instruction>(U.getUser());
8746 // Change this list when we want to add new instructions.
8747 switch (Use->getOpcode()) {
8748 default:
8749 return false;
8750 case Instruction::GetElementPtr:
8751 case Instruction::Ret:
8752 case Instruction::BitCast:
8753 case Instruction::Load:
8754 case Instruction::Store:
8755 case Instruction::Call:
8756 case Instruction::CallBr:
8757 case Instruction::Invoke:
8758 case Instruction::UDiv:
8759 case Instruction::URem:
8760 // Note: signed div/rem of INT_MIN / -1 is also immediate UB, not
8761 // implemented to avoid code complexity as it is unclear how useful such
8762 // logic is.
8763 case Instruction::SDiv:
8764 case Instruction::SRem:
8765 return true;
8766 }
8767 });
8768 if (FindUse == I->use_end())
8769 return false;
8770 auto &Use = *FindUse;
8771 auto *User = cast<Instruction>(Use.getUser());
8772 // Bail out if User is not in the same BB as I or User == I or User comes
8773 // before I in the block. The latter two can be the case if User is a
8774 // PHI node.
8775 if (User->getParent() != I->getParent() || User == I ||
8776 User->comesBefore(I))
8777 return false;
8778
8779 // Now make sure that there are no instructions in between that can alter
8780 // control flow (eg. calls)
8781 auto InstrRange =
8782 make_range(std::next(I->getIterator()), User->getIterator());
// NOTE(review): the lambda body (source line 8784, the predicate applied
// to each in-between instruction) is missing from this excerpt.
8783 if (any_of(InstrRange, [](Instruction &I) {
8785 }))
8786 return false;
8787
8788 // Look through GEPs. A load from a GEP derived from NULL is still undefined
// NOTE(review): the enclosing dyn_cast-to-GEP condition (source line 8789)
// is missing from this excerpt.
8790 if (GEP->getPointerOperand() == I) {
8791 // The type of GEP may differ from the type of base pointer.
8792 // Bail out on vector GEPs, as they are not handled by other checks.
8793 if (GEP->getType()->isVectorTy())
8794 return false;
8795 // The current base address is null, there are four cases to consider:
8796 // getelementptr (TY, null, 0) -> null
8797 // getelementptr (TY, null, not zero) -> may be modified
8798 // getelementptr inbounds (TY, null, 0) -> null
8799 // getelementptr inbounds (TY, null, not zero) -> poison iff null is
8800 // undefined?
8801 if (!GEP->hasAllZeroIndices() &&
8802 (!GEP->isInBounds() ||
8803 NullPointerIsDefined(GEP->getFunction(),
8804 GEP->getPointerAddressSpace())))
8805 PtrValueMayBeModified = true;
// Recurse with the GEP as the new "instruction receiving the value".
8806 return passingValueIsAlwaysUndefined(V, GEP, PtrValueMayBeModified);
8807 }
8808
8809 // Look through return.
8810 if (ReturnInst *Ret = dyn_cast<ReturnInst>(User)) {
8811 bool HasNoUndefAttr =
8812 Ret->getFunction()->hasRetAttribute(Attribute::NoUndef);
8813 // Return undefined to a noundef return value is undefined.
8814 if (isa<UndefValue>(C) && HasNoUndefAttr)
8815 return true;
8816 // Return null to a nonnull+noundef return value is undefined.
8817 if (C->isNullValue() && HasNoUndefAttr &&
8818 Ret->getFunction()->hasRetAttribute(Attribute::NonNull)) {
8819 return !PtrValueMayBeModified;
8820 }
8821 }
8822
8823 // Load from null is undefined.
8824 if (LoadInst *LI = dyn_cast<LoadInst>(User))
8825 if (!LI->isVolatile())
8826 return !NullPointerIsDefined(LI->getFunction(),
8827 LI->getPointerAddressSpace());
8828
8829 // Store to null is undefined.
// NOTE(review): the enclosing dyn_cast-to-StoreInst condition (source line
// 8830) is missing from this excerpt.
8831 if (!SI->isVolatile())
8832 return (!NullPointerIsDefined(SI->getFunction(),
8833 SI->getPointerAddressSpace())) &&
8834 SI->getPointerOperand() == I;
8835
8836 // llvm.assume(false/undef) always triggers immediate UB.
8837 if (auto *Assume = dyn_cast<AssumeInst>(User)) {
8838 // Ignore assume operand bundles.
8839 if (I == Assume->getArgOperand(0))
8840 return true;
8841 }
8842
8843 if (auto *CB = dyn_cast<CallBase>(User)) {
8844 if (C->isNullValue() && NullPointerIsDefined(CB->getFunction()))
8845 return false;
8846 // A call to null is undefined.
8847 if (CB->getCalledOperand() == I)
8848 return true;
8849
8850 if (CB->isArgOperand(&Use)) {
8851 unsigned ArgIdx = CB->getArgOperandNo(&Use);
8852 // Passing null to a nonnull+noundef argument is undefined.
// NOTE(review): the opening of this 'if' (source line 8853, presumably
// the null-value check) is missing from this excerpt.
8854 CB->paramHasNonNullAttr(ArgIdx, /*AllowUndefOrPoison=*/false))
8855 return !PtrValueMayBeModified;
8856 // Passing undef to a noundef argument is undefined.
8857 if (isa<UndefValue>(C) && CB->isPassingUndefUB(ArgIdx))
8858 return true;
8859 }
8860 }
8861 // Div/Rem by zero is immediate UB
8862 if (match(User, m_BinOp(m_Value(), m_Specific(I))) && User->isIntDivRem())
8863 return true;
8864 }
8865 return false;
8866}
8867
8868/// If BB has an incoming value that will always trigger undefined behavior
8869/// (eg. null pointer dereference), remove the branch leading here.
///
/// For each PHI incoming value proven UB by passingValueIsAlwaysUndefined,
/// the offending predecessor edge is severed: an unconditional branch becomes
/// unreachable, a conditional branch is rewritten to its other successor
/// (preserving the implied condition as an llvm.assume), and a switch edge is
/// redirected to a fresh unreachable block. Returns true after the first edge
/// removed; the caller re-runs simplification.
/// NOTE(review): the function's signature line (source 8870, with the name
/// and the BasicBlock *BB parameter) was dropped by extraction.
8871 DomTreeUpdater *DTU,
8872 AssumptionCache *AC) {
8873 for (PHINode &PHI : BB->phis())
8874 for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i)
8875 if (passingValueIsAlwaysUndefined(PHI.getIncomingValue(i), &PHI)) {
8876 BasicBlock *Predecessor = PHI.getIncomingBlock(i);
8877 Instruction *T = Predecessor->getTerminator();
8878 IRBuilder<> Builder(T);
8879 if (isa<UncondBrInst>(T)) {
8880 BB->removePredecessor(Predecessor);
8881 // Turn unconditional branches into unreachables.
8882 Builder.CreateUnreachable();
8883 T->eraseFromParent();
8884 if (DTU)
8885 DTU->applyUpdates({{DominatorTree::Delete, Predecessor, BB}});
8886 return true;
8887 } else if (CondBrInst *BI = dyn_cast<CondBrInst>(T)) {
8888 BB->removePredecessor(Predecessor);
8889 // Preserve guarding condition in assume, because it might not be
8890 // inferrable from any dominating condition.
8891 Value *Cond = BI->getCondition();
8892 CallInst *Assumption;
// If the true edge led to BB, reaching the surviving successor means the
// condition was false, so assume its negation; otherwise assume it holds.
8893 if (BI->getSuccessor(0) == BB)
8894 Assumption = Builder.CreateAssumption(Builder.CreateNot(Cond));
8895 else
8896 Assumption = Builder.CreateAssumption(Cond);
8897 if (AC)
8898 AC->registerAssumption(cast<AssumeInst>(Assumption));
8899 Builder.CreateBr(BI->getSuccessor(0) == BB ? BI->getSuccessor(1)
8900 : BI->getSuccessor(0));
8901 BI->eraseFromParent();
8902 if (DTU)
8903 DTU->applyUpdates({{DominatorTree::Delete, Predecessor, BB}});
8904 return true;
8905 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
8906 // Redirect all branches leading to UB into
8907 // a newly created unreachable block.
8908 BasicBlock *Unreachable = BasicBlock::Create(
8909 Predecessor->getContext(), "unreachable", BB->getParent(), BB);
8910 Builder.SetInsertPoint(Unreachable);
8911 // The new block contains only one instruction: Unreachable
8912 Builder.CreateUnreachable();
8913 for (const auto &Case : SI->cases())
8914 if (Case.getCaseSuccessor() == BB) {
8915 BB->removePredecessor(Predecessor);
8916 Case.setSuccessor(Unreachable);
8917 }
8918 if (SI->getDefaultDest() == BB) {
8919 BB->removePredecessor(Predecessor);
8920 SI->setDefaultDest(Unreachable);
8921 }
8922
8923 if (DTU)
8924 DTU->applyUpdates(
8925 { { DominatorTree::Insert, Predecessor, Unreachable },
8926 { DominatorTree::Delete, Predecessor, BB } });
8927 return true;
8928 }
8929 }
8930
8931 return false;
8932}
8933
/// Perform one round of CFG simplification on BB: delete it if unreachable,
/// constant-fold its terminator, remove UB-introducing predecessors, merge it
/// into its predecessor, sink/merge common code, fold a two-entry PHI, then
/// dispatch to the terminator-specific simplify* routine. Returns true if
/// anything changed.
/// NOTE(review): extraction dropped source lines 8955, 8958 and 8994; see
/// notes at each hole below.
8934bool SimplifyCFGOpt::simplifyOnce(BasicBlock *BB) {
8935 bool Changed = false;
8936
8937 assert(BB && BB->getParent() && "Block not embedded in function!");
8938 assert(BB->getTerminator() && "Degenerate basic block encountered!");
8939
8940 // Remove basic blocks that have no predecessors (except the entry block)...
8941 // or that just have themselves as a predecessor. These are unreachable.
8942 if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) ||
8943 BB->getSinglePredecessor() == BB) {
8944 LLVM_DEBUG(dbgs() << "Removing BB: \n" << *BB);
8945 DeleteDeadBlock(BB, DTU);
8946 return true;
8947 }
8948
8949 // Check to see if we can constant propagate this terminator instruction
8950 // away...
8951 Changed |= ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true,
8952 /*TLI=*/nullptr, DTU);
8953
8954 // Check for and eliminate duplicate PHI nodes in this block.
// NOTE(review): the call guarded by the comment above (source line 8955,
// presumably EliminateDuplicatePHINodes) is missing from this excerpt.
8956
8957 // Check for and remove branches that will always cause undefined behavior.
// NOTE(review): the condition for this return (source line 8958, presumably
// a call to removeUndefIntroducingPredecessor) is missing from this excerpt.
8959 return requestResimplify();
8960
8961 // Merge basic blocks into their predecessor if there is only one distinct
8962 // pred, and if there is only one distinct successor of the predecessor, and
8963 // if there are no PHI nodes.
8964 if (MergeBlockIntoPredecessor(BB, DTU))
8965 return true;
8966
8967 if (SinkCommon && Options.SinkCommonInsts) {
8968 if (sinkCommonCodeFromPredecessors(BB, DTU) ||
8969 mergeCompatibleInvokes(BB, DTU)) {
8970 // sinkCommonCodeFromPredecessors() does not automatically CSE PHI's,
8971 // so we may now have duplicate PHI's.
8972 // Let's rerun EliminateDuplicatePHINodes() first,
8973 // before foldTwoEntryPHINode() potentially converts them into select's,
8974 // after which we'd need a whole EarlyCSE pass run to clean them up.
8975 return true;
8976 }
8977 // Merge identical predecessors of this block.
8978 if (simplifyDuplicatePredecessors(BB, DTU))
8979 return true;
8980 }
8981
8982 if (Options.SpeculateBlocks &&
8983 !BB->getParent()->hasFnAttribute(Attribute::OptForFuzzing)) {
8984 // If there is a trivial two-entry PHI node in this basic block, and we can
8985 // eliminate it, do so now.
8986 if (auto *PN = dyn_cast<PHINode>(BB->begin()))
8987 if (PN->getNumIncomingValues() == 2)
8988 if (foldTwoEntryPHINode(PN, TTI, DTU, Options.AC, DL,
8989 Options.SpeculateUnpredictables))
8990 return true;
8991 }
8992
// Finally, dispatch on the terminator opcode.
8993 IRBuilder<> Builder(BB);
// NOTE(review): the declaration of Terminator (source line 8994, presumably
// BB->getTerminator()) is missing from this excerpt.
8995 Builder.SetInsertPoint(Terminator);
8996 switch (Terminator->getOpcode()) {
8997 case Instruction::UncondBr:
8998 Changed |= simplifyUncondBranch(cast<UncondBrInst>(Terminator), Builder);
8999 break;
9000 case Instruction::CondBr:
9001 Changed |= simplifyCondBranch(cast<CondBrInst>(Terminator), Builder);
9002 break;
9003 case Instruction::Resume:
9004 Changed |= simplifyResume(cast<ResumeInst>(Terminator), Builder);
9005 break;
9006 case Instruction::CleanupRet:
9007 Changed |= simplifyCleanupReturn(cast<CleanupReturnInst>(Terminator));
9008 break;
9009 case Instruction::Switch:
9010 Changed |= simplifySwitch(cast<SwitchInst>(Terminator), Builder);
9011 break;
9012 case Instruction::Unreachable:
9013 Changed |= simplifyUnreachable(cast<UnreachableInst>(Terminator));
9014 break;
9015 case Instruction::IndirectBr:
9016 Changed |= simplifyIndirectBr(cast<IndirectBrInst>(Terminator));
9017 break;
9018 }
9019
9020 return Changed;
9021}
9022
9023bool SimplifyCFGOpt::run(BasicBlock *BB) {
9024 bool Changed = false;
9025
9026 // Repeated simplify BB as long as resimplification is requested.
9027 do {
9028 Resimplify = false;
9029
9030 // Perform one round of simplifcation. Resimplify flag will be set if
9031 // another iteration is requested.
9032 Changed |= simplifyOnce(BB);
9033 } while (Resimplify);
9034
9035 return Changed;
9036}
9037
// Public entry point: builds a SimplifyCFGOpt over BB's function (taking the
// data layout from the block) and runs it to a fixed point on BB.
// NOTE(review): the signature lines of this function (source 9038-9039, with
// the function name and the leading parameters feeding TTI/DTU/Options) were
// dropped by extraction.
9040 ArrayRef<WeakVH> LoopHeaders) {
9041 return SimplifyCFGOpt(TTI, DTU, BB->getDataLayout(), LoopHeaders,
9042 Options)
9043 .run(BB);
9044}
#define Fail
#define Success
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
This file defines the DenseMap class.
#define DEBUG_TYPE
static Value * getCondition(Instruction *I)
Hexagon Common GEP
static bool IsIndirectCall(const MachineInstr *MI)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static LVOptions Options
Definition LVOptions.cpp:25
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
Provides some synthesis utilities to produce sequences of values.
This file defines generic set operations that may be used on set's of different types,...
This file implements a set that has insertion order iteration characteristics.
static std::optional< ContiguousCasesResult > findContiguousCases(Value *Condition, SmallVectorImpl< ConstantInt * > &Cases, SmallVectorImpl< ConstantInt * > &OtherCases, BasicBlock *Dest, BasicBlock *OtherDest)
static void addPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred, BasicBlock *ExistPred, MemorySSAUpdater *MSSAU=nullptr)
Update PHI nodes in Succ to indicate that there will now be entries in it from the 'NewPred' block.
static bool validLookupTableConstant(Constant *C, const TargetTransformInfo &TTI)
Return true if the backend will be able to handle initializing an array of constants like C.
static StoreInst * findUniqueStoreInBlocks(BasicBlock *BB1, BasicBlock *BB2)
static bool validateAndCostRequiredSelects(BasicBlock *BB, BasicBlock *ThenBB, BasicBlock *EndBB, unsigned &SpeculatedInstructions, InstructionCost &Cost, const TargetTransformInfo &TTI)
Estimate the cost of the insertion(s) and check that the PHI nodes can be converted to selects.
static bool simplifySwitchLookup(SwitchInst *SI, IRBuilder<> &Builder, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI, bool ConvertSwitchToLookupTable)
If the switch is only used to initialize one or more phi nodes in a common successor block with diffe...
static void removeSwitchAfterSelectFold(SwitchInst *SI, PHINode *PHI, Value *SelectValue, IRBuilder<> &Builder, DomTreeUpdater *DTU)
static bool valuesOverlap(std::vector< ValueEqualityComparisonCase > &C1, std::vector< ValueEqualityComparisonCase > &C2)
Return true if there are any keys in C1 that exist in C2 as well.
static bool isProfitableToSpeculate(const CondBrInst *BI, std::optional< bool > Invert, const TargetTransformInfo &TTI)
static bool mergeConditionalStoreToAddress(BasicBlock *PTB, BasicBlock *PFB, BasicBlock *QTB, BasicBlock *QFB, BasicBlock *PostBB, Value *Address, bool InvertPCond, bool InvertQCond, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI)
static bool mergeCleanupPad(CleanupReturnInst *RI)
static bool isVectorOp(Instruction &I)
Return if an instruction's type or any of its operands' types are a vector type.
static BasicBlock * allPredecessorsComeFromSameSource(BasicBlock *BB)
static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses(BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap)
static int constantIntSortPredicate(ConstantInt *const *P1, ConstantInt *const *P2)
static bool getCaseResults(SwitchInst *SI, ConstantInt *CaseVal, BasicBlock *CaseDest, BasicBlock **CommonDest, SmallVectorImpl< std::pair< PHINode *, Constant * > > &Res, const DataLayout &DL, const TargetTransformInfo &TTI)
Try to determine the resulting constant values in phi nodes at the common destination basic block,...
static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified=false)
Check if passing a value to an instruction will cause undefined behavior.
static std::optional< std::tuple< BasicBlock *, Instruction::BinaryOps, bool > > shouldFoldCondBranchesToCommonDestination(CondBrInst *BI, CondBrInst *PBI, const TargetTransformInfo *TTI)
Determine if the two branches share a common destination and deduce a glue that joins the branches' c...
static bool isSafeToHoistInstr(Instruction *I, unsigned Flags)
static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2, Instruction *I1, Instruction *I2)
static ConstantInt * getConstantInt(Value *V, const DataLayout &DL)
Extract ConstantInt from value, looking through IntToPtr and PointerNullValue.
static bool simplifySwitchOfCmpIntrinsic(SwitchInst *SI, IRBuilderBase &Builder, DomTreeUpdater *DTU)
Fold switch over ucmp/scmp intrinsic to br if two of the switch arms have the same destination.
static bool shouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize, const TargetTransformInfo &TTI, const DataLayout &DL, const SmallVector< Type * > &ResultTypes)
Determine whether a lookup table should be built for this switch, based on the number of cases,...
static Constant * constantFold(Instruction *I, const DataLayout &DL, const SmallDenseMap< Value *, Constant * > &ConstantPool)
Try to fold instruction I into a constant.
static bool areIdenticalUpToCommutativity(const Instruction *I1, const Instruction *I2)
static bool forwardSwitchConditionToPHI(SwitchInst *SI)
Try to forward the condition of a switch instruction to a phi node dominated by the switch,...
static PHINode * findPHIForConditionForwarding(ConstantInt *CaseValue, BasicBlock *BB, int *PhiIndex)
If BB would be eligible for simplification by TryToSimplifyUncondBranchFromEmptyBlock (i....
static bool simplifySwitchOfPowersOfTwo(SwitchInst *SI, IRBuilder<> &Builder, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI)
Tries to transform switch of powers of two to reduce switch range.
static bool isCleanupBlockEmpty(iterator_range< BasicBlock::iterator > R)
static Value * ensureValueAvailableInSuccessor(Value *V, BasicBlock *BB, Value *AlternativeV=nullptr)
static Value * createLogicalOp(IRBuilderBase &Builder, Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="")
static void hoistConditionalLoadsStores(CondBrInst *BI, SmallVectorImpl< Instruction * > &SpeculatedConditionalLoadsStores, std::optional< bool > Invert, Instruction *Sel)
If the target supports conditional faulting, we look for the following pattern:
static bool shouldHoistCommonInstructions(Instruction *I1, Instruction *I2, const TargetTransformInfo &TTI)
Helper function for hoistCommonCodeFromSuccessors.
static bool reduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder, const DataLayout &DL, const TargetTransformInfo &TTI)
Try to transform a switch that has "holes" in it to a contiguous sequence of cases.
static bool safeToMergeTerminators(Instruction *SI1, Instruction *SI2, SmallSetVector< BasicBlock *, 4 > *FailBlocks=nullptr)
Return true if it is safe to merge these two terminator instructions together.
SkipFlags
@ SkipReadMem
@ SkipSideEffect
@ SkipImplicitControlFlow
static bool incomingValuesAreCompatible(BasicBlock *BB, ArrayRef< BasicBlock * > IncomingBlocks, SmallPtrSetImpl< Value * > *EquivalenceSet=nullptr)
Return true if all the PHI nodes in the basic block BB receive compatible (identical) incoming values...
static bool trySwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI)
If a switch is only used to initialize one or more phi nodes in a common successor block with only tw...
static void createUnreachableSwitchDefault(SwitchInst *Switch, DomTreeUpdater *DTU, bool RemoveOrigDefaultBlock=true)
static Value * foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Constant *DefaultResult, Value *Condition, IRBuilder<> &Builder, const DataLayout &DL, ArrayRef< uint32_t > BranchWeights)
static bool isSwitchDense(uint64_t NumCases, uint64_t CaseRange)
static bool sinkCommonCodeFromPredecessors(BasicBlock *BB, DomTreeUpdater *DTU)
Check whether BB's predecessors end with unconditional branches.
static bool isTypeLegalForLookupTable(Type *Ty, const TargetTransformInfo &TTI, const DataLayout &DL)
static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU, AssumptionCache *AC, const DataLayout &DL)
Compute masked bits for the condition of a switch and use it to remove dead cases.
static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB, BlocksSet &NonLocalUseBlocks)
Return true if we can thread a branch across this block.
static Value * isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB, BasicBlock *StoreBB, BasicBlock *EndBB)
Determine if we can hoist sink a sole store instruction out of a conditional block.
static std::optional< bool > foldCondBranchOnValueKnownInPredecessorImpl(CondBrInst *BI, DomTreeUpdater *DTU, const DataLayout &DL, AssumptionCache *AC)
If we have a conditional branch on something for which we know the constant value in predecessors (e....
static bool foldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI, DomTreeUpdater *DTU, AssumptionCache *AC, const DataLayout &DL, bool SpeculateUnpredictables)
Given a BB that starts with the specified two-entry PHI node, see if we can eliminate it.
static bool findReaching(BasicBlock *BB, BasicBlock *DefBB, BlocksSet &ReachesNonLocalUses)
static bool extractPredSuccWeights(CondBrInst *PBI, CondBrInst *BI, uint64_t &PredTrueWeight, uint64_t &PredFalseWeight, uint64_t &SuccTrueWeight, uint64_t &SuccFalseWeight)
Return true if either PBI or BI has branch weight available, and store the weights in {Pred|Succ}...
static bool initializeUniqueCases(SwitchInst *SI, PHINode *&PHI, BasicBlock *&CommonDest, SwitchCaseResultVectorTy &UniqueResults, Constant *&DefaultResult, const DataLayout &DL, const TargetTransformInfo &TTI, uintptr_t MaxUniqueResults)
static bool shouldUseSwitchConditionAsTableIndex(ConstantInt &MinCaseVal, const ConstantInt &MaxCaseVal, bool HasDefaultResults, const SmallVector< Type * > &ResultTypes, const DataLayout &DL, const TargetTransformInfo &TTI)
static InstructionCost computeSpeculationCost(const User *I, const TargetTransformInfo &TTI)
Compute an abstract "cost" of speculating the given instruction, which is assumed to be safe to specu...
static bool performBranchToCommonDestFolding(CondBrInst *BI, CondBrInst *PBI, DomTreeUpdater *DTU, MemorySSAUpdater *MSSAU, const TargetTransformInfo *TTI)
SmallPtrSet< BasicBlock *, 8 > BlocksSet
static unsigned skippedInstrFlags(Instruction *I)
static bool mergeCompatibleInvokes(BasicBlock *BB, DomTreeUpdater *DTU)
If this block is a landingpad exception handling block, categorize all the predecessor invokes into s...
static bool replacingOperandWithVariableIsCheap(const Instruction *I, int OpIdx)
static void eraseTerminatorAndDCECond(Instruction *TI, MemorySSAUpdater *MSSAU=nullptr)
static void eliminateBlockCases(BasicBlock *BB, std::vector< ValueEqualityComparisonCase > &Cases)
Given a vector of bb/value pairs, remove any entries in the list that match the specified block.
static bool mergeConditionalStores(CondBrInst *PBI, CondBrInst *QBI, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI)
static bool mergeNestedCondBranch(CondBrInst *BI, DomTreeUpdater *DTU)
Fold the following pattern: bb0: br i1 cond1, label bb1, label bb2 bb1: br i1 cond2,...
static void sinkLastInstruction(ArrayRef< BasicBlock * > Blocks)
static size_t mapCaseToResult(ConstantInt *CaseVal, SwitchCaseResultVectorTy &UniqueResults, Constant *Result)
static bool tryWidenCondBranchToCondBranch(CondBrInst *PBI, CondBrInst *BI, DomTreeUpdater *DTU)
If the previous block ended with a widenable branch, determine if reusing the target block is profita...
static void mergeCompatibleInvokesImpl(ArrayRef< InvokeInst * > Invokes, DomTreeUpdater *DTU)
static bool mergeIdenticalBBs(ArrayRef< BasicBlock * > Candidates, DomTreeUpdater *DTU)
static void getBranchWeights(Instruction *TI, SmallVectorImpl< uint64_t > &Weights)
Get Weights of a given terminator, the default weight is at the front of the vector.
static bool tryToMergeLandingPad(LandingPadInst *LPad, UncondBrInst *BI, BasicBlock *BB, DomTreeUpdater *DTU)
Given an block with only a single landing pad and a unconditional branch try to find another basic bl...
static Constant * lookupConstant(Value *V, const SmallDenseMap< Value *, Constant * > &ConstantPool)
If V is a Constant, return it.
static bool SimplifyCondBranchToCondBranch(CondBrInst *PBI, CondBrInst *BI, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI)
If we have a conditional branch as a predecessor of another block, this function tries to simplify it...
static bool canSinkInstructions(ArrayRef< Instruction * > Insts, DenseMap< const Use *, SmallVector< Value *, 4 > > &PHIOperands)
static void hoistLockstepIdenticalDbgVariableRecords(Instruction *TI, Instruction *I1, SmallVectorImpl< Instruction * > &OtherInsts)
Hoists DbgVariableRecords from I1 and OtherInstrs that are identical in lock-step to TI.
static bool removeEmptyCleanup(CleanupReturnInst *RI, DomTreeUpdater *DTU)
static bool removeUndefIntroducingPredecessor(BasicBlock *BB, DomTreeUpdater *DTU, AssumptionCache *AC)
If BB has an incoming value that will always trigger undefined behavior (eg.
static bool simplifySwitchWhenUMin(SwitchInst *SI, DomTreeUpdater *DTU)
Tries to transform the switch when the condition is umin with a constant.
static bool isSafeCheapLoadStore(const Instruction *I, const TargetTransformInfo &TTI)
static ConstantInt * getKnownValueOnEdge(Value *V, BasicBlock *From, BasicBlock *To)
static bool dominatesMergePoint(Value *V, BasicBlock *BB, Instruction *InsertPt, SmallPtrSetImpl< Instruction * > &AggressiveInsts, InstructionCost &Cost, InstructionCost Budget, const TargetTransformInfo &TTI, AssumptionCache *AC, SmallPtrSetImpl< Instruction * > &ZeroCostInstructions, unsigned Depth=0)
If we have a merge point of an "if condition" as accepted above, return true if the specified value d...
static void reuseTableCompare(User *PhiUser, BasicBlock *PhiBlock, CondBrInst *RangeCheckBranch, Constant *DefaultValue, const SmallVectorImpl< std::pair< ConstantInt *, Constant * > > &Values)
Try to reuse the switch table index compare.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1023
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1685
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition APInt.h:1256
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1173
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
Definition APInt.h:1546
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1971
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1264
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1137
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition APInt.h:1589
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1952
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM_ABI bool getValueAsBool() const
Return the attribute's value as a boolean.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:462
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:449
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:518
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:675
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
LLVM_ABI bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
LLVM_ABI const BasicBlock * getUniqueSuccessor() const
Return the successor of this block if it has a unique successor.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:472
LLVM_ABI const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
size_t size() const
Definition BasicBlock.h:470
LLVM_ABI bool isLandingPad() const
Return true if this basic block is a landing pad.
LLVM_ABI bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition BasicBlock.h:647
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BasicBlock * getBasicBlock() const
Definition Constants.h:1100
static LLVM_ABI BranchProbability getBranchProbability(uint64_t Numerator, uint64_t Denominator)
BranchProbability getCompl() const
void addRangeRetAttr(const ConstantRange &CR)
adds the range attribute to the list of attributes.
bool isCallee(Value::const_user_iterator UI) const
Determine whether the passed iterator points to the callee operand's Use.
bool isDataOperand(const Use *U) const
bool tryIntersectAttributes(const CallBase *Other)
Try to intersect the attributes from 'this' CallBase and the 'Other' CallBase.
This class represents a function call, abstracting a target machine's calling convention.
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
CleanupPadInst * getCleanupPad() const
Convenience accessor.
BasicBlock * getUnwindDest() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Conditional Branch instruction.
static CondBrInst * Create(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void setCondition(Value *V)
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:932
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1291
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:225
bool isNegative() const
Definition Constants.h:214
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
Definition Constants.h:269
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition Constants.h:198
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
A constant pointer value that points to null.
Definition Constants.h:701
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI bool isSizeLargerThan(uint64_t MaxSize) const
Compare set size of this range with Value.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool isUpperWrapped() const
Return true if the exclusive upper bound wraps around the unsigned domain.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
LLVM_ABI bool isOneValue() const
Returns true if the value is one.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void removeFromParent()
simple_ilist< DbgRecord >::iterator self_iterator
Record of a variable value-assignment, aka a non instruction representation of the dbg....
A debug info location.
Definition DebugLoc.h:123
bool isSameSourceLocation(const DebugLoc &Other) const
Return true if the source locations match, ignoring isImplicitCode and source atom info.
Definition DebugLoc.h:255
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static LLVM_ABI DebugLoc getMergedLocation(DebugLoc LocA, DebugLoc LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
Definition DebugLoc.cpp:179
static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)
Try to combine the vector of locations passed as input in a single one.
Definition DebugLoc.cpp:166
static DebugLoc getDropped()
Definition DebugLoc.h:163
ValueT & at(const_arg_type_t< KeyT > Val)
at - Return the entry for the specified key, or abort if no such entry exists.
Definition DenseMap.h:224
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
unsigned size() const
Definition DenseMap.h:110
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition DenseMap.h:114
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:873
const BasicBlock & getEntryBlock() const
Definition Function.h:809
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2347
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2095
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1223
LLVM_ABI Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateAssumption(Value *Cond, ArrayRef< OperandBundleDef > OpBundles={})
Create an assume intrinsic call that allows the optimizer to assume that the provided condition will ...
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition IRBuilder.h:502
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition IRBuilder.h:2650
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1539
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition IRBuilder.h:1975
UncondBrInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition IRBuilder.h:1217
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1835
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
Definition IRBuilder.h:1246
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2331
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1877
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1890
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1429
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2189
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition IRBuilder.h:507
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2063
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2272
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2441
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1599
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1463
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI iterator_range< simple_ilist< DbgRecord >::iterator > cloneDebugInfoFrom(const Instruction *From, std::optional< simple_ilist< DbgRecord >::iterator > FromHere=std::nullopt, bool InsertAtHead=false)
Clone any debug-info attached to From onto this instruction.
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
LLVM_ABI bool isUsedOutsideOfBlock(const BasicBlock *BB) const LLVM_READONLY
Return true if there are any uses of this instruction in blocks other than the specified block.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
@ CompareUsingIntersectedAttrs
Check for equivalence with intersected callbase attrs.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void applyMergedLocation(DebugLoc LocA, DebugLoc LocB)
Merge 2 debug locations and apply it to the Instruction.
LLVM_ABI void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
LLVM_ABI InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
Class to represent integer types.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Invoke instruction.
void setNormalDest(BasicBlock *B)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
static unsigned getPointerOperandIndex()
Iterates through instructions in a set of blocks in reverse order from the first non-terminator.
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition MDBuilder.cpp:38
Metadata node.
Definition Metadata.h:1080
Helper class to manipulate !mmra metadata nodes.
bool empty() const
Definition MapVector.h:77
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
size_type size() const
Definition MapVector.h:56
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
void setIncomingValue(unsigned i, Value *V)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Align getAlign() const
bool isSimple() const
Value * getValueOperand()
bool isUnordered() const
static unsigned getPointerOperandIndex()
Value * getPointerOperand()
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
Multiway switch.
CaseIt case_end()
Returns a read/write iterator that points one past the last in the SwitchInst.
BasicBlock * getSuccessor(unsigned idx) const
void setCondition(Value *V)
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
unsigned getNumSuccessors() const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TargetCostKind
The kind of cost model.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
@ TCC_Basic
The cost of a typical 'add' instruction.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
Unconditional Branch instruction.
static UncondBrInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
void setSuccessor(BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i=0) const
'undef' values are things that do not have specified contents.
Definition Constants.h:1606
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition Use.cpp:35
LLVM_ABI void set(Value *Val)
Definition Value.h:907
User * getUser() const
Returns the User that contains this Use.
Definition Use.h:61
op_range operands()
Definition User.h:267
const Use & getOperandUse(unsigned i) const
Definition User.h:220
void setOperand(unsigned i, Value *Val)
Definition User.h:212
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
static constexpr uint64_t MaximumAlignment
Definition Value.h:832
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:440
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:427
bool use_empty() const
Definition Value.h:347
iterator_range< use_iterator > uses()
Definition Value.h:381
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Represents an op.with.overflow intrinsic.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
A range adaptor for a pair of iterators.
Changed
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
Definition DebugInfo.h:203
LLVM_ABI void deleteAssignmentMarkers(const Instruction *Inst)
Delete the llvm.dbg.assign intrinsics linked to Inst.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
constexpr double e
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
bool operator<(int64_t V1, const APSInt &V2)
Definition APSInt.h:360
constexpr auto not_equal_to(T &&Arg)
Functor variant of std::not_equal_to that can be used as a UnaryPredicate in functional algorithms li...
Definition STLExtras.h:2180
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition Metadata.cpp:64
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
bool succ_empty(const Instruction *I)
Definition CFG.h:153
LLVM_ABI bool IsBlockFollowedByDeoptOrUnreachable(const BasicBlock *BB)
Check if we can prove that all paths starting from this block converge to a block that either has a @...
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
static cl::opt< unsigned > MaxSwitchCasesPerResult("max-switch-cases-per-result", cl::Hidden, cl::init(16), cl::desc("Limit cases to analyze when converting a switch to select"))
InstructionCost Cost
static cl::opt< bool > SpeculateOneExpensiveInst("speculate-one-expensive-inst", cl::Hidden, cl::init(true), cl::desc("Allow exactly one expensive instruction to be speculatively " "executed"))
@ Dead
Unused definition.
auto pred_end(const MachineBasicBlock *BB)
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
auto accumulate(R &&Range, E &&Init)
Wrapper for std::accumulate.
Definition STLExtras.h:1702
constexpr from_range_t from_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
auto unique(Range &&R, Predicate P)
Definition STLExtras.h:2134
static cl::opt< unsigned > MaxSpeculationDepth("max-speculation-depth", cl::Hidden, cl::init(10), cl::desc("Limit maximum recursion depth when calculating costs of " "speculatively executed instructions"))
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P)
Provide wrappers to std::copy_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1791
static cl::opt< unsigned > PHINodeFoldingThreshold("phi-node-folding-threshold", cl::Hidden, cl::init(2), cl::desc("Control the amount of phi node folding to perform (default = 2)"))
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
static cl::opt< bool > MergeCondStoresAggressively("simplifycfg-merge-cond-stores-aggressively", cl::Hidden, cl::init(false), cl::desc("When merging conditional stores, do so even if the resultant " "basic blocks are unlikely to be if-converted as a result"))
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:366
static cl::opt< unsigned > BranchFoldThreshold("simplifycfg-branch-fold-threshold", cl::Hidden, cl::init(2), cl::desc("Maximum cost of combining conditions when " "folding branches"))
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
static cl::opt< bool > SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true), cl::desc("Sink common instructions down to the end block"))
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition STLExtras.h:2200
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
static cl::opt< bool > HoistStoresWithCondFaulting("simplifycfg-hoist-stores-with-cond-faulting", cl::Hidden, cl::init(true), cl::desc("Hoist stores if the target supports conditional faulting"))
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
constexpr detail::StaticCastFunc< To > StaticCastTo
Function objects corresponding to the Cast types defined above.
Definition Casting.h:882
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
LLVM_ABI CondBrInst * GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue, BasicBlock *&IfFalse)
Check whether BB is the merge point of a if-region.
LLVM_ABI bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition Local.cpp:1155
void RemapDbgRecordRange(Module *M, iterator_range< DbgRecordIterator > Range, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataPredicate *IdentityMD=nullptr)
Remap the Values used in the DbgRecords Range using the value map VM.
LLVM_ABI void InvertBranch(CondBrInst *PBI, IRBuilderBase &Builder)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
static cl::opt< bool > EnableMergeCompatibleInvokes("simplifycfg-merge-compatible-invokes", cl::Hidden, cl::init(true), cl::desc("Allow SimplifyCFG to merge invokes together when appropriate"))
@ RF_IgnoreMissingLocals
If this flag is set, the remapper ignores missing function-local entries (Argument,...
Definition ValueMapper.h:98
@ RF_NoModuleLevelChanges
If this flag is set, the remapper knows that only local values within a function (such as an instruct...
Definition ValueMapper.h:80
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
auto make_first_range(ContainerTy &&c)
Given a container of pairs, return a range over the first elements.
Definition STLExtras.h:1399
LLVM_ABI bool collectPossibleValues(const Value *V, SmallPtrSetImpl< const Constant * > &Constants, unsigned MaxCount, bool AllowUndefOrPoison=true)
Enumerates all possible immediate values of V and inserts them into the set Constants.
LLVM_ABI Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition Local.cpp:2863
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
auto succ_size(const MachineBasicBlock *BB)
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
Definition STLExtras.h:552
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
static cl::opt< unsigned > MaxJumpThreadingLiveBlocks("max-jump-threading-live-blocks", cl::Hidden, cl::init(24), cl::desc("Limit number of blocks a define in a threaded block is allowed " "to be live in"))
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition Local.cpp:3110
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
static cl::opt< int > MaxSmallBlockSize("simplifycfg-max-small-block-size", cl::Hidden, cl::init(10), cl::desc("Max size of a block which is still considered " "small enough to thread through"))
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
bool isWidenableBranch(const User *U)
Returns true iff U is a widenable branch (that is, extractWidenableCondition returns widenable condit...
@ Other
Any other memory.
Definition ModRef.h:68
TargetTransformInfo TTI
static cl::opt< unsigned > HoistCommonSkipLimit("simplifycfg-hoist-common-skip-limit", cl::Hidden, cl::init(20), cl::desc("Allow reordering across at most this many " "instructions when hoisting"))
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI cl::opt< bool > RequireAndPreserveDomTree
This function is used to do simplification of a CFG.
static cl::opt< bool > MergeCondStores("simplifycfg-merge-cond-stores", cl::Hidden, cl::init(true), cl::desc("Hoist conditional stores even if an unconditional store does not " "precede - hoist multiple conditional stores into a single " "predicated store"))
static cl::opt< unsigned > BranchFoldToCommonDestVectorMultiplier("simplifycfg-branch-fold-common-dest-vector-multiplier", cl::Hidden, cl::init(2), cl::desc("Multiplier to apply to threshold when determining whether or not " "to fold branch to common destination when vector operations are " "present"))
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
LLVM_ABI void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition Local.cpp:3392
@ Sub
Subtraction of integers.
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2012
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataPredicate *IdentityMD=nullptr)
Convert the instruction operands from referencing the current values into those specified by VM.
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition Local.cpp:3899
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
LLVM_ABI bool FoldSingleEntryPHINodes(BasicBlock *BB, MemoryDependenceResults *MemDep=nullptr)
We know that BB has one predecessor.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
void RemapDbgRecord(Module *M, DbgRecord *DR, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataPredicate *IdentityMD=nullptr)
Remap the Values used in the DbgRecord DR using the value map VM.
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
auto sum_of(R &&Range, E Init=E{0})
Returns the sum of all values in Range with Init initial value.
Definition STLExtras.h:1717
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition Loads.cpp:249
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
static cl::opt< bool > HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true), cl::desc("Hoist conditional stores if an unconditional store precedes"))
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
LLVM_ABI bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI, DomTreeUpdater *DTU=nullptr, const SimplifyCFGOptions &Options={}, ArrayRef< WeakVH > LoopHeaders={})
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2192
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition MathExtras.h:248
auto predecessors(const MachineBasicBlock *BB)
static cl::opt< unsigned > HoistLoadsStoresWithCondFaultingThreshold("hoist-loads-stores-with-cond-faulting-threshold", cl::Hidden, cl::init(6), cl::desc("Control the maximal conditional load/store that we are willing " "to speculatively execute to eliminate conditional branch " "(default = 6)"))
static cl::opt< bool > HoistCommon("simplifycfg-hoist-common", cl::Hidden, cl::init(true), cl::desc("Hoist common instructions up to the parent block"))
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI bool foldBranchToCommonDest(CondBrInst *BI, llvm::DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr, const TargetTransformInfo *TTI=nullptr, unsigned BonusInstThreshold=1)
If this basic block is ONLY a setcc and a branch, and if a predecessor branches to us and one of our ...
static cl::opt< unsigned > TwoEntryPHINodeFoldingThreshold("two-entry-phi-node-folding-threshold", cl::Hidden, cl::init(4), cl::desc("Control the maximal total instruction cost that we are willing " "to speculatively execute to fold a 2-entry PHI node into a " "select (default = 4)"))
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
SmallVector< uint64_t, 2 > getDisjunctionWeights(const SmallVector< T1, 2 > &B1, const SmallVector< T2, 2 > &B2)
Get the branch weights of a branch conditioned on b1 || b2, where b1 and b2 are 2 booleans that are t...
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition STLExtras.h:1596
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2146
static cl::opt< bool > HoistLoadsWithCondFaulting("simplifycfg-hoist-loads-with-cond-faulting", cl::Hidden, cl::init(true), cl::desc("Hoist loads if the target supports conditional faulting"))
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef< uint64_t > Weights, bool IsExpected, bool ElideAllZero=false)
Variant of setBranchWeights where the Weights will be fit first to uint32_t by shifting right.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
bool capturesNothing(CaptureComponents CC)
Definition ModRef.h:355
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition Local.cpp:1524
@ Keep
No function return thunk.
Definition CodeGen.h:162
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI void RemapSourceAtom(Instruction *I, ValueToValueMapTy &VM)
Remap source location atom.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
LLVM_ABI void mapAtomInstance(const DebugLoc &DL, ValueToValueMapTy &VMap)
Mark a cloned instruction as a new instance so that its source loc can be updated when remapped.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
LLVM_ABI void extractFromBranchWeightMD64(const MDNode *ProfileData, SmallVectorImpl< uint64_t > &Weights)
Faster version of extractBranchWeights() that skips checks and must only be called with "branch_weigh...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
SmallVectorImpl< ConstantInt * > * Cases
SmallVectorImpl< ConstantInt * > * OtherCases
Checking whether two BBs are equal depends on the contents of the BasicBlock and the incoming values ...
SmallDenseMap< BasicBlock *, Value *, 8 > BB2ValueMap
Phi2IVsMap * PhiPredIVs
DenseMap< PHINode *, BB2ValueMap > Phi2IVsMap
static bool canBeMerged(const BasicBlock *BB)
BasicBlock * BB
LLVM_ABI AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
static const EqualBBWrapper * getEmptyKey()
static bool isEqual(const EqualBBWrapper *LHS, const EqualBBWrapper *RHS)
static unsigned getHashValue(const EqualBBWrapper *EBW)
static const EqualBBWrapper * getTombstoneKey()
An information struct used to provide DenseMap with the various necessary components for a given valu...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
Definition KnownBits.h:312
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:148
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276