//===- Local.cpp - Functions to perform local transformations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <optional>
#include <utility>
#include <vector>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
93STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95
97 "phicse-debug-hash",
98#ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100#else
101 cl::init(false),
102#endif
104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
114 "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
116 cl::desc("Stop removing an empty block if removing it will introduce more "
117 "than this number of phi entries in its successor"));
118
119// Max recursion depth for collectBitParts used when detecting bswap and
120// bitreverse idioms.
121static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
127/// ConstantFoldTerminator - If a terminator instruction is predicated on a
128/// constant value, convert it into an unconditional branch to the constant
129/// destination. This is a nontrivial operation because the successors of this
130/// basic block must have their PHI nodes updated.
131/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
132/// conditions and indirectbr addresses this might make dead if
133/// DeleteDeadConditions is true.
134bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
135 const TargetLibraryInfo *TLI,
136 DomTreeUpdater *DTU) {
137 Instruction *T = BB->getTerminator();
138 IRBuilder<> Builder(T);
139
140 // Branch - See if we are conditional jumping on constant
141 if (auto *BI = dyn_cast<CondBrInst>(T)) {
142 BasicBlock *Dest1 = BI->getSuccessor(0);
143 BasicBlock *Dest2 = BI->getSuccessor(1);
144
145 if (Dest2 == Dest1) { // Conditional branch to same location?
146 // This branch matches something like this:
147 // br bool %cond, label %Dest, label %Dest
148 // and changes it into: br label %Dest
149
150 // Let the basic block know that we are letting go of one copy of it.
151 assert(BI->getParent() && "Terminator not inserted in block!");
152 Dest1->removePredecessor(BI->getParent());
153
154 // Replace the conditional branch with an unconditional one.
155 UncondBrInst *NewBI = Builder.CreateBr(Dest1);
156
157 // Transfer the metadata to the new branch instruction.
158 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
159 LLVMContext::MD_annotation});
160
161 Value *Cond = BI->getCondition();
162 BI->eraseFromParent();
163 if (DeleteDeadConditions)
165 return true;
166 }
167
168 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
169 // Are we branching on constant?
170 // YES. Change to unconditional branch...
171 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
172 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
173
174 // Let the basic block know that we are letting go of it. Based on this,
175 // it will adjust it's PHI nodes.
176 OldDest->removePredecessor(BB);
177
178 // Replace the conditional branch with an unconditional one.
179 UncondBrInst *NewBI = Builder.CreateBr(Destination);
180
181 // Transfer the metadata to the new branch instruction.
182 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
183 LLVMContext::MD_annotation});
184
185 BI->eraseFromParent();
186 if (DTU)
187 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
188 return true;
189 }
190
191 return false;
192 }
193
194 if (auto *SI = dyn_cast<SwitchInst>(T)) {
195 // If we are switching on a constant, we can convert the switch to an
196 // unconditional branch.
197 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
198 BasicBlock *DefaultDest = SI->getDefaultDest();
199 BasicBlock *TheOnlyDest = DefaultDest;
200
201 // If the default is unreachable, ignore it when searching for TheOnlyDest.
202 if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
203 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
204
205 bool Changed = false;
206
207 // Figure out which case it goes to.
208 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
209 // Found case matching a constant operand?
210 if (It->getCaseValue() == CI) {
211 TheOnlyDest = It->getCaseSuccessor();
212 break;
213 }
214
215 // Check to see if this branch is going to the same place as the default
216 // dest. If so, eliminate it as an explicit compare.
217 if (It->getCaseSuccessor() == DefaultDest) {
219 unsigned NCases = SI->getNumCases();
220 // Fold the case metadata into the default if there will be any branches
221 // left, unless the metadata doesn't match the switch.
222 if (NCases > 1 && MD) {
223 // Collect branch weights into a vector.
225 extractFromBranchWeightMD64(MD, Weights);
226
227 // Merge weight of this case to the default weight.
228 unsigned Idx = It->getCaseIndex();
229
230 // Check for and prevent uint64_t overflow by reducing branch weights.
231 if (Weights[0] > UINT64_MAX - Weights[Idx + 1])
232 fitWeights(Weights);
233
234 Weights[0] += Weights[Idx + 1];
235 // Remove weight for this case.
236 std::swap(Weights[Idx + 1], Weights.back());
237 Weights.pop_back();
239 }
240 // Remove this entry.
241 BasicBlock *ParentBB = SI->getParent();
242 DefaultDest->removePredecessor(ParentBB);
243 It = SI->removeCase(It);
244 End = SI->case_end();
245
246 // Removing this case may have made the condition constant. In that
247 // case, update CI and restart iteration through the cases.
248 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
249 CI = NewCI;
250 It = SI->case_begin();
251 }
252
253 Changed = true;
254 continue;
255 }
256
257 // Otherwise, check to see if the switch only branches to one destination.
258 // We do this by reseting "TheOnlyDest" to null when we find two non-equal
259 // destinations.
260 if (It->getCaseSuccessor() != TheOnlyDest)
261 TheOnlyDest = nullptr;
262
263 // Increment this iterator as we haven't removed the case.
264 ++It;
265 }
266
267 if (CI && !TheOnlyDest) {
268 // Branching on a constant, but not any of the cases, go to the default
269 // successor.
270 TheOnlyDest = SI->getDefaultDest();
271 }
272
273 // If we found a single destination that we can fold the switch into, do so
274 // now.
275 if (TheOnlyDest) {
276 // Insert the new branch.
277 Builder.CreateBr(TheOnlyDest);
278 BasicBlock *BB = SI->getParent();
279
280 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
281
282 // Remove entries from PHI nodes which we no longer branch to...
283 BasicBlock *SuccToKeep = TheOnlyDest;
284 for (BasicBlock *Succ : successors(SI)) {
285 if (DTU && Succ != TheOnlyDest)
286 RemovedSuccessors.insert(Succ);
287 // Found case matching a constant operand?
288 if (Succ == SuccToKeep) {
289 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
290 } else {
291 Succ->removePredecessor(BB);
292 }
293 }
294
295 // Delete the old switch.
296 Value *Cond = SI->getCondition();
297 SI->eraseFromParent();
298 if (DeleteDeadConditions)
300 if (DTU) {
301 std::vector<DominatorTree::UpdateType> Updates;
302 Updates.reserve(RemovedSuccessors.size());
303 for (auto *RemovedSuccessor : RemovedSuccessors)
304 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
305 DTU->applyUpdates(Updates);
306 }
307 return true;
308 }
309
310 if (SI->getNumCases() == 1) {
311 // Otherwise, we can fold this switch into a conditional branch
312 // instruction if it has only one non-default destination.
313 auto FirstCase = *SI->case_begin();
314 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
315 FirstCase.getCaseValue(), "cond");
316
317 // Insert the new branch.
318 CondBrInst *NewBr = Builder.CreateCondBr(
319 Cond, FirstCase.getCaseSuccessor(), SI->getDefaultDest());
320 SmallVector<uint32_t> Weights;
321 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
322 uint32_t DefWeight = Weights[0];
323 uint32_t CaseWeight = Weights[1];
324 // The TrueWeight should be the weight for the single case of SI.
325 NewBr->setMetadata(LLVMContext::MD_prof,
326 MDBuilder(BB->getContext())
327 .createBranchWeights(CaseWeight, DefWeight));
328 }
329
330 // Update make.implicit metadata to the newly-created conditional branch.
331 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
332 if (MakeImplicitMD)
333 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
334
335 // Delete the old switch.
336 SI->eraseFromParent();
337 return true;
338 }
339 return Changed;
340 }
341
342 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
343 // indirectbr blockaddress(@F, @BB) -> br label @BB
344 if (auto *BA =
345 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
346 BasicBlock *TheOnlyDest = BA->getBasicBlock();
347 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
348
349 // Insert the new branch.
350 Builder.CreateBr(TheOnlyDest);
351
352 BasicBlock *SuccToKeep = TheOnlyDest;
353 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
354 BasicBlock *DestBB = IBI->getDestination(i);
355 if (DTU && DestBB != TheOnlyDest)
356 RemovedSuccessors.insert(DestBB);
357 if (IBI->getDestination(i) == SuccToKeep) {
358 SuccToKeep = nullptr;
359 } else {
360 DestBB->removePredecessor(BB);
361 }
362 }
363 Value *Address = IBI->getAddress();
364 IBI->eraseFromParent();
365 if (DeleteDeadConditions)
366 // Delete pointer cast instructions.
368
369 // Also zap the blockaddress constant if there are no users remaining,
370 // otherwise the destination is still marked as having its address taken.
371 if (BA->use_empty())
372 BA->destroyConstant();
373
374 // If we didn't find our destination in the IBI successor list, then we
375 // have undefined behavior. Replace the unconditional branch with an
376 // 'unreachable' instruction.
377 if (SuccToKeep) {
379 new UnreachableInst(BB->getContext(), BB);
380 }
381
382 if (DTU) {
383 std::vector<DominatorTree::UpdateType> Updates;
384 Updates.reserve(RemovedSuccessors.size());
385 for (auto *RemovedSuccessor : RemovedSuccessors)
386 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
387 DTU->applyUpdates(Updates);
388 }
389 return true;
390 }
391 }
392
393 return false;
394}
395
396//===----------------------------------------------------------------------===//
397// Local dead code elimination.
398//
399
400/// isInstructionTriviallyDead - Return true if the result produced by the
401/// instruction is not used, and the instruction has no side effects.
402///
404 const TargetLibraryInfo *TLI) {
405 if (!I->use_empty())
406 return false;
408}
409
411 Instruction *I, const TargetLibraryInfo *TLI) {
412 // Instructions that are "markers" and have implied meaning on code around
413 // them (without explicit uses), are not dead on unused paths.
415 if (II->getIntrinsicID() == Intrinsic::stacksave ||
416 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
417 II->isLifetimeStartOrEnd())
418 return false;
420}
421
423 const TargetLibraryInfo *TLI) {
424 if (I->isTerminator())
425 return false;
426
427 // We don't want the landingpad-like instructions removed by anything this
428 // general.
429 if (I->isEHPad())
430 return false;
431
432 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
433 if (DLI->getLabel())
434 return false;
435 return true;
436 }
437
438 if (auto *CB = dyn_cast<CallBase>(I))
439 if (isRemovableAlloc(CB, TLI))
440 return true;
441
442 if (!I->willReturn()) {
444 if (!II)
445 return false;
446
447 switch (II->getIntrinsicID()) {
448 case Intrinsic::experimental_guard: {
449 // Guards on true are operationally no-ops. In the future we can
450 // consider more sophisticated tradeoffs for guards considering potential
451 // for check widening, but for now we keep things simple.
452 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
453 return Cond && Cond->isOne();
454 }
455 // TODO: These intrinsics are not safe to remove, because this may remove
456 // a well-defined trap.
457 case Intrinsic::wasm_trunc_signed:
458 case Intrinsic::wasm_trunc_unsigned:
459 case Intrinsic::ptrauth_auth:
460 case Intrinsic::ptrauth_resign:
461 case Intrinsic::ptrauth_resign_load_relative:
462 return true;
463 default:
464 return false;
465 }
466 }
467
468 if (!I->mayHaveSideEffects())
469 return true;
470
471 // Special case intrinsics that "may have side effects" but can be deleted
472 // when dead.
474 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
475 if (II->getIntrinsicID() == Intrinsic::stacksave ||
476 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
477 return true;
478
479 // Intrinsics declare sideeffects to prevent them from moving, but they are
480 // nops without users.
481 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
482 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
483 return true;
484
485 if (II->isLifetimeStartOrEnd()) {
486 auto *Arg = II->getArgOperand(0);
487 if (isa<PoisonValue>(Arg))
488 return true;
489
490 // If the only uses of the alloca are lifetime intrinsics, then the
491 // intrinsics are dead.
492 return llvm::all_of(Arg->uses(), [](Use &Use) {
493 return isa<LifetimeIntrinsic>(Use.getUser());
494 });
495 }
496
497 // Assumptions are dead if their condition is trivially true.
498 if (II->getIntrinsicID() == Intrinsic::assume &&
500 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
501 return !Cond->isZero();
502
503 return false;
504 }
505
506 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
507 std::optional<fp::ExceptionBehavior> ExBehavior =
508 FPI->getExceptionBehavior();
509 return *ExBehavior != fp::ebStrict;
510 }
511 }
512
513 if (auto *Call = dyn_cast<CallBase>(I)) {
514 if (Value *FreedOp = getFreedOperand(Call, TLI))
515 if (Constant *C = dyn_cast<Constant>(FreedOp))
516 return C->isNullValue() || isa<UndefValue>(C);
517 if (isMathLibCallNoop(Call, TLI))
518 return true;
519 }
520
521 // Non-volatile atomic loads from constants can be removed.
522 if (auto *LI = dyn_cast<LoadInst>(I))
523 if (auto *GV = dyn_cast<GlobalVariable>(
524 LI->getPointerOperand()->stripPointerCasts()))
525 if (!LI->isVolatile() && GV->isConstant())
526 return true;
527
528 return false;
529}
530
531/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
532/// trivially dead instruction, delete it. If that makes any of its operands
533/// trivially dead, delete them too, recursively. Return true if any
534/// instructions were deleted.
536 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
537 std::function<void(Value *)> AboutToDeleteCallback) {
539 if (!I || !isInstructionTriviallyDead(I, TLI))
540 return false;
541
543 DeadInsts.push_back(I);
544 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
545 AboutToDeleteCallback);
546
547 return true;
548}
549
552 MemorySSAUpdater *MSSAU,
553 std::function<void(Value *)> AboutToDeleteCallback) {
554 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
555 for (; S != E; ++S) {
556 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
557 if (!I || !isInstructionTriviallyDead(I)) {
558 DeadInsts[S] = nullptr;
559 ++Alive;
560 }
561 }
562 if (Alive == E)
563 return false;
564 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
565 AboutToDeleteCallback);
566 return true;
567}
568
571 MemorySSAUpdater *MSSAU,
572 std::function<void(Value *)> AboutToDeleteCallback) {
573 // Process the dead instruction list until empty.
574 while (!DeadInsts.empty()) {
575 Value *V = DeadInsts.pop_back_val();
577 if (!I)
578 continue;
580 "Live instruction found in dead worklist!");
581 assert(I->use_empty() && "Instructions with uses are not dead.");
582
583 // Don't lose the debug info while deleting the instructions.
585
586 if (AboutToDeleteCallback)
587 AboutToDeleteCallback(I);
588
589 // Null out all of the instruction's operands to see if any operand becomes
590 // dead as we go.
591 for (Use &OpU : I->operands()) {
592 Value *OpV = OpU.get();
593 OpU.set(nullptr);
594
595 if (!OpV->use_empty())
596 continue;
597
598 // If the operand is an instruction that became dead as we nulled out the
599 // operand, and if it is 'trivially' dead, delete it in a future loop
600 // iteration.
601 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
602 if (isInstructionTriviallyDead(OpI, TLI))
603 DeadInsts.push_back(OpI);
604 }
605 if (MSSAU)
606 MSSAU->removeMemoryAccess(I);
607
608 I->eraseFromParent();
609 }
610}
611
614 findDbgUsers(I, DPUsers);
615 for (auto *DVR : DPUsers)
616 DVR->setKillLocation();
617 return !DPUsers.empty();
618}
619
620/// areAllUsesEqual - Check whether the uses of a value are all the same.
621/// This is similar to Instruction::hasOneUse() except this will also return
622/// true when there are no uses or multiple uses that all refer to the same
623/// value.
625 Value::user_iterator UI = I->user_begin();
626 Value::user_iterator UE = I->user_end();
627 if (UI == UE)
628 return true;
629
630 User *TheUse = *UI;
631 for (++UI; UI != UE; ++UI) {
632 if (*UI != TheUse)
633 return false;
634 }
635 return true;
636}
637
638/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
639/// dead PHI node, due to being a def-use chain of single-use nodes that
640/// either forms a cycle or is terminated by a trivially dead instruction,
641/// delete it. If that makes any of its operands trivially dead, delete them
642/// too, recursively. Return true if a change was made.
644 const TargetLibraryInfo *TLI,
645 llvm::MemorySSAUpdater *MSSAU) {
647 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
648 I = cast<Instruction>(*I->user_begin())) {
649 if (I->use_empty())
651
652 // If we find an instruction more than once, we're on a cycle that
653 // won't prove fruitful.
654 if (!Visited.insert(I).second) {
655 // Break the cycle and delete the instruction and its operands.
656 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
658 return true;
659 }
660 }
661 return false;
662}
663
664static bool
667 const DataLayout &DL,
668 const TargetLibraryInfo *TLI) {
669 if (isInstructionTriviallyDead(I, TLI)) {
671
672 // Null out all of the instruction's operands to see if any operand becomes
673 // dead as we go.
674 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
675 Value *OpV = I->getOperand(i);
676 I->setOperand(i, nullptr);
677
678 if (!OpV->use_empty() || I == OpV)
679 continue;
680
681 // If the operand is an instruction that became dead as we nulled out the
682 // operand, and if it is 'trivially' dead, delete it in a future loop
683 // iteration.
684 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
685 if (isInstructionTriviallyDead(OpI, TLI))
686 WorkList.insert(OpI);
687 }
688
689 I->eraseFromParent();
690
691 return true;
692 }
693
694 if (Value *SimpleV = simplifyInstruction(I, DL)) {
695 // Add the users to the worklist. CAREFUL: an instruction can use itself,
696 // in the case of a phi node.
697 for (User *U : I->users()) {
698 if (U != I) {
699 WorkList.insert(cast<Instruction>(U));
700 }
701 }
702
703 // Replace the instruction with its simplified value.
704 bool Changed = false;
705 if (!I->use_empty()) {
706 I->replaceAllUsesWith(SimpleV);
707 Changed = true;
708 }
709 if (isInstructionTriviallyDead(I, TLI)) {
710 I->eraseFromParent();
711 Changed = true;
712 }
713 return Changed;
714 }
715 return false;
716}
717
718/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
719/// simplify any instructions in it and recursively delete dead instructions.
720///
721/// This returns true if it changed the code, note that it can delete
722/// instructions in other blocks as well in this block.
724 const TargetLibraryInfo *TLI) {
725 bool MadeChange = false;
726 const DataLayout &DL = BB->getDataLayout();
727
728#ifndef NDEBUG
729 // In debug builds, ensure that the terminator of the block is never replaced
730 // or deleted by these simplifications. The idea of simplification is that it
731 // cannot introduce new instructions, and there is no way to replace the
732 // terminator of a block without introducing a new instruction.
733 AssertingVH<Instruction> TerminatorVH(&BB->back());
734#endif
735
737 // Iterate over the original function, only adding insts to the worklist
738 // if they actually need to be revisited. This avoids having to pre-init
739 // the worklist with the entire function's worth of instructions.
740 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
741 BI != E;) {
742 assert(!BI->isTerminator());
743 Instruction *I = &*BI;
744 ++BI;
745
746 // We're visiting this instruction now, so make sure it's not in the
747 // worklist from an earlier visit.
748 if (!WorkList.count(I))
749 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
750 }
751
752 while (!WorkList.empty()) {
753 Instruction *I = WorkList.pop_back_val();
754 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
755 }
756 return MadeChange;
757}
758
759//===----------------------------------------------------------------------===//
760// Control Flow Graph Restructuring.
761//
762
764 DomTreeUpdater *DTU) {
765
766 // If BB has single-entry PHI nodes, fold them.
767 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
768 Value *NewVal = PN->getIncomingValue(0);
769 // Replace self referencing PHI with poison, it must be dead.
770 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
771 PN->replaceAllUsesWith(NewVal);
772 PN->eraseFromParent();
773 }
774
775 BasicBlock *PredBB = DestBB->getSinglePredecessor();
776 assert(PredBB && "Block doesn't have a single predecessor!");
777
778 bool ReplaceEntryBB = PredBB->isEntryBlock();
779
780 // DTU updates: Collect all the edges that enter
781 // PredBB. These dominator edges will be redirected to DestBB.
783
784 if (DTU) {
785 // To avoid processing the same predecessor more than once.
787 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
788 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
789 // This predecessor of PredBB may already have DestBB as a successor.
790 if (PredOfPredBB != PredBB)
791 if (SeenPreds.insert(PredOfPredBB).second)
792 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
793 SeenPreds.clear();
794 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
795 if (SeenPreds.insert(PredOfPredBB).second)
796 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
797 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
798 }
799
800 // Zap anything that took the address of DestBB. Not doing this will give the
801 // address an invalid value.
802 if (DestBB->hasAddressTaken()) {
803 BlockAddress *BA = BlockAddress::get(DestBB);
804 Constant *Replacement =
805 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
807 BA->getType()));
808 BA->destroyConstant();
809 }
810
811 // Anything that branched to PredBB now branches to DestBB.
812 PredBB->replaceAllUsesWith(DestBB);
813
814 // Splice all the instructions from PredBB to DestBB.
815 PredBB->getTerminator()->eraseFromParent();
816 DestBB->splice(DestBB->begin(), PredBB);
817 new UnreachableInst(PredBB->getContext(), PredBB);
818
819 // If the PredBB is the entry block of the function, move DestBB up to
820 // become the entry block after we erase PredBB.
821 if (ReplaceEntryBB)
822 DestBB->moveAfter(PredBB);
823
824 if (DTU) {
825 assert(PredBB->size() == 1 &&
827 "The successor list of PredBB isn't empty before "
828 "applying corresponding DTU updates.");
829 DTU->applyUpdatesPermissive(Updates);
830 DTU->deleteBB(PredBB);
831 // Recalculation of DomTree is needed when updating a forward DomTree and
832 // the Entry BB is replaced.
833 if (ReplaceEntryBB && DTU->hasDomTree()) {
834 // The entry block was removed and there is no external interface for
835 // the dominator tree to be notified of this change. In this corner-case
836 // we recalculate the entire tree.
837 DTU->recalculate(*(DestBB->getParent()));
838 }
839 }
840
841 else {
842 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
843 }
844}
845
846/// Return true if we can choose one of these values to use in place of the
847/// other. Note that we will always choose the non-undef value to keep.
848static bool CanMergeValues(Value *First, Value *Second) {
849 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
850}
851
852/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
853/// branch to Succ, into Succ.
854///
855/// Assumption: Succ is the single successor for BB.
856static bool
858 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
859 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
860
861 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
862 << Succ->getName() << "\n");
863 // Shortcut, if there is only a single predecessor it must be BB and merging
864 // is always safe
865 if (Succ->getSinglePredecessor())
866 return true;
867
868 // Look at all the phi nodes in Succ, to see if they present a conflict when
869 // merging these blocks
870 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
871 PHINode *PN = cast<PHINode>(I);
872
873 // If the incoming value from BB is again a PHINode in
874 // BB which has the same incoming value for *PI as PN does, we can
875 // merge the phi nodes and then the blocks can still be merged
877 if (BBPN && BBPN->getParent() == BB) {
878 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
879 BasicBlock *IBB = PN->getIncomingBlock(PI);
880 if (BBPreds.count(IBB) &&
882 PN->getIncomingValue(PI))) {
884 << "Can't fold, phi node " << PN->getName() << " in "
885 << Succ->getName() << " is conflicting with "
886 << BBPN->getName() << " with regard to common predecessor "
887 << IBB->getName() << "\n");
888 return false;
889 }
890 }
891 } else {
892 Value* Val = PN->getIncomingValueForBlock(BB);
893 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
894 // See if the incoming value for the common predecessor is equal to the
895 // one for BB, in which case this phi node will not prevent the merging
896 // of the block.
897 BasicBlock *IBB = PN->getIncomingBlock(PI);
898 if (BBPreds.count(IBB) &&
899 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
900 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
901 << " in " << Succ->getName()
902 << " is conflicting with regard to common "
903 << "predecessor " << IBB->getName() << "\n");
904 return false;
905 }
906 }
907 }
908 }
909
910 return true;
911}
912
915
916/// Determines the value to use as the phi node input for a block.
917///
918/// Select between \p OldVal any value that we know flows from \p BB
919/// to a particular phi on the basis of which one (if either) is not
920/// undef. Update IncomingValues based on the selected value.
921///
922/// \param OldVal The value we are considering selecting.
923/// \param BB The block that the value flows in from.
924/// \param IncomingValues A map from block-to-value for other phi inputs
925/// that we have examined.
926///
927/// \returns the selected value.
929 IncomingValueMap &IncomingValues) {
930 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
931 if (!isa<UndefValue>(OldVal)) {
932 assert((It != IncomingValues.end() &&
933 (!(It->second) || It->second == OldVal)) &&
934 "Expected OldVal to match incoming value from BB!");
935
936 IncomingValues.insert_or_assign(BB, OldVal);
937 return OldVal;
938 }
939
940 if (It != IncomingValues.end() && It->second)
941 return It->second;
942
943 return OldVal;
944}
945
946/// Create a map from block to value for the operands of a
947/// given phi.
948///
949/// This function initializes the map with UndefValue for all predecessors
950/// in BBPreds, and then updates the map with concrete non-undef values
951/// found in the PHI node.
952///
953/// \param PN The phi we are collecting the map for.
954/// \param BBPreds The list of all predecessor blocks to initialize with Undef.
955/// \param IncomingValues [out] The map from block to value for this phi.
957 const PredBlockVector &BBPreds,
958 IncomingValueMap &IncomingValues) {
959 for (BasicBlock *Pred : BBPreds)
960 IncomingValues[Pred] = nullptr;
961
962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
963 Value *V = PN->getIncomingValue(i);
964 if (isa<UndefValue>(V))
965 continue;
966
967 BasicBlock *BB = PN->getIncomingBlock(i);
968 auto It = IncomingValues.find(BB);
969 if (It != IncomingValues.end())
970 It->second = V;
971 }
972}
973
/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value (nullptr = undefined).
                                    const IncomingValueMap &IncomingValues) {
  // Indices of incoming operands that stay undef/poison after the rewrite.
  SmallVector<unsigned> TrueUndefOps;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    // Only undef/poison operands are candidates for replacement.
    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end())
      continue;

    // Keep track of undef/poison incoming values. Those must match, so we fix
    // them up below if needed.
    // Note: this is conservatively correct, but we could try harder and group
    // the undef values per incoming basic block.
    if (!It->second) {
      TrueUndefOps.push_back(i);
      continue;
    }

    // There is a defined value for this incoming block, so map this undef
    // incoming value to the defined value.
    PN->setIncomingValue(i, It->second);
  }

  // If there are both undef and poison values incoming, then convert those
  // values to undef. It is invalid to have different values for the same
  // incoming block.
  unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
    return isa<PoisonValue>(PN->getIncomingValue(i));
  });
  if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
    for (unsigned i : TrueUndefOps)
      // NOTE(review): the loop body line is elided in this extraction;
      // upstream sets each such operand to UndefValue::get(PN->getType()).
      // Confirm against the checked-in source.
  }
}
1017
// Return true only when BB and Succ share a single common predecessor, which
// is stored in \p CommonPred (left null if there is none).
// Only handles cases when BB can't be merged while its predecessors can be
// redirected.
static bool
                              const SmallPtrSetImpl<BasicBlock *> &BBPreds,
                              BasicBlock *&CommonPred) {

  // There must be phis in BB, otherwise BB will be merged into Succ directly
  if (BB->phis().empty() || Succ->phis().empty())
    return false;

  // BB must have predecessors not shared that can be redirected to Succ
  if (!BB->hasNPredecessorsOrMore(2))
    return false;

  // Predecessors ending in an indirectbr cannot be redirected.
  if (any_of(BBPreds, [](const BasicBlock *Pred) {
        return isa<IndirectBrInst>(Pred->getTerminator());
      }))
    return false;

  // Get the single common predecessor of both BB and Succ. Return false
  // when there are more than one common predecessors.
  for (BasicBlock *SuccPred : predecessors(Succ)) {
    if (BBPreds.count(SuccPred)) {
      if (CommonPred)
        return false;
      CommonPred = SuccPred;
    }
  }

  return true;
}
1051
/// Check whether removing \p BB will make the phis in its \p Succ have too
/// many incoming entries. This function does not check whether \p BB is
/// foldable or not.
  // If BB only has one predecessor, then removing it will not introduce more
  // incoming edges for phis.
  if (BB->hasNPredecessors(1))
    return false;
  unsigned NumPreds = pred_size(BB);
  // Count the phis in Succ that would actually grow when BB is removed.
  unsigned NumChangedPhi = 0;
  for (auto &Phi : Succ->phis()) {
    // If the incoming value is a phi and the phi is defined in BB,
    // then removing BB will not increase the total phi entries of the ir.
    if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB)))
      if (IncomingPhi->getParent() == BB)
        continue;
    // Otherwise, we need to add entries to the phi
    NumChangedPhi++;
  }
  // For every phi that needs to be changed, (NumPreds - 1) new entries will be
  // added. If the total increase in phi entries exceeds
  // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
  // introducing too many new phi entries.
  // NOTE(review): the right-hand operand of this comparison (the
  // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock option) is elided in this
  // extraction.
  return (NumPreds - 1) * NumChangedPhi >
}
1078
/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
/// \param CommonPred The common predecessor of BB and PN's BasicBlock
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN,
                                                BasicBlock *CommonPred) {
  // Remove BB's entry from PN; the value it carried is redistributed below.
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  // Map BBPreds to defined values or nullptr (representing undefined values).
  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, BBPreds, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);

      // The edge from CommonPred is handled separately after this loop.
      if (PredBB == CommonPred)
        continue;

      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected =
          selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);

  } else {
    for (BasicBlock *PredBB : BBPreds) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      if (PredBB == CommonPred)
        continue;

      Value *Selected =
          selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldVal, BB);
  }

  // Make any remaining undef operands consistent with the concrete values.
  replaceUndefValuesInPhi(PN, IncomingValues);
}
1154
1156 DomTreeUpdater *DTU) {
1157 assert(BB != &BB->getParent()->getEntryBlock() &&
1158 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1159
1160 // We can't simplify infinite loops.
1161 BasicBlock *Succ = cast<UncondBrInst>(BB->getTerminator())->getSuccessor(0);
1162 if (BB == Succ)
1163 return false;
1164
1166
1167 // The single common predecessor of BB and Succ when BB cannot be killed
1168 BasicBlock *CommonPred = nullptr;
1169
1170 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1171
1172 // Even if we can not fold BB into Succ, we may be able to redirect the
1173 // predecessors of BB to Succ.
1174 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
1175 BB, Succ, BBPreds, CommonPred);
1176
1177 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
1178 return false;
1179
1180 // Check to see if merging these blocks/phis would cause conflicts for any of
1181 // the phi nodes in BB or Succ. If not, we can safely merge.
1182
1183 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1184 // has uses which will not disappear when the PHI nodes are merged. It is
1185 // possible to handle such cases, but difficult: it requires checking whether
1186 // BB dominates Succ, which is non-trivial to calculate in the case where
1187 // Succ has multiple predecessors. Also, it requires checking whether
1188 // constructing the necessary self-referential PHI node doesn't introduce any
1189 // conflicts; this isn't too difficult, but the previous code for doing this
1190 // was incorrect.
1191 //
1192 // Note that if this check finds a live use, BB dominates Succ, so BB is
1193 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1194 // folding the branch isn't profitable in that case anyway.
1195 if (!Succ->getSinglePredecessor()) {
1196 BasicBlock::iterator BBI = BB->begin();
1197 while (isa<PHINode>(*BBI)) {
1198 for (Use &U : BBI->uses()) {
1199 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1200 if (PN->getIncomingBlock(U) != BB)
1201 return false;
1202 } else {
1203 return false;
1204 }
1205 }
1206 ++BBI;
1207 }
1208 }
1209
1210 if (BBPhisMergeable && CommonPred)
1211 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1212 << " and " << Succ->getName() << " : "
1213 << CommonPred->getName() << "\n");
1214
1215 // 'BB' and 'BB->Pred' are loop latches, bail out to presrve inner loop
1216 // metadata.
1217 //
1218 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1219 // current status (that loop metadata is implemented as metadata attached to
1220 // the branch instruction in the loop latch block). To quote from review
1221 // comments, "the current representation of loop metadata (using a loop latch
1222 // terminator attachment) is known to be fundamentally broken. Loop latches
1223 // are not uniquely associated with loops (both in that a latch can be part of
1224 // multiple loops and a loop may have multiple latches). Loop headers are. The
1225 // solution to this problem is also known: Add support for basic block
1226 // metadata, and attach loop metadata to the loop header."
1227 //
1228 // Why bail out:
1229 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1230 // the latch for inner-loop (see reason below), so bail out to prerserve
1231 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1232 // to this inner-loop.
1233 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1234 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1235 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1236 // one self-looping basic block, which is contradictory with the assumption.
1237 //
1238 // To illustrate how inner-loop metadata is dropped:
1239 //
1240 // CFG Before
1241 //
1242 // BB is while.cond.exit, attached with loop metdata md2.
1243 // BB->Pred is for.body, attached with loop metadata md1.
1244 //
1245 // entry
1246 // |
1247 // v
1248 // ---> while.cond -------------> while.end
1249 // | |
1250 // | v
1251 // | while.body
1252 // | |
1253 // | v
1254 // | for.body <---- (md1)
1255 // | | |______|
1256 // | v
1257 // | while.cond.exit (md2)
1258 // | |
1259 // |_______|
1260 //
1261 // CFG After
1262 //
1263 // while.cond1 is the merge of while.cond.exit and while.cond above.
1264 // for.body is attached with md2, and md1 is dropped.
1265 // If LoopSimplify runs later (as a part of loop pass), it could create
1266 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1267 // back), but won't it won't see 'md1' nor restore it for the inner-loop.
1268 //
1269 // entry
1270 // |
1271 // v
1272 // ---> while.cond1 -------------> while.end
1273 // | |
1274 // | v
1275 // | while.body
1276 // | |
1277 // | v
1278 // | for.body <---- (md2)
1279 // |_______| |______|
1280 if (Instruction *TI = BB->getTerminator())
1281 if (TI->hasNonDebugLocLoopMetadata())
1282 for (BasicBlock *Pred : predecessors(BB))
1283 if (Instruction *PredTI = Pred->getTerminator())
1284 if (PredTI->hasNonDebugLocLoopMetadata())
1285 return false;
1286
1287 if (BBKillable)
1288 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1289 else if (BBPhisMergeable)
1290 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1291
1293
1294 if (DTU) {
1295 // To avoid processing the same predecessor more than once.
1297 // All predecessors of BB (except the common predecessor) will be moved to
1298 // Succ.
1299 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1301 predecessors(Succ));
1302 for (auto *PredOfBB : predecessors(BB)) {
1303 // Do not modify those common predecessors of BB and Succ
1304 if (!SuccPreds.contains(PredOfBB))
1305 if (SeenPreds.insert(PredOfBB).second)
1306 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1307 }
1308
1309 SeenPreds.clear();
1310
1311 for (auto *PredOfBB : predecessors(BB))
1312 // When BB cannot be killed, do not remove the edge between BB and
1313 // CommonPred.
1314 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1315 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1316
1317 if (BBKillable)
1318 Updates.push_back({DominatorTree::Delete, BB, Succ});
1319 }
1320
1321 if (isa<PHINode>(Succ->begin())) {
1322 // If there is more than one pred of succ, and there are PHI nodes in
1323 // the successor, then we need to add incoming edges for the PHI nodes
1324 //
1325 const PredBlockVector BBPreds(predecessors(BB));
1326
1327 // Loop over all of the PHI nodes in the successor of BB.
1328 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1329 PHINode *PN = cast<PHINode>(I);
1330 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1331 }
1332 }
1333
1334 if (Succ->getSinglePredecessor()) {
1335 // BB is the only predecessor of Succ, so Succ will end up with exactly
1336 // the same predecessors BB had.
1337 // Copy over any phi, debug or lifetime instruction.
1339 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1340 } else {
1341 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1342 // We explicitly check for such uses for merging phis.
1343 assert(PN->use_empty() && "There shouldn't be any uses here!");
1344 PN->eraseFromParent();
1345 }
1346 }
1347
1348 // If the unconditional branch we replaced contains non-debug llvm.loop
1349 // metadata, we add the metadata to the branch instructions in the
1350 // predecessors.
1351 if (Instruction *TI = BB->getTerminator())
1352 if (TI->hasNonDebugLocLoopMetadata()) {
1353 MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop);
1354 for (BasicBlock *Pred : predecessors(BB))
1355 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1356 }
1357
1358 if (BBKillable) {
1359 // Everything that jumped to BB now goes to Succ.
1360 BB->replaceAllUsesWith(Succ);
1361
1362 if (!Succ->hasName())
1363 Succ->takeName(BB);
1364
1365 // Clear the successor list of BB to match updates applying to DTU later.
1366 if (BB->getTerminator())
1367 BB->back().eraseFromParent();
1368
1369 new UnreachableInst(BB->getContext(), BB);
1370 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1371 "applying corresponding DTU updates.");
1372 } else if (BBPhisMergeable) {
1373 // Everything except CommonPred that jumped to BB now goes to Succ.
1374 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1375 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1376 return UseInst->getParent() != CommonPred &&
1377 BBPreds.contains(UseInst->getParent());
1378 return false;
1379 });
1380 }
1381
1382 if (DTU)
1383 DTU->applyUpdates(Updates);
1384
1385 if (BBKillable)
1386 DeleteDeadBlock(BB, DTU);
1387
1388 return true;
1389}
1390
// Naive O(n^2) duplicate-phi elimination: compare every phi in \p BB against
// every later phi, RAUW duplicates to the first occurrence, and record them
// in \p ToRemove for the caller to erase. Returns true if anything changed.
static bool
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that increment of I must *NOT* be in the iteration_expression, since
  // we don't want to immediately advance when we restart from the beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper square's triangle,
    // we already checked that the lower triangle PHI's aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
      if (ToRemove.contains(DuplicatePN))
        continue;
      if (!DuplicatePN->isIdenticalToWhenDefined(PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(PN);
      ToRemove.insert(DuplicatePN);
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}
1426
// Hash-set based duplicate-phi elimination for \p BB: insert each phi into a
// DenseSet keyed on operands+blocks; a failed insert means a duplicate, which
// is RAUW'd to the existing phi and recorded in \p ToRemove. Returns true if
// anything changed.
static bool
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // DenseSet traits for PHINode* keyed on the phi's structure rather than its
  // pointer identity.
  // NOTE(review): the DenseMapInfo<PHINode *> sentinel returns inside
  // getEmptyKey()/getTombstoneKey(), part of the hash_combine expression, and
  // the PHISet declaration are elided in this extraction.
  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
    }

    static PHINode *getTombstoneKey() {
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //   Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(
                            hash_combine_range(PN->blocks())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(LHS) || isSentinel(RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
      return Result;
    }
  };

  // Set of unique PHINodes.
  // Start the set with enough buckets to stay out of the slow re-hash path
  // for typical block sizes.
  PHISet.reserve(4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    if (ToRemove.contains(PN))
      continue;
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(*Inserted.first);
      ToRemove.insert(PN);
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
1512

  // Tail of the public EliminateDuplicatePHINodes(BasicBlock *) overload:
  // erase the phis the impl collected and report whether anything changed.
  // NOTE(review): the function header and the call to the impl are elided in
  // this extraction.
  for (PHINode *PN : ToRemove)
    PN->eraseFromParent();
  return Changed;
}
1531
                                 const DataLayout &DL) {
  // Try to raise the alignment of the object underlying V (an alloca or a
  // global variable, after stripping pointer casts) up to PrefAlign.
  // Returns the alignment that could actually be established; Align(1) when
  // V's underlying object is not one we can re-align.
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: Ideally, this function would not be called if PrefAlign is smaller
    // than the current alignment, as the known bits calculation should have
    // already taken it into account. However, this is not always the case,
    // as computeKnownBits() has a depth limit, while stripPointerCasts()
    // doesn't.
    Align CurrentAlign = AI->getAlign();
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    MaybeAlign StackAlign = DL.getStackAlignment();
    if (StackAlign && PrefAlign > *StackAlign)
      return CurrentAlign;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GV = dyn_cast<GlobalVariable>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align CurrentAlign = GV->getPointerAlignment(DL);
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GV->canIncreaseAlignment())
      return CurrentAlign;

    // Thread-local globals are capped by the module's max TLS alignment.
    if (GV->isThreadLocal()) {
      unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
      if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
        PrefAlign = Align(MaxTLSAlign);
    }

    GV->setAlignment(PrefAlign);
    return PrefAlign;
  }

  // Neither an alloca nor a global we can adjust.
  return Align(1);
}
1580
                                          const DataLayout &DL,
                                          const Instruction *CxtI,
                                          AssumptionCache *AC,
                                          const DominatorTree *DT) {
  // Compute the alignment provable for pointer V via known bits, optionally
  // bumping the underlying object's alignment up to PrefAlign first.
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
  TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);

  // Known alignment is 2^(number of provably-zero low bits).
  Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));

  if (PrefAlign && *PrefAlign > Alignment)
    Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));

  // We don't need to make any adjustment.
  return Alignment;
}
1605
///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value record for DIVar/DIExpr already attached to
/// the PHI node \p APN; returns true if one exists.
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
  findDbgValues(APN, DbgVariableRecords);
  for (DbgVariableRecord *DVR : DbgVariableRecords) {
    assert(is_contained(DVR->location_ops(), APN));
    // A record matches when both the variable and the expression agree.
    if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
      return true;
  }
  return false;
}
1626
/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
/// describes an alloca'd variable, so we need to use the alloc size of the
/// value when doing the comparison. E.g. an i1 value will be identified as
/// covering an n-bit fragment, if the store size of i1 is at least n bits.
  const DataLayout &DL = DVR->getModule()->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
  // Preferred path: the expression tells us how many bits are active.
  if (std::optional<uint64_t> FragmentSize =
          DVR->getExpression()->getActiveBits(DVR->getVariable()))
    return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DVR->isAddressOfVariable()) {
    // DVR should have exactly 1 location when it is an address.
    assert(DVR->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    // NOTE(review): the dyn_cast_or_null<AllocaInst> of the location operand
    // is elided in this extraction.
    if (auto *AI =
      if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(ValueSize, *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
1659
                                              DILocalVariable *DIVar,
                                              DIExpression *DIExpr,
                                              const DebugLoc &NewLoc,
                                              BasicBlock::iterator Instr) {
  // Create a dbg.value-style DbgVariableRecord and attach it immediately
  // before \p Instr.
  // NOTE(review): the ValueAsMetadata (DVAM) definition is elided in this
  // extraction.
  DbgVariableRecord *DVRec =
      new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
  Instr->getParent()->insertDbgRecordBefore(DVRec, Instr);
}
1670
  // Strip the leading dereference from a dbg.declare-style expression. When
  // the expression starts with DW_OP_LLVM_arg, the deref is the third element
  // and all three are dropped; otherwise just the first element is dropped.
  int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
  return DIExpression::get(DIExpr->getContext(),
                           DIExpr->getElements().drop_front(NumEltDropped));
}
1676
                                           StoreInst *SI, DIBuilder &Builder) {
  // Lower an address-style debug record attached to an alloca into a
  // value-style record that tracks the value being stored by \p SI.
  assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
  auto *DIVar = DVR->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DVR->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // If the alloca describes the variable itself, i.e. the expression in the
  // dbg.declare doesn't start with a dereference, we can perform the
  // conversion if the value covers the entire fragment of DII.
  // If the alloca describes the *address* of DIVar, i.e. DIExpr is
  // *just* a DW_OP_deref, we use DV as is for the dbg.value.
  // We conservatively ignore other dereferences, because the following two are
  // not equivalent:
  //   dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
  //   dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
  // The former is adding 2 to the address of the variable, whereas the latter
  // is adding 2 to the value of the variable. As such, we insist on just a
  // deref expression.
  // NOTE(review): the second operand of the '&&' (the valueCoversEntireFragment
  // call) is elided in this extraction.
  bool CanConvert =
      DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
  if (CanConvert) {
    insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                      SI->getIterator());
    return;
  }

  // FIXME: If storing to a part of the variable described by the dbg.declare,
  // then we want to insert a dbg.value for the corresponding fragment.
  LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
                    << '\n');

  // For now, when there is a store to parts of the variable (but we do not
  // know which part) we insert an dbg.value intrinsic to indicate that we
  // know nothing about the variable's content.
  DV = PoisonValue::get(DV->getType());
  DbgVariableRecord *NewDVR =
      new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
  SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
}
1722
                                      DIBuilder &Builder) {
  // Insert a value-style debug record at the store \p SI, dropping the
  // leading dereference from the record's expression since we now track the
  // stored value directly rather than the variable's address.
  auto *DIVar = DVR->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DVR->getExpression();
  DIExpr = dropInitialDeref(DIExpr);
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                    SI->getIterator());
}
1736
                                           DIBuilder &Builder) {
  // Lower an address-style debug record into a value-style record tracking
  // the value loaded by \p LI.
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  if (!valueCoversEntireFragment(LI->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.

  // Create a DbgVariableRecord directly and insert.
  // NOTE(review): the ValueAsMetadata (LIVAM) definition is elided in this
  // extraction.
  DbgVariableRecord *DV =
      new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
  LI->getParent()->insertDbgRecordAfter(DV, LI);
}
1765
1766/// Determine whether this alloca is either a VLA or an array.
1767static bool isArray(AllocaInst *AI) {
1768 return AI->isArrayAllocation() ||
1769 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1770}
1771
1772/// Determine whether this alloca is a structure.
1773static bool isStructure(AllocaInst *AI) {
1774 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1775}
                                           DIBuilder &Builder) {
  // Lower an address-style debug record into a value-style record tracking
  // the merged value produced by the PHI node \p APN.
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  // Avoid inserting the same record twice for this phi.
  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(APN->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
  if (InsertionPt != BB->end()) {
    insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
                                      InsertionPt);
  }
}
1807
/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
  bool Changed = false;
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  // Collect all declare-style debug info in the function first, since we
  // mutate the function while lowering.
  // NOTE(review): the Dbgs/DVRs container declarations are elided in this
  // extraction.
  for (auto &FI : F) {
    for (Instruction &BI : FI) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);
      for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
        if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
          DVRs.push_back(&DVR);
      }
    }
  }

  // Nothing to lower.
  if (Dbgs.empty() && DVRs.empty())
    return Changed;

  auto LowerOne = [&](DbgVariableRecord *DDI) {
    AllocaInst *AI =
        dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (!AI || isArray(AI) || isStructure(AI))
      return;

    // A volatile load/store means that the alloca can't be elided anyway.
    if (llvm::any_of(AI->users(), [](User *U) -> bool {
          if (LoadInst *LI = dyn_cast<LoadInst>(U))
            return LI->isVolatile();
          if (StoreInst *SI = dyn_cast<StoreInst>(U))
            return SI->isVolatile();
          return false;
        }))
      return;

    // Walk the alloca and any pointer bitcasts of it, converting each
    // load/store user.
    WorkList.push_back(AI);
    while (!WorkList.empty()) {
      const Value *V = WorkList.pop_back_val();
      for (const auto &AIUse : V->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          // Only stores *to* the alloca (operand 1 is the pointer) matter.
          if (AIUse.getOperandNo() == 1)
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that takes a
          // pointer to the variable. Insert a *value* intrinsic that describes
          // the variable by dereferencing the alloca.
          if (!CI->isLifetimeStartOrEnd()) {
            DebugLoc NewLoc = getDebugValueLoc(DDI);
            auto *DerefExpr =
                DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
            insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
                                              DerefExpr, NewLoc,
                                              CI->getIterator());
          }
        } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
          if (BI->getType()->isPointerTy())
            WorkList.push_back(BI);
        }
      }
    }
    DDI->eraseFromParent();
    Changed = true;
  };

  for_each(DVRs, LowerOne);

  // Lowering may leave redundant debug records behind; clean them up.
  if (Changed)
    for (BasicBlock &BB : F)

  return Changed;
}
1892
1893/// Propagate dbg.value records through the newly inserted PHIs.
// NOTE(review): the opening signature line (original line 1894) is not visible
// in this rendering; presumably `void llvm::insertDebugValuesForPHIs(
// BasicBlock *BB, ...)` — confirm against Local.h.
1895                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
1896  assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
// Nothing to propagate if SSA updating inserted no PHIs.
1897  if (InsertedPHIs.size() == 0)
1898    return;
1899
1900  // Map existing PHI nodes to their DbgVariableRecords.
// NOTE(review): the declaration of DbgValueMap (original line 1901) is elided
// here; from the uses below it maps PHINode* -> DbgVariableRecord*.
1902  for (auto &I : *BB) {
1903    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1904      for (Value *V : DVR.location_ops())
1905        if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1906          DbgValueMap.insert({Loc, &DVR});
1907    }
1908  }
// No debug record in BB refers to a PHI, so there is nothing to rewrite.
1909  if (DbgValueMap.size() == 0)
1910    return;
1911
1912  // Map a pair of the destination BB and old DbgVariableRecord to the new
1913  // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
1914  // more than one of the inserted PHIs in the same destination BB, we can
1915  // update the same DbgVariableRecord with all the new PHIs instead of creating
1916  // one copy for each.
// NOTE(review): the first line of NewDbgValueMap's declaration (original line
// 1917) is elided; from its uses it is keyed by {BasicBlock*, old record}.
1918      NewDbgValueMap;
1919  // Then iterate through the new PHIs and look to see if they use one of the
1920  // previously mapped PHIs. If so, create a new DbgVariableRecord that will
1921  // propagate the info through the new PHI. If we use more than one new PHI in
1922  // a single destination BB with the same old dbg.value, merge the updates so
1923  // that we get a single new DbgVariableRecord with all the new PHIs.
1924  for (auto PHI : InsertedPHIs) {
1925    BasicBlock *Parent = PHI->getParent();
1926    // Avoid inserting a debug-info record into an EH block.
1927    if (Parent->getFirstNonPHIIt()->isEHPad())
1928      continue;
1929    for (auto VI : PHI->operand_values()) {
1930      auto V = DbgValueMap.find(VI);
1931      if (V != DbgValueMap.end()) {
1932        DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
1933        auto NewDI = NewDbgValueMap.find({Parent, DbgII});
// First time this (block, record) pair is seen: clone the old record once.
1934        if (NewDI == NewDbgValueMap.end()) {
1935          DbgVariableRecord *NewDbgII = DbgII->clone();
1936          NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1937        }
1938        DbgVariableRecord *NewDbgII = NewDI->second;
1939        // If PHI contains VI as an operand more than once, we may have
1940        // already replaced it in NewDbgII; confirm that it is present.
1941        if (is_contained(NewDbgII->location_ops(), VI))
1942          NewDbgII->replaceVariableLocationOp(VI, PHI);
1943      }
1944    }
1945  }
1946  // Insert the new DbgVariableRecords into their destination blocks.
1947  for (auto DI : NewDbgValueMap) {
1948    BasicBlock *Parent = DI.first.first;
1949    DbgVariableRecord *NewDbgII = DI.second;
1950    auto InsertionPt = Parent->getFirstInsertionPt();
1951    assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1952
1953    Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
1954  }
1955}
1956
// NOTE(review): the first signature line (original line 1957) is elided here;
// presumably `bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,`
// — confirm against Local.h. Rewrites every dbg.declare-style record of
// Address to point at NewAddress, prepending DIExprFlags/Offset to its
// expression. Returns true if any record was updated.
1958                             DIBuilder &Builder, uint8_t DIExprFlags,
1959                             int Offset) {
// NOTE(review): original line 1960 (which populates DVRDeclares, presumably a
// findDVRDeclares(Address) call) is elided in this rendering.
1961
1962  auto ReplaceOne = [&](DbgVariableRecord *DII) {
1963    assert(DII->getVariable() && "Missing variable");
1964    auto *DIExpr = DII->getExpression();
// Prepend the caller-supplied flags/offset so the expression still describes
// the variable after the address is swapped.
1965    DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1966    DII->setExpression(DIExpr);
1967    DII->replaceVariableLocationOp(Address, NewAddress);
1968  };
1969
1970  for_each(DVRDeclares, ReplaceOne);
1971
// True iff at least one declare record referenced the old address.
1972  return !DVRDeclares.empty();
1973}
1974
// Rewrite a single alloca-based debug record to describe NewAddress instead.
// NOTE(review): the first signature line (original line 1975) is elided;
// presumably `static void updateOneDbgValueForAlloca(const DebugLoc &Loc,` —
// confirm. Gives up silently if the expression does not begin with
// DW_OP_deref.
1976                                       DILocalVariable *DIVar,
1977                                       DIExpression *DIExpr, Value *NewAddress,
1978                                       DbgVariableRecord *DVR,
1979                                       DIBuilder &Builder, int Offset) {
1980  assert(DIVar && "Missing variable");
1981
1982  // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
1983  // should do with the alloca pointer is dereference it. Otherwise we don't
1984  // know how to handle it and give up.
1985  if (!DIExpr || DIExpr->getNumElements() < 1 ||
1986      DIExpr->getElement(0) != dwarf::DW_OP_deref)
1987    return;
1988
1989  // Insert the offset before the first deref.
1990  if (Offset)
1991    DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1992
// Install the adjusted expression and retarget location operand 0 (the old
// alloca pointer) at the new address.
1993  DVR->setExpression(DIExpr);
1994  DVR->replaceVariableLocationOp(0u, NewAddress);
1995}
1996
// Retarget all dbg.value-style records that use alloca AI to NewAllocaAddress.
// NOTE(review): the first signature line (original line 1997) and the
// declaration of DPUsers (original line 1999) are elided in this rendering;
// presumably `void llvm::replaceDbgValueForAlloca(AllocaInst *AI, ...)` with a
// SmallVector of DbgVariableRecord* — confirm against Local.h.
1998                                    DIBuilder &Builder, int Offset) {
2000  findDbgValues(AI, DPUsers);
2001
2002  // Replace any DbgVariableRecords that use this alloca.
2003  for (DbgVariableRecord *DVR : DPUsers)
2004    updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2005                               DVR->getExpression(), NewAllocaAddress, DVR,
2006                               Builder, Offset);
2007}
2008
2009/// Salvage the debug information for \p I where possible to do so.
2010/// If salvaging is not possible, mark the value undef.
2016
// Try to salvage the address component of a dbg.assign (record or intrinsic;
// T abstracts over both). On success, rewrites the address and its expression;
// otherwise the address is killed. The value component is untouched here.
2017template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2018  Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2019  // Only instructions can be salvaged at the moment.
2020  if (!I)
2021    return;
2022
2023  assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2024         "address-expression shouldn't have fragment info");
2025
2026  // The address component of a dbg.assign cannot be variadic.
2027  uint64_t CurrentLocOps = 0;
2028  SmallVector<Value *, 4> AdditionalValues;
// NOTE(review): the declaration of Ops (original line 2029, presumably a
// SmallVector<uint64_t>) is elided in this rendering.
2030  Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2031
2032  // Check if the salvage failed.
2033  if (!NewV)
2034    return;
2035
// NOTE(review): the first line of SalvagedExpr's initialization (original line
// 2036, presumably DIExpression::appendOpsToArg) is elided here.
2037      Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2038  assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2039         "address-expression shouldn't have fragment info");
2040
2041  SalvagedExpr = SalvagedExpr->foldConstantMath();
2042
2043  // Salvage succeeds if no additional values are required.
2044  if (AdditionalValues.empty()) {
2045    Assign->setAddress(NewV);
2046    Assign->setAddressExpression(SalvagedExpr);
2047  } else {
// The non-variadic address slot cannot carry extra operands, so give up.
2048    Assign->setKillAddress();
2049  }
2050}
2051
// Salvage every debug user in DPUsers whose location refers to I, rewriting
// each record's expression so it no longer depends on I. Records that cannot
// be salvaged (or that would exceed the limits below) get their locations
// killed. NOTE(review): the signature lines (original lines 2052-2053) are
// elided; presumably `void llvm::salvageDebugInfoForDbgValues(Instruction &I,
// ArrayRef<DbgVariableRecord *> DPUsers)` — confirm against Local.h.
2054  // These are arbitrarily chosen limits on the maximum number of values and
2055  // the maximum size of a debug expression we can salvage up to, used for
2056  // performance reasons.
2057  const unsigned MaxDebugArgs = 16;
2058  const unsigned MaxExpressionSize = 128;
2059  bool Salvaged = false;
2060
2061  for (auto *DVR : DPUsers) {
2062    if (DVR->isDbgAssign()) {
// Salvage the address component separately; the loop body below only handles
// the value component.
2063      if (DVR->getAddress() == &I) {
// NOTE(review): original line 2064 is elided here; presumably
// `salvageDbgAssignAddress(DVR);` — confirm.
2065        Salvaged = true;
2066      }
2067      if (DVR->getValue() != &I)
2068        continue;
2069    }
2070
2071    // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2072    // are implicitly pointing out the value as a DWARF memory location
2073    // description.
// NOTE(review): the initializer of StackValue (original line 2075) is elided;
// from the comment above it presumably tests DVR's record type.
2074    bool StackValue =
2076    auto DVRLocation = DVR->location_ops();
2077    assert(
2078        is_contained(DVRLocation, &I) &&
2079        "DbgVariableIntrinsic must use salvaged instruction as its location");
2080    SmallVector<Value *, 4> AdditionalValues;
2081    // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2082    // must be updated in the DIExpression and potentially have additional
2083    // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2084    // DVRLocation.
2085    Value *Op0 = nullptr;
2086    DIExpression *SalvagedExpr = DVR->getExpression();
2087    auto LocItr = find(DVRLocation, &I);
2088    while (SalvagedExpr && LocItr != DVRLocation.end()) {
// NOTE(review): the declaration of Ops (original line 2089) is elided;
// presumably a SmallVector<uint64_t> of DWARF ops for this location.
2090      unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2091      uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2092      Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2093      if (!Op0)
2094        break;
2095      SalvagedExpr =
2096          DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2097      LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2098    }
2099    // salvageDebugInfoImpl should fail on examining the first element of
2100    // DbgUsers, or none of them.
2101    if (!Op0)
2102      break;
2103
2104    SalvagedExpr = SalvagedExpr->foldConstantMath();
2105    DVR->replaceVariableLocationOp(&I, Op0);
2106    bool IsValidSalvageExpr =
2107        SalvagedExpr->getNumElements() <= MaxExpressionSize;
2108    if (AdditionalValues.empty() && IsValidSalvageExpr) {
2109      DVR->setExpression(SalvagedExpr);
2110    } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2111               IsValidSalvageExpr &&
2112               DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2113                   MaxDebugArgs) {
2114      DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2115    } else {
2116      // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2117      // currently only valid for stack value expressions.
2118      // Also do not salvage if the resulting DIArgList would contain an
2119      // unreasonably large number of values.
2120      DVR->setKillLocation();
2121    }
2122    LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2123    Salvaged = true;
2124  }
2125
2126  if (Salvaged)
2127    return;
2128
// Salvage failed partway: kill every remaining user's location rather than
// leave stale debug info pointing at I.
2129  for (auto *DVR : DPUsers)
2130    DVR->setKillLocation();
2131}
2132
// Translate a GEP into DWARF expression ops appended to Opcodes; returns the
// GEP's base pointer, or nullptr if the offset cannot be computed. Variable
// offsets become extra location operands in AdditionalValues.
// NOTE(review): the first signature line (original line 2133) is elided;
// presumably `static Value *getSalvageOpsForGEP(GetElementPtrInst *GEP,
// const DataLayout &DL,` — confirm.
2134                              uint64_t CurrentLocOps,
2136                              SmallVectorImpl<Value *> &AdditionalValues) {
2137  unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2138  // Rewrite a GEP into a DIExpression.
2139  SmallMapVector<Value *, APInt, 4> VariableOffsets;
2140  APInt ConstantOffset(BitWidth, 0);
2141  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2142    return nullptr;
// A variadic expression needs an explicit DW_OP_LLVM_arg 0 for the base
// pointer if no location operands exist yet.
2143  if (!VariableOffsets.empty() && !CurrentLocOps) {
2144    Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2145    CurrentLocOps = 1;
2146  }
2147  for (const auto &Offset : VariableOffsets) {
2148    AdditionalValues.push_back(Offset.first);
2149    assert(Offset.second.isStrictlyPositive() &&
2150           "Expected strictly positive multiplier for offset.");
// Emit: arg[i] * multiplier + running-total for each variable index.
2151    Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2152                    Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2153                    dwarf::DW_OP_plus});
2154  }
2155  DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2156  return GEP->getOperand(0);
2157}
2158
// Map an LLVM binary opcode to the equivalent DWARF operation, or 0 if there
// is none. NOTE(review): the signature line (original line 2159) is elided;
// presumably `static uint64_t getDwarfOpForBinOp(Instruction::BinaryOps
// Opcode) {` — confirm. Note only the signed division/remainder forms are
// mapped; UDiv/URem fall through to the default and are not salvaged.
2160  switch (Opcode) {
2161  case Instruction::Add:
2162    return dwarf::DW_OP_plus;
2163  case Instruction::Sub:
2164    return dwarf::DW_OP_minus;
2165  case Instruction::Mul:
2166    return dwarf::DW_OP_mul;
2167  case Instruction::SDiv:
2168    return dwarf::DW_OP_div;
2169  case Instruction::SRem:
2170    return dwarf::DW_OP_mod;
2171  case Instruction::Or:
2172    return dwarf::DW_OP_or;
2173  case Instruction::And:
2174    return dwarf::DW_OP_and;
2175  case Instruction::Xor:
2176    return dwarf::DW_OP_xor;
2177  case Instruction::Shl:
2178    return dwarf::DW_OP_shl;
2179  case Instruction::LShr:
2180    return dwarf::DW_OP_shr;
2181  case Instruction::AShr:
2182    return dwarf::DW_OP_shra;
2183  default:
2184    // TODO: Salvage from each kind of binop we know about.
2185    return 0;
2186  }
2187}
2188
// Append DW_OP_LLVM_arg ops for a binop/icmp whose second operand is a
// non-constant SSA value: operand 1 of I becomes a new location operand.
// NOTE(review): one parameter line (original line 2190, presumably
// `SmallVectorImpl<uint64_t> &Opcodes,`) is elided in this rendering.
// CurrentLocOps is taken by value; the caller's count is not updated.
2189static void handleSSAValueOperands(uint64_t CurrentLocOps,
2191                                   SmallVectorImpl<Value *> &AdditionalValues,
2192                                   Instruction *I) {
2193  if (!CurrentLocOps) {
// No location operands yet: make the first operand (the salvaged base value)
// explicit as arg 0 before referencing arg 1.
2194    Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2195    CurrentLocOps = 1;
2196  }
2197  Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2198  AdditionalValues.push_back(I->getOperand(1));
2199}
2200
// Translate a BinaryOperator into DWARF ops appended to Opcodes; returns the
// first operand as the salvaged base value, or nullptr if the opcode or
// constant width is unsupported.
// NOTE(review): the first signature lines (original lines 2201-2202) are
// elided; presumably `static Value *getSalvageOpsForBinOp(BinaryOperator *BI,
// uint64_t CurrentLocOps, SmallVectorImpl<uint64_t> &Opcodes,` — confirm.
2203                               SmallVectorImpl<Value *> &AdditionalValues) {
2204  // Handle binary operations with constant integer operands as a special case.
2205  auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2206  // Values wider than 64 bits cannot be represented within a DIExpression.
2207  if (ConstInt && ConstInt->getBitWidth() > 64)
2208    return nullptr;
2209
2210  Instruction::BinaryOps BinOpcode = BI->getOpcode();
2211  // Push any Constant Int operand onto the expression stack.
2212  if (ConstInt) {
2213    uint64_t Val = ConstInt->getSExtValue();
2214    // Add or Sub Instructions with a constant operand can potentially be
2215    // simplified.
2216    if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2217      uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
// NOTE(review): original line 2218 is elided here; presumably
// `DIExpression::appendOffset(Opcodes, Offset);` — confirm.
2219      return BI->getOperand(0);
2220    }
2221    Opcodes.append({dwarf::DW_OP_constu, Val});
2222  } else {
2223    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2224  }
2225
2226  // Add salvaged binary operator to expression stack, if it has a valid
2227  // representation in a DIExpression.
2228  uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2229  if (!DwarfBinOp)
2230    return nullptr;
2231  Opcodes.push_back(DwarfBinOp);
2232  return BI->getOperand(0);
2233}
2234
// Map an icmp predicate to the equivalent DWARF comparison op, or 0 if there
// is none. NOTE(review): the signature line (original line 2235) is elided;
// presumably `static uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {`
// — confirm.
2236  // The signedness of the operation is implicit in the typed stack, signed and
2237  // unsigned instructions map to the same DWARF opcode.
2238  switch (Pred) {
2239  case CmpInst::ICMP_EQ:
2240    return dwarf::DW_OP_eq;
2241  case CmpInst::ICMP_NE:
2242    return dwarf::DW_OP_ne;
2243  case CmpInst::ICMP_UGT:
2244  case CmpInst::ICMP_SGT:
2245    return dwarf::DW_OP_gt;
2246  case CmpInst::ICMP_UGE:
2247  case CmpInst::ICMP_SGE:
2248    return dwarf::DW_OP_ge;
2249  case CmpInst::ICMP_ULT:
2250  case CmpInst::ICMP_SLT:
2251    return dwarf::DW_OP_lt;
2252  case CmpInst::ICMP_ULE:
2253  case CmpInst::ICMP_SLE:
2254    return dwarf::DW_OP_le;
2255  default:
// Floating-point and other predicates are not representable here.
2256    return 0;
2257  }
2258}
2259
// Translate an ICmpInst into DWARF ops appended to Opcodes; returns the first
// operand as the salvaged base value, or nullptr if unsupported.
// NOTE(review): the first signature lines (original lines 2260-2261) are
// elided; presumably `static Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp,
// uint64_t CurrentLocOps, SmallVectorImpl<uint64_t> &Opcodes,` — confirm.
2262                                SmallVectorImpl<Value *> &AdditionalValues) {
2263  // Handle icmp operations with constant integer operands as a special case.
2264  auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2265  // Values wider than 64 bits cannot be represented within a DIExpression.
2266  if (ConstInt && ConstInt->getBitWidth() > 64)
2267    return nullptr;
2268  // Push any Constant Int operand onto the expression stack.
2269  if (ConstInt) {
// Choose the signed/unsigned constant op to match the compare's signedness.
2270    if (Icmp->isSigned())
2271      Opcodes.push_back(dwarf::DW_OP_consts);
2272    else
2273      Opcodes.push_back(dwarf::DW_OP_constu);
2274    uint64_t Val = ConstInt->getSExtValue();
2275    Opcodes.push_back(Val);
2276  } else {
2277    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2278  }
2279
2280  // Add salvaged binary operator to expression stack, if it has a valid
2281  // representation in a DIExpression.
2282  uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2283  if (!DwarfIcmpOp)
2284    return nullptr;
2285  Opcodes.push_back(DwarfIcmpOp);
2286  return Icmp->getOperand(0);
2287}
2288
// Core salvage dispatcher: given instruction I that is about to die, append
// DWARF ops to Ops describing how to recompute I's value, and return the
// replacement base value (or nullptr if I cannot be salvaged).
// NOTE(review): the signature lines (original lines 2289-2290) are elided;
// presumably `Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t
// CurrentLocOps, SmallVectorImpl<uint64_t> &Ops,` — confirm against Local.h.
2291                                  SmallVectorImpl<Value *> &AdditionalValues) {
2292  auto &M = *I.getModule();
2293  auto &DL = M.getDataLayout();
2294
2295  if (auto *CI = dyn_cast<CastInst>(&I)) {
2296    Value *FromValue = CI->getOperand(0);
2297    // No-op casts are irrelevant for debug info.
2298    if (CI->isNoopCast(DL)) {
2299      return FromValue;
2300    }
2301
2302    Type *Type = CI->getType();
// Treat pointers as integers of the corresponding index width.
2303    if (Type->isPointerTy())
2304      Type = DL.getIntPtrType(Type);
2305    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
// NOTE(review): the isa<> checks themselves (original lines 2307-2308) are
// elided in this rendering; per the comment above they presumably test for
// Trunc/SExt/ZExt — confirm.
2306    if (Type->isVectorTy() ||
2309      return nullptr;
2310
2311    llvm::Type *FromType = FromValue->getType();
2312    if (FromType->isPointerTy())
2313      FromType = DL.getIntPtrType(FromType);
2314
2315    unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2316    unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2317
// Express the width change as a DWARF sign/zero-extension sequence.
2318    auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2319                                          isa<SExtInst>(&I));
2320    Ops.append(ExtOps.begin(), ExtOps.end());
2321    return FromValue;
2322  }
2323
2324  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2325    return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2326  if (auto *BI = dyn_cast<BinaryOperator>(&I))
2327    return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2328  if (auto *IC = dyn_cast<ICmpInst>(&I))
2329    return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2330
2331  // *Not* to do: we should not attempt to salvage load instructions,
2332  // because the validity and lifetime of a dbg.value containing
2333  // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2334  return nullptr;
2335}
2336
2337/// A replacement for a dbg.value expression.
2338using DbgValReplacement = std::optional<DIExpression *>;
2339
2340/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2341/// possibly moving/undefing users to prevent use-before-def. Returns true if
2342/// changes are made.
// NOTE(review): the first signature line (original line 2343) is elided;
// presumably `static bool rewriteDebugUsers(` — confirm.
2344    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2345    function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2346  // Find debug users of From.
// NOTE(review): the declaration of DPUsers (original line 2347) is elided;
// presumably a SmallVector<DbgVariableRecord *>.
2348  findDbgUsers(&From, DPUsers);
2349  if (DPUsers.empty())
2350    return false;
2351
2352  // Prevent use-before-def of To.
2353  bool Changed = false;
2354
2355  SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
// Use-before-def is only a concern when To is itself an instruction with a
// definition point; constants/arguments dominate everything.
2356  if (isa<Instruction>(&To)) {
2357    bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
2358
2359    // DbgVariableRecord implementation of the above.
2360    for (auto *DVR : DPUsers) {
2361      Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2362      Instruction *NextNonDebug = MarkedInstr;
2363
2364      // It's common to see a debug user between From and DomPoint. Move it
2365      // after DomPoint to preserve the variable update without any reordering.
2366      if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2367        LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2368        DVR->removeFromParent();
2369        DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2370        Changed = true;
2371
2372        // Users which otherwise aren't dominated by the replacement value must
2373        // be salvaged or deleted.
2374      } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2375        UndefOrSalvageDVR.insert(DVR);
2376      }
2377    }
2378  }
2379
2380  // Update debug users without use-before-def risk.
2381  for (auto *DVR : DPUsers) {
2382    if (UndefOrSalvageDVR.count(DVR))
2383      continue;
2384
// A nullopt from the rewrite callback means "leave this record alone".
2385    DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2386    if (!DVRepl)
2387      continue;
2388
2389    DVR->replaceVariableLocationOp(&From, &To);
2390    DVR->setExpression(*DVRepl);
2391    LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2392    Changed = true;
2393  }
2394
2395  if (!UndefOrSalvageDVR.empty()) {
2396    // Try to salvage the remaining debug users.
2397    salvageDebugInfo(From);
2398    Changed = true;
2399  }
2400
2401  return Changed;
2402}
2403
2404/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2405/// losslessly preserve the bits and semantics of the value. This predicate is
2406/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2407///
2408/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2409/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2410/// and also does not allow lossless pointer <-> integer conversions.
// NOTE(review): the first signature line (original line 2411) is elided;
// presumably `static bool isBitCastSemanticsPreserving(const DataLayout &DL,
// Type *FromTy,` — confirm.
2412                                         Type *ToTy) {
2413  // Trivially compatible types.
2414  if (FromTy == ToTy)
2415    return true;
2416
2417  // Handle compatible pointer <-> integer conversions.
2418  if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2419    bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
// Non-integral pointers have no stable integer representation, so a cast
// to/from them is not semantics-preserving even at equal width.
2420    bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2421                              !DL.isNonIntegralPointerType(ToTy);
2422    return SameSize && LosslessConversion;
2423  }
2424
2425  // TODO: This is not exhaustive.
2426  return false;
2427}
2428
// Point all debug users of From at To, adapting expressions for no-op
// conversions and integer widening/narrowing. Returns true if any debug info
// changed. NOTE(review): the first signature line (original line 2429) is
// elided; presumably `bool llvm::replaceAllDbgUsesWith(Instruction &From,
// Value &To,` — confirm against Local.h.
2430                                 Instruction &DomPoint, DominatorTree &DT) {
2431  // Exit early if From has no debug users.
2432  if (!From.isUsedByMetadata())
2433    return false;
2434
2435  assert(&From != &To && "Can't replace something with itself");
2436
2437  Type *FromTy = From.getType();
2438  Type *ToTy = To.getType();
2439
// Rewrite callback that keeps each record's expression unchanged.
2440  auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2441    return DVR.getExpression();
2442  };
2443
2444  // Handle no-op conversions.
2445  Module &M = *From.getModule();
2446  const DataLayout &DL = M.getDataLayout();
2447  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2448    return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2449
2450  // Handle integer-to-integer widening and narrowing.
2451  // FIXME: Use DW_OP_convert when it's available everywhere.
2452  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2453    uint64_t FromBits = FromTy->getIntegerBitWidth();
2454    uint64_t ToBits = ToTy->getIntegerBitWidth();
2455    assert(FromBits != ToBits && "Unexpected no-op conversion");
2456
2457    // When the width of the result grows, assume that a debugger will only
2458    // access the low `FromBits` bits when inspecting the source variable.
2459    if (FromBits < ToBits)
2460      return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2461
2462    // The width of the result has shrunk. Use sign/zero extension to describe
2463    // the source variable's high bits.
2464    auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2465      DILocalVariable *Var = DVR.getVariable();
2466
2467      // Without knowing signedness, sign/zero extension isn't possible.
2468      auto Signedness = Var->getSignedness();
// Returning nullopt tells rewriteDebugUsers to skip this record.
2469      if (!Signedness)
2470        return std::nullopt;
2471
2472      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2473      return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2474                                     Signed);
2475    };
2476    return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExtDVR);
2477  }
2478
2479  // TODO: Floating-point conversions, vectors.
2480  return false;
2481}
2482
// Strip debug records from I and replace each of its instruction operands with
// poison (token-typed operands excluded), recording the replaced values in
// PoisonedValues. Returns true if any operand was changed.
// NOTE(review): the first signature line (original line 2483) is elided;
// presumably `bool llvm::handleUnreachableTerminator(` — confirm against
// Local.h.
2484    Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2485  bool Changed = false;
2486  // RemoveDIs: erase debug-info on this instruction manually.
2487  I->dropDbgRecords();
2488  for (Use &U : I->operands()) {
2489    Value *Op = U.get();
// Tokens cannot be replaced with poison; leave them intact.
2490    if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2491      U.set(PoisonValue::get(Op->getType()));
2492      PoisonedValues.push_back(Op);
2493      Changed = true;
2494    }
2495  }
2496
2497  return Changed;
2498}
2499
// Erase every instruction in BB except the terminator and EH pads (and
// token-typed instructions), returning how many were removed.
// NOTE(review): the signature line (original line 2500) is elided; presumably
// `unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {`
// — confirm against Local.h.
2501  unsigned NumDeadInst = 0;
2502  // Delete the instructions backwards, as it has a reduced likelihood of
2503  // having to update as many def-use and use-def chains.
2504  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
// NOTE(review): original lines 2505-2507 are elided in this rendering.
2508  while (EndInst != &BB->front()) {
2509    // Delete the next to last instruction.
2510    Instruction *Inst = &*--EndInst->getIterator();
// NOTE(review): the body of this condition (original line 2512, presumably a
// replaceAllUsesWith of poison) is elided here.
2511    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2513    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2514      // EHPads can't have DbgVariableRecords attached to them, but it might be
2515      // possible for things with token type.
2516      Inst->dropDbgRecords();
// Keep EH pads / token producers: move the deletion boundary past them.
2517      EndInst = Inst;
2518      continue;
2519    }
2520    ++NumDeadInst;
2521    // RemoveDIs: erasing debug-info must be done manually.
2522    Inst->dropDbgRecords();
2523    Inst->eraseFromParent();
2524  }
2525  return NumDeadInst;
2526}
2527
/// Replace I and everything after it in its block with an `unreachable`,
/// updating PHIs in successors, MemorySSA, and the dominator tree as
/// requested. Returns the number of instructions removed.
2528unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2529                                   DomTreeUpdater *DTU,
2530                                   MemorySSAUpdater *MSSAU) {
2531  BasicBlock *BB = I->getParent();
2532
2533  if (MSSAU)
2534    MSSAU->changeToUnreachable(I);
2535
2536  SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
2537
2538  // Loop over all of the successors, removing BB's entry from any PHI
2539  // nodes.
2540  for (BasicBlock *Successor : successors(BB)) {
2541    Successor->removePredecessor(BB, PreserveLCSSA);
// Only collect successors when we will later need DT edge deletions; the set
// dedupes multi-edges to the same successor.
2542    if (DTU)
2543      UniqueSuccessors.insert(Successor);
2544  }
2545  auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2546  UI->setDebugLoc(I->getDebugLoc());
2547
2548  // All instructions after this are dead.
2549  unsigned NumInstrsRemoved = 0;
2550  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2551  while (BBI != BBE) {
2552    if (!BBI->use_empty())
2553      BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2554    BBI++->eraseFromParent();
2555    ++NumInstrsRemoved;
2556  }
2557  if (DTU) {
// NOTE(review): the declaration of Updates (original line 2558, presumably a
// SmallVector<DominatorTree::UpdateType>) is elided in this rendering.
2559    Updates.reserve(UniqueSuccessors.size());
2560    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2561      Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2562    DTU->applyUpdates(Updates);
2563  }
2565  return NumInstrsRemoved;
2566}
2567
// Build (but do not insert) a CallInst equivalent to invoke II: same callee,
// arguments, operand bundles, attributes, debug location, and metadata, with
// profile weights converted to the call form.
// NOTE(review): the signature line (original line 2568) and the OpBundles
// declaration (original line 2570) are elided; presumably
// `CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {` — confirm.
2569  SmallVector<Value *, 8> Args(II->args());
2571  II->getOperandBundlesAsDefs(OpBundles);
2572  CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2573                                       II->getCalledOperand(), Args, OpBundles);
2574  NewCall->setCallingConv(II->getCallingConv());
2575  NewCall->setAttributes(II->getAttributes());
2576  NewCall->setDebugLoc(II->getDebugLoc());
2577  NewCall->copyMetadata(*II);
2578
2579  // If the invoke had profile metadata, try converting them for CallInst.
2580  uint64_t TotalWeight;
2581  if (NewCall->extractProfTotalWeight(TotalWeight)) {
2582    // Set the total weight if it fits into i32, otherwise reset.
2583    MDBuilder MDB(NewCall->getContext());
2584    auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2585                          ? nullptr
2586                          : MDB.createBranchWeights({uint32_t(TotalWeight)});
// Setting nullptr drops the now-unrepresentable profile metadata.
2587    NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2588  }
2589
2590  return NewCall;
2591}
2592
2593// changeToCall - Convert the specified invoke into a normal call.
// NOTE(review): the signature line and NewCall's initialization (original
// lines 2594-2595) are elided; presumably `CallInst *llvm::changeToCall(
// InvokeInst *II, DomTreeUpdater *DTU)` calling createCallMatchingInvoke(II)
// — confirm against Local.h.
2596  NewCall->takeName(II);
2597  NewCall->insertBefore(II->getIterator());
2598  II->replaceAllUsesWith(NewCall);
2599
2600  // Follow the call by a branch to the normal destination.
2601  BasicBlock *NormalDestBB = II->getNormalDest();
2602  auto *BI = UncondBrInst::Create(NormalDestBB, II->getIterator());
2603  // Although it takes place after the call itself, the new branch is still
2604  // performing part of the control-flow functionality of the invoke, so we use
2605  // II's DebugLoc.
2606  BI->setDebugLoc(II->getDebugLoc());
2607
2608  // Update PHI nodes in the unwind destination
2609  BasicBlock *BB = II->getParent();
2610  BasicBlock *UnwindDestBB = II->getUnwindDest();
2611  UnwindDestBB->removePredecessor(BB);
2612  II->eraseFromParent();
// The unwind edge no longer exists; keep the dominator tree in sync.
2613  if (DTU)
2614    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2615  return NewCall;
2616}
2617
2619 BasicBlock *UnwindEdge,
2620 DomTreeUpdater *DTU) {
2621 BasicBlock *BB = CI->getParent();
2622
2623 // Convert this function call into an invoke instruction. First, split the
2624 // basic block.
2625 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2626 CI->getName() + ".noexc");
2627
2628 // Delete the unconditional branch inserted by SplitBlock
2629 BB->back().eraseFromParent();
2630
2631 // Create the new invoke instruction.
2632 SmallVector<Value *, 8> InvokeArgs(CI->args());
2634
2635 CI->getOperandBundlesAsDefs(OpBundles);
2636
2637 // Note: we're round tripping operand bundles through memory here, and that
2638 // can potentially be avoided with a cleverer API design that we do not have
2639 // as of this time.
2640
2641 InvokeInst *II =
2643 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2644 II->setDebugLoc(CI->getDebugLoc());
2645 II->setCallingConv(CI->getCallingConv());
2646 II->setAttributes(CI->getAttributes());
2647 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2648
2649 if (DTU)
2650 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2651
2652 // Make sure that anything using the call now uses the invoke! This also
2653 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2655
2656 // Delete the original call
2657 Split->front().eraseFromParent();
2658 return Split;
2659}
2660
// Worklist DFS from the entry block: marks reachable blocks in Reachable
// (indexed by block number) while simplifying obviously-unreachable code
// (false assumes/guards, calls through null/undef, noreturn calls, stores to
// null/undef), converting nounwind invokes to calls, deduplicating catchswitch
// handlers, and constant-folding terminators. Returns true if anything
// changed. NOTE(review): the first signature line (original line 2661) is
// elided; presumably `static bool markAliveBlocks(Function &F, BitVector
// &Reachable,` — confirm.
2662                            DomTreeUpdater *DTU = nullptr) {
// NOTE(review): the worklist declaration (original line 2663) is elided here.
2664  BasicBlock *BB = &F.front();
2665  Worklist.push_back(BB);
2666  Reachable[BB->getNumber()] = true;
2667  bool Changed = false;
2668  do {
2669    BB = Worklist.pop_back_val();
2670
2671    // Do a quick scan of the basic block, turning any obviously unreachable
2672    // instructions into LLVM unreachable insts. The instruction combining pass
2673    // canonicalizes unreachable insts into stores to null or undef.
2674    for (Instruction &I : *BB) {
2675      if (auto *CI = dyn_cast<CallInst>(&I)) {
2676        Value *Callee = CI->getCalledOperand();
2677        // Handle intrinsic calls.
2678        if (Function *F = dyn_cast<Function>(Callee)) {
2679          auto IntrinsicID = F->getIntrinsicID();
2680          // Assumptions that are known to be false are equivalent to
2681          // unreachable. Also, if the condition is undefined, then we make the
2682          // choice most beneficial to the optimizer, and choose that to also be
2683          // unreachable.
2684          if (IntrinsicID == Intrinsic::assume) {
2685            if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2686              // Don't insert a call to llvm.trap right before the unreachable.
2687              changeToUnreachable(CI, false, DTU);
2688              Changed = true;
// The block's instruction list was just truncated; restart from the worklist.
2689              break;
2690            }
2691          } else if (IntrinsicID == Intrinsic::experimental_guard) {
2692            // A call to the guard intrinsic bails out of the current
2693            // compilation unit if the predicate passed to it is false. If the
2694            // predicate is a constant false, then we know the guard will bail
2695            // out of the current compile unconditionally, so all code following
2696            // it is dead.
2697            //
2698            // Note: unlike in llvm.assume, it is not "obviously profitable" for
2699            // guards to treat `undef` as `false` since a guard on `undef` can
2700            // still be useful for widening.
2701            if (match(CI->getArgOperand(0), m_Zero()))
2702              if (!isa<UnreachableInst>(CI->getNextNode())) {
2703                changeToUnreachable(CI->getNextNode(), false, DTU);
2704                Changed = true;
2705                break;
2706              }
2707          }
2708        } else if ((isa<ConstantPointerNull>(Callee) &&
2709                    !NullPointerIsDefined(CI->getFunction(),
2710                                          cast<PointerType>(Callee->getType())
2711                                              ->getAddressSpace())) ||
2712                   isa<UndefValue>(Callee)) {
// Calling through null (where null is UB) or undef can never execute.
2713          changeToUnreachable(CI, false, DTU);
2714          Changed = true;
2715          break;
2716        }
2717        if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2718          // If we found a call to a no-return function, insert an unreachable
2719          // instruction after it. Make sure there isn't *already* one there
2720          // though.
2721          if (!isa<UnreachableInst>(CI->getNextNode())) {
2722            // Don't insert a call to llvm.trap right before the unreachable.
2723            changeToUnreachable(CI->getNextNode(), false, DTU);
2724            Changed = true;
2725          }
2726          break;
2727        }
2728      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2729        // Store to undef and store to null are undefined and used to signal
2730        // that they should be changed to unreachable by passes that can't
2731        // modify the CFG.
2732
2733        // Don't touch volatile stores.
2734        if (SI->isVolatile()) continue;
2735
2736        Value *Ptr = SI->getOperand(1);
2737
// NOTE(review): original line 2739 (presumably the ConstantPointerNull half of
// this disjunction) is elided in this rendering.
2738        if (isa<UndefValue>(Ptr) ||
2740             !NullPointerIsDefined(SI->getFunction(),
2741                                   SI->getPointerAddressSpace()))) {
2742          changeToUnreachable(SI, false, DTU);
2743          Changed = true;
2744          break;
2745        }
2746      }
2747    }
2748
2749    Instruction *Terminator = BB->getTerminator();
2750    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2751      // Turn invokes that call 'nounwind' functions into ordinary calls.
2752      Value *Callee = II->getCalledOperand();
2753      if ((isa<ConstantPointerNull>(Callee) &&
2754           !NullPointerIsDefined(BB->getParent())) ||
2755          isa<UndefValue>(Callee)) {
2756        changeToUnreachable(II, false, DTU);
2757        Changed = true;
2758      } else {
2759        if (II->doesNotReturn() &&
2760            !isa<UnreachableInst>(II->getNormalDest()->front())) {
2761          // If we found an invoke of a no-return function,
2762          // create a new empty basic block with an `unreachable` terminator,
2763          // and set it as the normal destination for the invoke,
2764          // unless that is already the case.
2765          // Note that the original normal destination could have other uses.
2766          BasicBlock *OrigNormalDest = II->getNormalDest();
2767          OrigNormalDest->removePredecessor(II->getParent());
2768          LLVMContext &Ctx = II->getContext();
2769          BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2770              Ctx, OrigNormalDest->getName() + ".unreachable",
2771              II->getFunction(), OrigNormalDest);
// Creating a block may grow the function's block numbering; keep the
// Reachable bit vector in step.
2772          Reachable.resize(II->getFunction()->getMaxBlockNumber());
2773          auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
2774          UI->setDebugLoc(DebugLoc::getTemporary());
2775          II->setNormalDest(UnreachableNormalDest);
2776          if (DTU)
2777            DTU->applyUpdates(
2778                {{DominatorTree::Delete, BB, OrigNormalDest},
2779                 {DominatorTree::Insert, BB, UnreachableNormalDest}});
2780          Changed = true;
2781        }
2782        if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2783          if (II->use_empty() && !II->mayHaveSideEffects()) {
2784            // jump to the normal destination branch.
2785            BasicBlock *NormalDestBB = II->getNormalDest();
2786            BasicBlock *UnwindDestBB = II->getUnwindDest();
2787            UncondBrInst::Create(NormalDestBB, II->getIterator());
2788            UnwindDestBB->removePredecessor(II->getParent());
2789            II->eraseFromParent();
2790            if (DTU)
2791              DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2792          } else
2793            changeToCall(II, DTU);
2794          Changed = true;
2795        }
2796      }
2797    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2798      // Remove catchpads which cannot be reached.
2799      struct CatchPadDenseMapInfo {
// NOTE(review): the bodies of getEmptyKey/getTombstoneKey (original lines
// 2801 and 2805) are elided; presumably they forward to
// DenseMapInfo<CatchPadInst *> — confirm.
2800        static CatchPadInst *getEmptyKey() {
2802        }
2803
2804        static CatchPadInst *getTombstoneKey() {
2806        }
2807
2808        static unsigned getHashValue(CatchPadInst *CatchPad) {
2809          return static_cast<unsigned>(hash_combine_range(
2810              CatchPad->value_op_begin(), CatchPad->value_op_end()));
2811        }
2812
2813        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2814          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2815              RHS == getEmptyKey() || RHS == getTombstoneKey())
2816            return LHS == RHS;
// Structural comparison so handlers with identical catchpads dedupe.
2817          return LHS->isIdenticalTo(RHS);
2818        }
2819      };
2820
2821      SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2822      // Set of unique CatchPads.
// NOTE(review): the first line of the HandlerSet declaration (original line
// 2823) and the Empty sentinel declaration (original line 2826) are elided in
// this rendering.
2824                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2825          HandlerSet;
2827      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2828                                             E = CatchSwitch->handler_end();
2829           I != E; ++I) {
2830        BasicBlock *HandlerBB = *I;
2831        if (DTU)
2832          ++NumPerSuccessorCases[HandlerBB];
2833        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHIIt());
2834        if (!HandlerSet.insert({CatchPad, Empty}).second) {
2835          if (DTU)
2836            --NumPerSuccessorCases[HandlerBB];
2837          CatchSwitch->removeHandler(I);
2838          --I;
2839          --E;
2840          Changed = true;
2841        }
2842      }
2843      if (DTU) {
2844        std::vector<DominatorTree::UpdateType> Updates;
// Only delete a CFG edge when its last remaining case was removed.
2845        for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2846          if (I.second == 0)
2847            Updates.push_back({DominatorTree::Delete, BB, I.first});
2848        DTU->applyUpdates(Updates);
2849      }
2850    }
2851
2852    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2853    for (BasicBlock *Successor : successors(BB)) {
2854      if (!Reachable[Successor->getNumber()]) {
2855        Worklist.push_back(Successor);
2856        Reachable[Successor->getNumber()] = true;
2857      }
2858    }
2859  } while (!Worklist.empty());
2860  return Changed;
2861}
2862
// Removes the unwind edge from BB's terminator: an invoke becomes a call; a
// cleanupret / catchswitch is recreated with a null unwind destination
// ("unwind to caller"). Returns the replacement terminator.
// NOTE(review): the scraped text dropped original line 2863 just above — the
// signature (presumably `Instruction *llvm::removeUnwindEdge(BasicBlock *BB,
// DomTreeUpdater *DTU) {`); confirm against upstream Local.cpp.
2864 Instruction *TI = BB->getTerminator();
2865
// Invokes are handled by rewriting them into plain calls.
2866 if (auto *II = dyn_cast<InvokeInst>(TI))
2867 return changeToCall(II, DTU);
2868
2869 Instruction *NewTI;
2870 BasicBlock *UnwindDest;
2871
// For cleanupret / catchswitch, build a replacement terminator whose unwind
// destination is cleared (nullptr means "unwind to caller").
2872 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2873 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
2874 UnwindDest = CRI->getUnwindDest();
2875 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2876 auto *NewCatchSwitch = CatchSwitchInst::Create(
2877 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2878 CatchSwitch->getName(), CatchSwitch->getIterator());
2879 for (BasicBlock *PadBB : CatchSwitch->handlers())
2880 NewCatchSwitch->addHandler(PadBB);
2881
2882 NewTI = NewCatchSwitch;
2883 UnwindDest = CatchSwitch->getUnwindDest();
2884 } else {
2885 llvm_unreachable("Could not find unwind successor");
2886 }
2887
// Transfer the name/debug-loc, detach the old unwind successor, swap the
// terminators, and keep the dominator tree in sync if a DTU was provided.
2888 NewTI->takeName(TI);
2889 NewTI->setDebugLoc(TI->getDebugLoc());
2890 UnwindDest->removePredecessor(BB);
2891 TI->replaceAllUsesWith(NewTI);
2892 TI->eraseFromParent();
2893 if (DTU)
2894 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2895 return NewTI;
2896}
2897
2898/// removeUnreachableBlocks - Remove blocks that are not reachable, even
2899/// if they are in a dead cycle. Return true if a change was made, false
2900/// otherwise.
// NOTE(review): the scraped text dropped original line 2901 here — the first
// line of the signature (presumably `bool llvm::removeUnreachableBlocks(
// Function &F, DomTreeUpdater *DTU,`); confirm against upstream.
2902 MemorySSAUpdater *MSSAU) {
// Reachable is indexed by basic-block number; markAliveBlocks marks every
// block reachable from the entry and may itself simplify the CFG.
2903 SmallVector<bool, 16> Reachable(F.getMaxBlockNumber());
2904 bool Changed = markAliveBlocks(F, Reachable, DTU);
2905
2906 // Are there any blocks left to actually delete?
2907 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2908 for (BasicBlock &BB : F) {
2909 // Skip reachable basic blocks
2910 if (Reachable[BB.getNumber()])
2911 continue;
2912 // Skip already-deleted blocks
2913 if (DTU && DTU->isBBPendingDeletion(&BB))
2914 continue;
2915 BlocksToRemove.insert(&BB);
2916 }
2917
2918 if (BlocksToRemove.empty())
2919 return Changed;
2920
2921 Changed = true;
2922 NumRemoved += BlocksToRemove.size();
2923
// Tell MemorySSA before the blocks disappear so it can drop their accesses.
2924 if (MSSAU)
2925 MSSAU->removeBlocks(BlocksToRemove);
2926
2927 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2928
2929 return Changed;
2930}
2931
// Merges J's metadata into K (the surviving instruction) so that K's metadata
// remains conservatively correct for both original instructions. DoesKMove
// indicates that K is moved to J's position (so control-dependent metadata
// must be intersected rather than kept). Known kinds are merged per-kind
// below; unknown kinds are dropped from K.
// NOTE(review): the scraped text dropped several original lines in this
// function (2936, 3008, 3014, 3020, 3038, 3046, 3048); they are flagged
// inline — confirm each against upstream Local.cpp before editing.
2932/// If AAOnly is set, only intersect alias analysis metadata and preserve other
2933/// known metadata. Unknown metadata is always dropped.
2934static void combineMetadata(Instruction *K, const Instruction *J,
2935 bool DoesKMove, bool AAOnly = false) {
// NOTE(review): dropped line 2936 — presumably the declaration
// `SmallVector<std::pair<unsigned, MDNode *>, 8> Metadata;`.
2937 K->getAllMetadataOtherThanDebugLoc(Metadata);
2938 for (const auto &MD : Metadata) {
2939 unsigned Kind = MD.first;
2940 MDNode *JMD = J->getMetadata(Kind);
2941 MDNode *KMD = MD.second;
2942
2943 // TODO: Assert that this switch is exhaustive for fixed MD kinds.
2944 switch (Kind) {
2945 default:
2946 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2947 break;
2948 case LLVMContext::MD_dbg:
2949 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2950 case LLVMContext::MD_DIAssignID:
2951 if (!AAOnly)
2952 K->mergeDIAssignID(J);
2953 break;
2954 case LLVMContext::MD_tbaa:
2955 if (DoesKMove)
2956 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2957 break;
2958 case LLVMContext::MD_alias_scope:
2959 if (DoesKMove)
2960 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2961 break;
2962 case LLVMContext::MD_noalias:
2963 case LLVMContext::MD_mem_parallel_loop_access:
2964 if (DoesKMove)
2965 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2966 break;
2967 case LLVMContext::MD_access_group:
2968 if (DoesKMove)
2969 K->setMetadata(LLVMContext::MD_access_group,
2970 intersectAccessGroups(K, J));
2971 break;
2972 case LLVMContext::MD_range:
2973 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2974 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2975 break;
2976 case LLVMContext::MD_nofpclass:
2977 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2978 K->setMetadata(Kind, MDNode::getMostGenericNoFPClass(JMD, KMD));
2979 break;
2980 case LLVMContext::MD_fpmath:
2981 if (!AAOnly)
2982 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2983 break;
2984 case LLVMContext::MD_invariant_load:
2985 // If K moves, only set the !invariant.load if it is present in both
2986 // instructions.
2987 if (DoesKMove)
2988 K->setMetadata(Kind, JMD);
2989 break;
2990 case LLVMContext::MD_nonnull:
2991 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2992 K->setMetadata(Kind, JMD);
2993 break;
2994 case LLVMContext::MD_invariant_group:
2995 // Preserve !invariant.group in K.
2996 break;
2997 // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
2998 // from being removed as unknown metadata. The actual merging is handled
2999 // separately below.
3000 case LLVMContext::MD_prof:
3001 case LLVMContext::MD_mmra:
3002 case LLVMContext::MD_memprof:
3003 case LLVMContext::MD_callsite:
3004 break;
3005 case LLVMContext::MD_callee_type:
3006 if (!AAOnly) {
3007 K->setMetadata(LLVMContext::MD_callee_type,
// NOTE(review): dropped line 3008 — presumably
// `MDNode::getMergedCalleeTypeMetadata(KMD, JMD));`.
3009 }
3010 break;
3011 case LLVMContext::MD_align:
3012 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
3013 K->setMetadata(
// NOTE(review): dropped line 3014 — presumably
// `Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));`.
3015 break;
3016 case LLVMContext::MD_dereferenceable:
3017 case LLVMContext::MD_dereferenceable_or_null:
3018 if (!AAOnly && DoesKMove)
3019 K->setMetadata(Kind,
// NOTE(review): dropped line 3020 — presumably
// `MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));`.
3021 break;
3022 case LLVMContext::MD_preserve_access_index:
3023 // Preserve !preserve.access.index in K.
3024 break;
3025 case LLVMContext::MD_noundef:
3026 // If K does move, keep noundef if it is present in both instructions.
3027 if (!AAOnly && DoesKMove)
3028 K->setMetadata(Kind, JMD);
3029 break;
3030 case LLVMContext::MD_nontemporal:
3031 // Preserve !nontemporal if it is present on both instructions.
3032 if (!AAOnly)
3033 K->setMetadata(Kind, JMD);
3034 break;
3035 case LLVMContext::MD_noalias_addrspace:
3036 if (DoesKMove)
3037 K->setMetadata(Kind,
// NOTE(review): dropped line 3038 — presumably a merge call such as
// `MDNode::getMostGenericNoaliasAddrspace(JMD, KMD));`.
3039 break;
3040 case LLVMContext::MD_nosanitize:
3041 // Preserve !nosanitize if both K and J have it.
3042 K->setMetadata(Kind, JMD);
3043 break;
3044 case LLVMContext::MD_captures:
3045 K->setMetadata(
// NOTE(review): dropped lines 3046 and 3048 around here — presumably the
// union of capture components, e.g. `LLVMContext::MD_captures,
// MDNode::fromCaptureComponents(` ... `MDNode::toCaptureComponents(KMD)));`.
3047 K->getContext(), MDNode::toCaptureComponents(JMD) |
3049 break;
3050 case LLVMContext::MD_alloc_token:
3051 // Preserve !alloc_token if both K and J have it, and they are equal.
3052 if (KMD == JMD)
3053 K->setMetadata(Kind, JMD);
3054 else
3055 K->setMetadata(Kind, nullptr);
3056 break;
3057 }
3058 }
3059 // Set !invariant.group from J if J has it. If both instructions have it
3060 // then we will just pick it from J - even when they are different.
3061 // Also make sure that K is load or store - f.e. combining bitcast with load
3062 // could produce bitcast with invariant.group metadata, which is invalid.
3063 // FIXME: we should try to preserve both invariant.group md if they are
3064 // different, but right now instruction can only have one invariant.group.
3065 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3066 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3067 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3068
3069 // Merge MMRAs.
3070 // This is handled separately because we also want to handle cases where K
3071 // doesn't have tags but J does.
3072 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3073 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3074 if (JMMRA || KMMRA) {
3075 K->setMetadata(LLVMContext::MD_mmra,
3076 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3077 }
3078
3079 // Merge memprof metadata.
3080 // Handle separately to support cases where only one instruction has the
3081 // metadata.
3082 auto *JMemProf = J->getMetadata(LLVMContext::MD_memprof);
3083 auto *KMemProf = K->getMetadata(LLVMContext::MD_memprof);
3084 if (!AAOnly && (JMemProf || KMemProf)) {
3085 K->setMetadata(LLVMContext::MD_memprof,
3086 MDNode::getMergedMemProfMetadata(KMemProf, JMemProf));
3087 }
3088
3089 // Merge callsite metadata.
3090 // Handle separately to support cases where only one instruction has the
3091 // metadata.
3092 auto *JCallSite = J->getMetadata(LLVMContext::MD_callsite);
3093 auto *KCallSite = K->getMetadata(LLVMContext::MD_callsite);
3094 if (!AAOnly && (JCallSite || KCallSite)) {
3095 K->setMetadata(LLVMContext::MD_callsite,
3096 MDNode::getMergedCallsiteMetadata(KCallSite, JCallSite));
3097 }
3098
3099 // Merge prof metadata.
3100 // Handle separately to support cases where only one instruction has the
3101 // metadata.
3102 auto *JProf = J->getMetadata(LLVMContext::MD_prof);
3103 auto *KProf = K->getMetadata(LLVMContext::MD_prof);
3104 if (!AAOnly && (JProf || KProf)) {
3105 K->setMetadata(LLVMContext::MD_prof,
3106 MDNode::getMergedProfMetadata(KProf, JProf, K, J));
3107 }
3108}
3109
// Public wrapper around the static combineMetadata(): merge J's metadata
// into K for CSE-style replacement.
// NOTE(review): the scraped text dropped original line 3110 — the first line
// of the signature (presumably `void llvm::combineMetadataForCSE(
// Instruction *K, const Instruction *J,`); confirm against upstream.
3111 bool DoesKMove) {
3112 combineMetadata(K, J, DoesKMove);
3113}
3114
// Intersect only the alias-analysis metadata of K and J (AAOnly=true); other
// known metadata on K is preserved by combineMetadata in this mode.
// NOTE(review): the scraped text dropped original line 3115 — the signature
// (presumably `void llvm::combineAAMetadata(Instruction *K, const
// Instruction *J) {`); confirm against upstream.
3116 combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
3117}
3118
// Copies all load-relevant metadata from Source to Dest, translating kinds
// that depend on the loaded type (!nonnull, !range, alignment/deref, and
// !nofpclass) when the two loads have different types.
3119void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
// NOTE(review): the scraped text dropped original line 3120 — presumably the
// declaration `SmallVector<std::pair<unsigned, MDNode *>, 8> MD;`.
3121 Source.getAllMetadata(MD);
3122 MDBuilder MDB(Dest.getContext());
3123 Type *NewType = Dest.getType();
3124 const DataLayout &DL = Source.getDataLayout();
3125 for (const auto &MDPair : MD) {
3126 unsigned ID = MDPair.first;
3127 MDNode *N = MDPair.second;
3128 // Note, essentially every kind of metadata should be preserved here! This
3129 // routine is supposed to clone a load instruction changing *only its type*.
3130 // The only metadata it makes sense to drop is metadata which is invalidated
3131 // when the pointer type changes. This should essentially never be the case
3132 // in LLVM, but we explicitly switch over only known metadata to be
3133 // conservatively correct. If you are adding metadata to LLVM which pertains
3134 // to loads, you almost certainly want to add it here.
3135 switch (ID) {
3136 case LLVMContext::MD_dbg:
3137 case LLVMContext::MD_tbaa:
3138 case LLVMContext::MD_prof:
3139 case LLVMContext::MD_fpmath:
3140 case LLVMContext::MD_tbaa_struct:
3141 case LLVMContext::MD_invariant_load:
3142 case LLVMContext::MD_alias_scope:
3143 case LLVMContext::MD_noalias:
3144 case LLVMContext::MD_nontemporal:
3145 case LLVMContext::MD_mem_parallel_loop_access:
3146 case LLVMContext::MD_access_group:
3147 case LLVMContext::MD_noundef:
3148 case LLVMContext::MD_noalias_addrspace:
3149 // All of these directly apply.
3150 Dest.setMetadata(ID, N);
3151 break;
3152
3153 case LLVMContext::MD_nonnull:
// copyNonnullMetadata knows how to turn !nonnull into !range for integer
// destinations.
3154 copyNonnullMetadata(Source, N, Dest);
3155 break;
3156
3157 case LLVMContext::MD_align:
3158 case LLVMContext::MD_dereferenceable:
3159 case LLVMContext::MD_dereferenceable_or_null:
3160 // These only directly apply if the new type is also a pointer.
3161 if (NewType->isPointerTy())
3162 Dest.setMetadata(ID, N);
3163 break;
3164
3165 case LLVMContext::MD_range:
3166 copyRangeMetadata(DL, Source, N, Dest);
3167 break;
3168
3169 case LLVMContext::MD_nofpclass:
3170 // This only applies if the floating-point type interpretation. This
3171 // should handle degenerate cases like casting between a scalar and single
3172 // element vector.
3173 if (NewType->getScalarType() == Source.getType()->getScalarType())
3174 Dest.setMetadata(ID, N);
3175 break;
3176 }
3177 }
3178}
3179
// Patches the replacement instruction so that it is no more "restrictive"
// (poison flags, attributes, metadata) than the instruction it replaces.
// NOTE(review): the scraped text dropped original line 3180 just above — the
// signature (presumably `void llvm::patchReplacementInstruction(
// Instruction *I, Value *Repl) {`); confirm against upstream.
3181 auto *ReplInst = dyn_cast<Instruction>(Repl);
3182 if (!ReplInst)
3183 return;
3184
3185 // Patch the replacement so that it is not more restrictive than the value
3186 // being replaced.
3187 WithOverflowInst *UnusedWO;
3188 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3189 // overflowing binary operator, nuw/nsw flags may no longer hold.
3190 if (isa<OverflowingBinaryOperator>(ReplInst) &&
// NOTE(review): dropped line 3191 — presumably the second half of the
// condition: `match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))`.
3192 ReplInst->dropPoisonGeneratingFlags();
3193 // Note that if 'I' is a load being replaced by some operation,
3194 // for example, by an arithmetic operation, then andIRFlags()
3195 // would just erase all math flags from the original arithmetic
3196 // operation, which is clearly not wanted and not needed.
3197 else if (!isa<LoadInst>(I))
3198 ReplInst->andIRFlags(I);
3199
3200 // Handle attributes.
3201 if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) {
3202 if (auto *CB2 = dyn_cast<CallBase>(I)) {
3203 bool Success = CB1->tryIntersectAttributes(CB2);
3204 assert(Success && "We should not be trying to sink callbases "
3205 "with non-intersectable attributes");
3206 // For NDEBUG Compile.
3207 (void)Success;
3208 }
3209 }
3210
3211 // FIXME: If both the original and replacement value are part of the
3212 // same control-flow region (meaning that the execution of one
3213 // guarantees the execution of the other), then we can combine the
3214 // noalias scopes here and do better than the general conservative
3215 // answer used in combineMetadata().
3216
3217 // In general, GVN unifies expressions over different control-flow
3218 // regions, and so we need a conservative combination of the noalias
3219 // scopes.
3220 combineMetadataForCSE(ReplInst, I, false);
3221}
3222
3223template <typename ShouldReplaceFn>
3224static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3225 const ShouldReplaceFn &ShouldReplace) {
3226 assert(From->getType() == To->getType());
3227
3228 unsigned Count = 0;
3229 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3230 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
3231 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3232 continue;
3233 if (!ShouldReplace(U))
3234 continue;
3235 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3236 From->printAsOperand(dbgs());
3237 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3238 U.set(To);
3239 ++Count;
3240 }
3241 return Count;
3242}
3243
// Replaces every use of From with To, except for uses inside From's own
// basic block; returns the number of uses replaced.
// NOTE(review): the scraped text dropped original line 3244 just above — the
// signature (presumably `unsigned llvm::replaceNonLocalUsesWith(
// Instruction *From, Value *To) {`); confirm against upstream.
3245 assert(From->getType() == To->getType());
3246 auto *BB = From->getParent();
3247 unsigned Count = 0;
3248
// Early-increment range: U.set() mutates the use list being walked.
3249 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3250 auto *I = cast<Instruction>(U.getUser());
3251 if (I->getParent() == BB)
3252 continue;
3253 U.set(To);
3254 ++Count;
3255 }
3256 return Count;
3257}
3258
// Overload: replace uses of From dominated by the CFG edge Root.
// NOTE(review): the scraped text dropped original line 3259 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWith(
// Value *From, Value *To,`); confirm against upstream.
3260 DominatorTree &DT,
3261 const BasicBlockEdge &Root) {
3262 auto Dominates = [&](const Use &U) { return DT.dominates(Root, U); };
3263 return ::replaceDominatedUsesWith(From, To, Dominates);
3264}
3265
// Overload: replace uses of From dominated by basic block BB.
// NOTE(review): the scraped text dropped original line 3266 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWith(
// Value *From, Value *To,`); confirm against upstream.
3267 DominatorTree &DT,
3268 const BasicBlock *BB) {
3269 auto Dominates = [&](const Use &U) { return DT.dominates(BB, U); };
3270 return ::replaceDominatedUsesWith(From, To, Dominates);
3271}
3272
// Overload: replace uses of From dominated by instruction I.
// NOTE(review): the scraped text dropped original line 3273 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWith(
// Value *From, Value *To,`); confirm against upstream.
3274 DominatorTree &DT,
3275 const Instruction *I) {
3276 auto Dominates = [&](const Use &U) { return DT.dominates(I, U); };
3277 return ::replaceDominatedUsesWith(From, To, Dominates);
3278}
3279
// Conditional variant: replace uses dominated by edge Root that also satisfy
// the caller-provided ShouldReplace predicate.
// NOTE(review): the scraped text dropped original line 3280 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWithIf(`);
// confirm against upstream.
3281 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3282 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3283 auto DominatesAndShouldReplace = [&](const Use &U) {
3284 return DT.dominates(Root, U) && ShouldReplace(U, To);
3285 };
3286 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3287}
3288
// Conditional variant: replace uses dominated by block BB that also satisfy
// ShouldReplace.
// NOTE(review): the scraped text dropped original line 3289 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWithIf(`);
// confirm against upstream.
3290 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3291 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3292 auto DominatesAndShouldReplace = [&](const Use &U) {
3293 return DT.dominates(BB, U) && ShouldReplace(U, To);
3294 };
3295 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3296}
3297
// Conditional variant: replace uses dominated by instruction I that also
// satisfy ShouldReplace.
// NOTE(review): the scraped text dropped original line 3298 — the first line
// of the signature (presumably `unsigned llvm::replaceDominatedUsesWithIf(`);
// confirm against upstream.
3299 Value *From, Value *To, DominatorTree &DT, const Instruction *I,
3300 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3301 auto DominatesAndShouldReplace = [&](const Use &U) {
3302 return DT.dominates(I, U) && ShouldReplace(U, To);
3303 };
3304 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3305}
3306
// Returns true if the call is known not to require a GC safepoint: either it
// is explicitly marked "gc-leaf-function", it is one of the intrinsics that
// never take safepoints, or it is a recognized library call.
// NOTE(review): the scraped text dropped original line 3307 just above — the
// first line of the signature (presumably `bool llvm::callsGCLeafFunction(
// const CallBase *Call,`); confirm against upstream.
3308 // Check if the function is specifically marked as a gc leaf function.
3309 if (Call->hasFnAttr("gc-leaf-function"))
3310 return true;
3311 if (const Function *F = Call->getCalledFunction()) {
3312 if (F->hasFnAttribute("gc-leaf-function"))
3313 return true;
3314
3315 if (auto IID = F->getIntrinsicID()) {
3316 // Most LLVM intrinsics do not take safepoints.
3317 return IID != Intrinsic::experimental_gc_statepoint &&
3318 IID != Intrinsic::experimental_deoptimize &&
3319 IID != Intrinsic::memcpy_element_unordered_atomic &&
3320 IID != Intrinsic::memmove_element_unordered_atomic;
3321 }
3322 }
3323
3324 // Lib calls can be materialized by some passes, and won't be
3325 // marked as 'gc-leaf-function.' All available Libcalls are
3326 // GC-leaf.
3327 LibFunc LF;
3328 if (TLI.getLibFunc(*Call, LF)) {
3329 return TLI.has(LF);
3330 }
3331
3332 return false;
3333}
3335
// Translates !nonnull metadata from OldLI onto NewLI: copied directly for
// pointer loads, or converted into an equivalent "not zero" !range for
// integer loads.
// NOTE(review): the scraped text dropped original line 3336 just above — the
// first line of the signature (presumably `void llvm::copyNonnullMetadata(
// const LoadInst &OldLI, MDNode *N,`); confirm against upstream.
3337 LoadInst &NewLI) {
3338 auto *NewTy = NewLI.getType();
3339
3340 // This only directly applies if the new type is also a pointer.
3341 if (NewTy->isPointerTy()) {
3342 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3343 return;
3344 }
3345
3346 // The only other translation we can do is to integral loads with !range
3347 // metadata.
3348 if (!NewTy->isIntegerTy())
3349 return;
3350
// Build the wrapped range [NullInt+1, NullInt), i.e. "any value except the
// integer representation of null".
3351 MDBuilder MDB(NewLI.getContext());
3352 const Value *Ptr = OldLI.getPointerOperand();
3353 auto *ITy = cast<IntegerType>(NewTy);
3354 auto *NullInt = ConstantExpr::getPtrToInt(
// NOTE(review): dropped line 3355 — presumably
// `ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);`.
3356 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3357 NewLI.setMetadata(LLVMContext::MD_range,
3358 MDB.createRange(NonNullInt, NullInt));
3359}
3360
// Translates !range metadata from OldLI onto NewLI: copied directly when the
// type is unchanged, or converted into !nonnull when a same-width pointer
// load is known (via the range) to exclude zero.
// NOTE(review): the scraped text dropped original line 3361 just above — the
// first line of the signature (presumably `void llvm::copyRangeMetadata(
// const DataLayout &DL, const LoadInst &OldLI,`); confirm against upstream.
3362 MDNode *N, LoadInst &NewLI) {
3363 auto *NewTy = NewLI.getType();
3364 // Simply copy the metadata if the type did not change.
3365 if (NewTy == OldLI.getType()) {
3366 NewLI.setMetadata(LLVMContext::MD_range, N);
3367 return;
3368 }
3369
3370 // Give up unless it is converted to a pointer where there is a single very
3371 // valuable mapping we can do reliably.
3372 // FIXME: It would be nice to propagate this in more ways, but the type
3373 // conversions make it hard.
3374 if (!NewTy->isPointerTy())
3375 return;
3376
3377 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3378 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3379 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3380 MDNode *NN = MDNode::get(OldLI.getContext(), {});
3381 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3382 }
3383}
3384
// Erases every debug-variable record that refers to the instruction.
// NOTE(review): the scraped text dropped original lines 3385-3386 just above
// — presumably the signature `void llvm::dropDebugUsers(Instruction &I) {`
// and the declaration `SmallVector<DbgVariableRecord *, 1> DPUsers;`;
// confirm against upstream.
3387 findDbgUsers(&I, DPUsers);
3388 for (auto *DVR : DPUsers)
3389 DVR->eraseFromParent();
3390}
3391
// Moves all non-terminator instructions of BB into DomBlock before InsertPt,
// stripping debug records/locations and UB-implying attrs/metadata that are
// no longer valid once the instructions become unconditionally executed.
// NOTE(review): the scraped text dropped original line 3392 just above — the
// first line of the signature (presumably `void llvm::hoistAllInstructionsInto(
// BasicBlock *DomBlock, Instruction *InsertPt,`); confirm against upstream.
3393 BasicBlock *BB) {
3394 // Since we are moving the instructions out of its basic block, we do not
3395 // retain their original debug locations (DILocations) and debug intrinsic
3396 // instructions.
3397 //
3398 // Doing so would degrade the debugging experience.
3399 //
3400 // FIXME: Issue #152767: debug info should also be the same as the
3401 // original branch, **if** the user explicitly indicated that (for sampling
3402 // PGO)
3403 //
3404 // Currently, when hoisting the instructions, we take the following actions:
3405 // - Remove their debug intrinsic instructions.
3406 // - Set their debug locations to the values from the insertion point.
3407 //
3408 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3409 // need to be deleted, is because there will not be any instructions with a
3410 // DILocation in either branch left after performing the transformation. We
3411 // can only insert a dbg.value after the two branches are joined again.
3412 //
3413 // See PR38762, PR39243 for more details.
3414 //
3415 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3416 // encode predicated DIExpressions that yield different results on different
3417 // code paths.
3418
3419 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3420 Instruction *I = &*II;
3421 I->dropUBImplyingAttrsAndMetadata();
3422 if (I->isUsedByMetadata())
3423 dropDebugUsers(*I);
3424 // RemoveDIs: drop debug-info too as the following code does.
3425 I->dropDbgRecords();
3426 if (I->isDebugOrPseudoInst()) {
3427 // Remove DbgInfo and pseudo probe Intrinsics.
3428 II = I->eraseFromParent();
3429 continue;
3430 }
3431 I->setDebugLoc(InsertPt->getDebugLoc());
3432 ++II;
3433 }
// Splice everything except BB's terminator into DomBlock at InsertPt.
3434 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3435 BB->getTerminator()->getIterator());
3436}
3437
// Builds a DIExpression describing the constant C for debug info, handling
// integers (including i1), small floats via their bit pattern, and pointer
// constants (null / inttoptr of a constant int). Returns nullptr when no
// expression can be formed.
// NOTE(review): the scraped text dropped original line 3438 just above — the
// signature (presumably `static DIExpression *getExpressionForConstant(
// DIBuilder &DIB, const Constant &C, Type &Ty) {`); confirm against upstream.
3439 Type &Ty) {
3440 // Create integer constant expression.
3441 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3442 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3443 std::optional<int64_t> InitIntOpt;
// i1 must be zero-extended (sign-extending a set bit would give -1).
3444 if (API.getBitWidth() == 1)
3445 InitIntOpt = API.tryZExtValue();
3446 else
3447 InitIntOpt = API.trySExtValue();
3448 return InitIntOpt ? DIB.createConstantValueExpression(
3449 static_cast<uint64_t>(*InitIntOpt))
3450 : nullptr;
3451 };
3452
3453 if (isa<ConstantInt>(C))
3454 return createIntegerExpression(C);
3455
// Floats of at most 64 bits are encoded through their raw bit pattern.
3456 auto *FP = dyn_cast<ConstantFP>(&C);
3457 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3458 const APFloat &APF = FP->getValueAPF();
3459 APInt const &API = APF.bitcastToAPInt();
3460 if (uint64_t Temp = API.getZExtValue())
3461 return DIB.createConstantValueExpression(Temp);
3462 return DIB.createConstantValueExpression(*API.getRawData());
3463 }
3464
3465 if (!Ty.isPointerTy())
3466 return nullptr;
3467
// NOTE(review): dropped line 3468 — presumably the guard
// `if (isa<ConstantPointerNull>(&C))` for the null-pointer case below.
3469 return DIB.createConstantValueExpression(0);
3470
3471 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3472 if (CE->getOpcode() == Instruction::IntToPtr) {
3473 const Value *V = CE->getOperand(0);
3474 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3475 return createIntegerExpression(*CI);
3476 }
3477 return nullptr;
3478}
3479
// Rewrites the debug-variable records attached to Inst so that any location
// operand (or dbg.assign address) found in Mapping is replaced with its
// mapped value.
// NOTE(review): the scraped text dropped original line 3480 just above — the
// signature (presumably `void llvm::remapDebugVariable(ValueToValueMapTy
// &Mapping, Instruction *Inst) {`); confirm against upstream.
3481 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3482 for (auto *Op : Set) {
3483 auto I = Mapping.find(Op);
3484 if (I != Mapping.end())
3485 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3486 }
3487 };
3488 auto RemapAssignAddress = [&Mapping](auto *DA) {
3489 auto I = Mapping.find(DA->getAddress());
3490 if (I != Mapping.end())
3491 DA->setAddress(I->second);
3492 };
3493 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3494 RemapDebugOperands(&DVR, DVR.location_ops());
// dbg.assign records carry an extra address operand that must be remapped
// separately from the location operands.
3495 if (DVR.isDbgAssign())
3496 RemapAssignAddress(&DVR);
3497 }
3498}
3499
3500namespace {
3501
3502/// A potential constituent of a bitreverse or bswap expression. See
3503/// collectBitParts for a fuller explanation.
3504struct BitPart {
// Provenance is sized to the bit width of the value being analyzed; every
// entry starts as Unset until a source bit is identified.
3505 BitPart(Value *P, unsigned BW) : Provider(P) {
3506 Provenance.resize(BW);
3507 }
3508
3509 /// The Value that this is a bitreverse/bswap of.
3510 Value *Provider;
3511
3512 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3513 /// in Provider becomes bit B in the result of this expression.
3514 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3515
// Sentinel for "this result bit has no known source bit" (e.g. zero bits).
3516 enum { Unset = -1 };
3517};
3518
3519} // end anonymous namespace
3520
3521/// Analyze the specified subexpression and see if it is capable of providing
3522/// pieces of a bswap or bitreverse. The subexpression provides a potential
3523/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3524/// the output of the expression came from a corresponding bit in some other
3525/// value. This function is recursive, and the end result is a mapping of
3526/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3527/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3528///
3529/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3530/// that the expression deposits the low byte of %X into the high byte of the
3531/// result and that all other bits are zero. This expression is accepted and a
3532/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3533/// [0-7].
3534///
3535/// For vector types, all analysis is performed at the per-element level. No
3536/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3537/// constant masks must be splatted across all elements.
3538///
3539/// To avoid revisiting values, the BitPart results are memoized into the
3540/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3541/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3542/// store BitParts objects, not pointers. As we need the concept of a nullptr
3543/// BitParts (Value has been analyzed and the analysis failed), we an Optional
3544/// BitParts (Value has been analyzed and the analysis failed), we use an Optional
3545///
3546/// Because we pass around references into \c BPS, we must use a container that
3547/// does not invalidate internal references (std::map instead of DenseMap).
3548static const std::optional<BitPart> &
3549collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3550 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3551 bool &FoundRoot) {
3552 auto [I, Inserted] = BPS.try_emplace(V);
3553 if (!Inserted)
3554 return I->second;
3555
3556 auto &Result = I->second;
3557 auto BitWidth = V->getType()->getScalarSizeInBits();
3558
3559 // Can't do integer/elements > 128 bits.
3560 if (BitWidth > 128)
3561 return Result;
3562
3563 // Prevent stack overflow by limiting the recursion depth
3565 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3566 return Result;
3567 }
3568
3569 if (auto *I = dyn_cast<Instruction>(V)) {
3570 Value *X, *Y;
3571 const APInt *C;
3572
3573 // If this is an or instruction, it may be an inner node of the bswap.
3574 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3575 // Check we have both sources and they are from the same provider.
3576 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3577 Depth + 1, FoundRoot);
3578 if (!A || !A->Provider)
3579 return Result;
3580
3581 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3582 Depth + 1, FoundRoot);
3583 if (!B || A->Provider != B->Provider)
3584 return Result;
3585
3586 // Try and merge the two together.
3587 Result = BitPart(A->Provider, BitWidth);
3588 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3589 if (A->Provenance[BitIdx] != BitPart::Unset &&
3590 B->Provenance[BitIdx] != BitPart::Unset &&
3591 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3592 return Result = std::nullopt;
3593
3594 if (A->Provenance[BitIdx] == BitPart::Unset)
3595 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3596 else
3597 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3598 }
3599
3600 return Result;
3601 }
3602
3603 // If this is a logical shift by a constant, recurse then shift the result.
3604 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3605 const APInt &BitShift = *C;
3606
3607 // Ensure the shift amount is defined.
3608 if (BitShift.uge(BitWidth))
3609 return Result;
3610
3611 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3612 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3613 return Result;
3614
3615 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3616 Depth + 1, FoundRoot);
3617 if (!Res)
3618 return Result;
3619 Result = Res;
3620
3621 // Perform the "shift" on BitProvenance.
3622 auto &P = Result->Provenance;
3623 if (I->getOpcode() == Instruction::Shl) {
3624 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3625 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3626 } else {
3627 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3628 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3629 }
3630
3631 return Result;
3632 }
3633
3634 // If this is a logical 'and' with a mask that clears bits, recurse then
3635 // unset the appropriate bits.
3636 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3637 const APInt &AndMask = *C;
3638
3639 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3640 // early exit.
3641 unsigned NumMaskedBits = AndMask.popcount();
3642 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3643 return Result;
3644
3645 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3646 Depth + 1, FoundRoot);
3647 if (!Res)
3648 return Result;
3649 Result = Res;
3650
3651 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3652 // If the AndMask is zero for this bit, clear the bit.
3653 if (AndMask[BitIdx] == 0)
3654 Result->Provenance[BitIdx] = BitPart::Unset;
3655 return Result;
3656 }
3657
3658 // If this is a zext instruction zero extend the result.
3659 if (match(V, m_ZExt(m_Value(X)))) {
3660 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3661 Depth + 1, FoundRoot);
// NOTE(review): this is the tail of collectBitParts() — its signature and the
// earlier operand cases begin before this excerpt (the cross-reference index
// records "Definition Local.cpp:3549"). The names Res, Result, X, Y, C, I,
// BitWidth and FoundRoot are all introduced above this point.
3662 if (!Res)
3663 return Result;
3664 
// Extension case: the low NarrowBitWidth bits keep the provenance of the
// narrow source; the newly created high bits carry no source bits (Unset).
3665 Result = BitPart(Res->Provider, BitWidth);
3666 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3667 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3668 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3669 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3670 Result->Provenance[BitIdx] = BitPart::Unset;
3671 return Result;
3672 }
3673 
3674 // If this is a truncate instruction, extract the lower bits.
3675 if (match(V, m_Trunc(m_Value(X)))) {
3676 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3677 Depth + 1, FoundRoot);
3678 if (!Res)
3679 return Result;
3680 
// Keep only the low BitWidth provenance entries of the wider source.
3681 Result = BitPart(Res->Provider, BitWidth);
3682 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3683 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3684 return Result;
3685 }
3686 
3687 // BITREVERSE - most likely due to us previous matching a partial
3688 // bitreverse.
3689 if (match(V, m_BitReverse(m_Value(X)))) {
3690 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3691 Depth + 1, FoundRoot);
3692 if (!Res)
3693 return Result;
3694 
// Mirror every bit position across the full width.
3695 Result = BitPart(Res->Provider, BitWidth);
3696 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3697 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3698 return Result;
3699 }
3700 
3701 // BSWAP - most likely due to us previous matching a partial bswap.
3702 if (match(V, m_BSwap(m_Value(X)))) {
3703 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3704 Depth + 1, FoundRoot);
3705 if (!Res)
3706 return Result;
3707 
// Mirror whole bytes while keeping the bit order inside each byte.
3708 unsigned ByteWidth = BitWidth / 8;
3709 Result = BitPart(Res->Provider, BitWidth);
3710 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3711 unsigned ByteBitOfs = ByteIdx * 8;
3712 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3713 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3714 Res->Provenance[ByteBitOfs + BitIdx];
3715 }
3716 return Result;
3717 }
3718 
3719 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3720 // amount (modulo).
3721 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3722 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
// Only constant shift amounts are handled here (m_APInt).
3723 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3724 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3725 // We can treat fshr as a fshl by flipping the modulo amount.
3726 unsigned ModAmt = C->urem(BitWidth);
3727 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3728 ModAmt = BitWidth - ModAmt;
3729 
3730 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3731 if (!MatchBitReversals && (ModAmt % 8) != 0)
3732 return Result;
3733 
3734 // Check we have both sources and they are from the same provider.
3735 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3736 Depth + 1, FoundRoot);
3737 if (!LHS || !LHS->Provider)
3738 return Result;
3739 
3740 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3741 Depth + 1, FoundRoot);
3742 if (!RHS || LHS->Provider != RHS->Provider)
3743 return Result;
3744 
// The low ModAmt result bits come from the top of Y (RHS); the remaining
// bits are X's (LHS's) bits shifted up by ModAmt.
3745 unsigned StartBitRHS = BitWidth - ModAmt;
3746 Result = BitPart(LHS->Provider, BitWidth);
3747 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3748 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3749 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3750 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3751 return Result;
3752 }
3753 }
3754 
3755 // If we've already found a root input value then we're never going to merge
3756 // these back together.
3757 if (FoundRoot)
3758 return Result;
3759 
3760 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3761 // be the root input value to the bswap/bitreverse.
3762 FoundRoot = true;
3763 Result = BitPart(V, BitWidth);
// Identity provenance: each bit of the root value provides itself.
3764 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3765 Result->Provenance[BitIdx] = BitIdx;
3766 return Result;
3767}
3768
/// Return true if carrying bit \p From of a \p BitWidth-wide value to bit
/// position \p To is exactly what a bswap would do: the bit keeps its offset
/// inside its byte while the containing byte is mirrored end-to-end.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // The position within the byte must be unchanged.
  if ((From & 7u) != (To & 7u))
    return false;
  // The containing byte must land in the mirrored slot.
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
3779
/// Return true if moving bit \p From to bit \p To matches a full-width
/// bitreverse: the two positions mirror each other across \p BitWidth bits.
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From + To == BitWidth - 1;
}
3784
// NOTE(review): the line introducing this definition (source line 3785,
// presumably `bool llvm::recognizeBSwapOrBitReverseIdiom(` upstream) was lost
// in extraction — confirm against upstream Local.cpp. The function tries to
// rewrite an or/fshl/fshr/bswap bit-scatter idiom into a single llvm.bswap or
// llvm.bitreverse call, appending every instruction it creates to
// InsertedInsts and returning true on success.
3786 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3787 SmallVectorImpl<Instruction *> &InsertedInsts) {
// Only these root instructions can recombine scattered bytes/bits.
3788 if (!match(I, m_Or(m_Value(), m_Value())) &&
3789 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3790 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3791 !match(I, m_BSwap(m_Value())))
3792 return false;
3793 if (!MatchBSwaps && !MatchBitReversals)
3794 return false;
3795 Type *ITy = I->getType();
3796 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
3797 ITy->getScalarSizeInBits() > 128)
3798 return false; // Can't do integer/elements > 128 bits.
3799 
3800 // Try to find all the pieces corresponding to the bswap.
3801 bool FoundRoot = false;
3802 std::map<Value *, std::optional<BitPart>> BPS;
3803 const auto &Res =
3804 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3805 if (!Res)
3806 return false;
3807 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3808 assert(all_of(BitProvenance,
3809 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3810 "Illegal bit provenance index");
3811 
3812 // If the upper bits are zero, then attempt to perform as a truncated op.
3813 Type *DemandedTy = ITy;
3814 if (BitProvenance.back() == BitPart::Unset) {
// Strip trailing Unset bits and shrink to an integer of the remaining width.
3815 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3816 BitProvenance = BitProvenance.drop_back();
3817 if (BitProvenance.empty())
3818 return false; // TODO - handle null value?
3819 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3820 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3821 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3822 }
3823 
3824 // Check BitProvenance hasn't found a source larger than the result type.
3825 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3826 if (DemandedBW > ITy->getScalarSizeInBits())
3827 return false;
3828 
3829 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3830 // only byteswap values with an even number of bytes.
3831 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3832 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3833 bool OKForBitReverse = MatchBitReversals;
3834 for (unsigned BitIdx = 0;
3835 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
// Unset bits don't constrain the permutation, but they must be cleared in
// the final result via DemandedMask (see the 'mask' And below).
3836 if (BitProvenance[BitIdx] == BitPart::Unset) {
3837 DemandedMask.clearBit(BitIdx);
3838 continue;
3839 }
3840 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3841 DemandedBW);
3842 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3843 BitIdx, DemandedBW);
3844 }
3845 
3846 Intrinsic::ID Intrin;
3847 if (OKForBSwap)
3848 Intrin = Intrinsic::bswap;
3849 else if (OKForBitReverse)
3850 Intrin = Intrinsic::bitreverse;
3851 else
3852 return false;
3853 
3854 Function *F =
3855 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy);
3856 Value *Provider = Res->Provider;
3857 
3858 // We may need to truncate the provider.
3859 if (DemandedTy != Provider->getType()) {
3860 auto *Trunc =
3861 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
3862 InsertedInsts.push_back(Trunc);
3863 Provider = Trunc;
3864 }
3865 
3866 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
3867 InsertedInsts.push_back(Result);
3868 
// Clear the bits whose provenance was Unset.
3869 if (!DemandedMask.isAllOnes()) {
3870 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3871 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
3872 InsertedInsts.push_back(Result);
3873 }
3874 
3875 // We may need to zeroextend back to the result type.
3876 if (ITy != Result->getType()) {
3877 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
3878 InsertedInsts.push_back(ExtInst);
3879 }
3880 
3881 return true;
3882}
3883
3884 // CodeGen has special handling for some string functions that may replace
3885 // them with target-specific intrinsics. Since that'd skip our interceptors
3886 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3887 // we mark affected calls as NoBuiltin, which will disable optimization
3888 // in CodeGen.
// NOTE(review): the declaration line (source line 3889, presumably
// `void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(`) was lost in
// extraction — confirm against upstream Local.cpp.
3890 CallInst *CI, const TargetLibraryInfo *TLI) {
3891 Function *F = CI->getCalledFunction();
3892 LibFunc Func;
// Only direct calls to named, non-local functions that TLI recognizes as
// lib functions with optimized codegen — and that may access memory — get
// pinned to the library call via NoBuiltin.
3893 if (F && !F->hasLocalLinkage() && F->hasName() &&
3894 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3895 !F->doesNotAccessMemory())
3896 CI->addFnAttr(Attribute::NoBuiltin);
3897 }
3898
// NOTE(review): the declaration line (source line 3899, presumably
// `bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned
// OpIdx) {`) was lost in extraction — confirm against upstream Local.cpp.
// Decides whether operand OpIdx of I may be replaced with a variable value
// (e.g. a phi/select result) without violating IR constraints.
3900 const auto *Op = I->getOperand(OpIdx);
3901 // We can't have a PHI with a metadata or token type.
3902 if (Op->getType()->isMetadataTy() || Op->getType()->isTokenLikeTy())
3903 return false;
3904 
3905 // swifterror pointers can only be used by a load, store, or as a swifterror
3906 // argument; swifterror pointers are not allowed to be used in select or phi
3907 // instructions.
3908 if (Op->isSwiftError())
3909 return false;
3910 
3911 // Protected pointer field loads/stores should be paired with the intrinsic
3912 // to avoid unnecessary address escapes.
3913 if (auto *II = dyn_cast<IntrinsicInst>(Op))
3914 if (II->getIntrinsicID() == Intrinsic::protected_field_ptr)
3915 return false;
3916 
3917 // Cannot replace alloca argument with phi/select.
3918 if (I->isLifetimeStartOrEnd())
3919 return false;
3920 
3921 // Early exit.
// NOTE(review): source line 3922 — the condition guarding this early exit
// (upstream: `if (!isa<Constant>(Op))`) — was lost in extraction.
3923 return true;
3924 
3925 switch (I->getOpcode()) {
3926 default:
3927 return true;
3928 case Instruction::Call:
3929 case Instruction::Invoke: {
3930 const auto &CB = cast<CallBase>(*I);
3931 
3932 // Can't handle inline asm. Skip it.
3933 if (CB.isInlineAsm())
3934 return false;
3935 
3936 // Constant bundle operands may need to retain their constant-ness for
3937 // correctness.
3938 if (CB.isBundleOperand(OpIdx))
3939 return false;
3940 
3941 if (OpIdx < CB.arg_size()) {
3942 // Some variadic intrinsics require constants in the variadic arguments,
3943 // which currently aren't markable as immarg.
3944 if (isa<IntrinsicInst>(CB) &&
3945 OpIdx >= CB.getFunctionType()->getNumParams()) {
3946 // This is known to be OK for stackmap.
3947 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3948 }
3949 
3950 // gcroot is a special case, since it requires a constant argument which
3951 // isn't also required to be a simple ConstantInt.
3952 if (CB.getIntrinsicID() == Intrinsic::gcroot)
3953 return false;
3954 
3955 // Some intrinsic operands are required to be immediates.
3956 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3957 }
3958 
3959 // It is never allowed to replace the call argument to an intrinsic, but it
3960 // may be possible for a call.
3961 return !isa<IntrinsicInst>(CB);
3962 }
3963 case Instruction::ShuffleVector:
3964 // Shufflevector masks are constant.
3965 return OpIdx != 2;
3966 case Instruction::Switch:
3967 case Instruction::ExtractValue:
3968 // All operands apart from the first are constant.
3969 return OpIdx == 0;
3970 case Instruction::InsertValue:
3971 // All operands apart from the first and the second are constant.
3972 return OpIdx < 2;
3973 case Instruction::Alloca:
3974 // Static allocas (constant size in the entry block) are handled by
3975 // prologue/epilogue insertion so they're free anyway. We definitely don't
3976 // want to make them non-constant.
3977 return !cast<AllocaInst>(I)->isStaticAlloca();
3978 case Instruction::GetElementPtr:
3979 if (OpIdx == 0)
3980 return true;
// NOTE(review): source line 3981 — the initialization of `It` (upstream:
// `gep_type_iterator It = gep_type_begin(I);`) — was lost in extraction.
3982 for (auto E = std::next(It, OpIdx); It != E; ++It)
// Struct indices must stay constant; only array/vector indices may vary.
3983 if (It.isStruct())
3984 return false;
3985 return true;
3986 }
3987 }
3988
// NOTE(review): the declaration line (source line 3989, presumably
// `Value *llvm::invertCondition(Value *Condition) {`) was lost in extraction
// — confirm against upstream Local.cpp. Produces a value computing the
// logical negation of Condition, reusing an existing inversion when one is
// available before creating a new `not` instruction.
3990 // First: Check if it's a constant
3991 if (Constant *C = dyn_cast<Constant>(Condition))
3992 return ConstantExpr::getNot(C);
3993 
3994 // Second: If the condition is already inverted, return the original value
3995 Value *NotCondition;
3996 if (match(Condition, m_Not(m_Value(NotCondition))))
3997 return NotCondition;
3998 
// Determine the block a fresh inversion would live in: the defining
// instruction's block, or the entry block for arguments.
3999 BasicBlock *Parent = nullptr;
4000 Instruction *Inst = dyn_cast<Instruction>(Condition);
4001 if (Inst)
4002 Parent = Inst->getParent();
4003 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4004 Parent = &Arg->getParent()->getEntryBlock();
4005 assert(Parent && "Unsupported condition to invert");
4006 
4007 // Third: Check all the users for an invert
4008 for (User *U : Condition->users())
// NOTE(review): source line 4009 — the cast that introduces `I` (upstream:
// `if (auto *I = dyn_cast<Instruction>(U))`) — was lost in extraction.
4010 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4011 return I;
4012 
4013 // Last option: Create a new instruction
4014 auto *Inverted =
4015 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
// Insert right after the defining instruction when possible; PHIs (and
// arguments) instead get the inversion at the block's first insertion point.
4016 if (Inst && !isa<PHINode>(Inst))
4017 Inverted->insertAfter(Inst->getIterator());
4018 else
4019 Inverted->insertBefore(Parent->getFirstInsertionPt());
4020 return Inverted;
4021 }
4022
// NOTE(review): the declaration line (source line 4023, presumably
// `bool llvm::inferAttributesFromOthers(Function &F) {`) was lost in
// extraction — confirm against upstream Local.cpp. Derives function
// attributes implied by ones F already carries; returns true if any
// attribute was added.
4024 // Note: We explicitly check for attributes rather than using cover functions
4025 // because some of the cover functions include the logic being implemented.
4026 
4027 bool Changed = false;
4028 // readnone + not convergent implies nosync
4029 if (!F.hasFnAttribute(Attribute::NoSync) &&
4030 F.doesNotAccessMemory() && !F.isConvergent()) {
4031 F.setNoSync();
4032 Changed = true;
4033 }
4034 
4035 // readonly implies nofree
4036 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4037 F.setDoesNotFreeMemory();
4038 Changed = true;
4039 }
4040 
4041 // willreturn implies mustprogress
4042 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4043 F.setMustProgress();
4044 Changed = true;
4045 }
4046 
4047 // TODO: There are a bunch of cases of restrictive memory effects we
4048 // can infer by inspecting arguments of argmemonly-ish functions.
4049 
4050 return Changed;
4051 }
4052
// NOTE(review): the declaration line (source line 4053, presumably
// `void OverflowTracking::mergeFlags(Instruction &I) {`) was lost in
// extraction — confirm against upstream Local.cpp. Intersects I's
// wrap/disjoint flags into the accumulated tracking state.
4054 #ifndef NDEBUG
// Debug-only sanity check: all merged instructions must share one opcode;
// the first call records it.
4055 if (Opcode)
4056 assert(Opcode == I.getOpcode() &&
4057 "can only use mergeFlags on instructions with matching opcodes");
4058 else
4059 Opcode = I.getOpcode();
4060 #endif
// NOTE(review): source line 4061 — the guard opening this scope (upstream:
// `if (isa<OverflowingBinaryOperator>(&I)) {`) — was lost in extraction; the
// brace on line 4064 closes it.
4062 HasNUW &= I.hasNoUnsignedWrap();
4063 HasNSW &= I.hasNoSignedWrap();
4064 }
4065 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4066 IsDisjoint &= DisjointOp->isDisjoint();
4067 }
4068
// NOTE(review): the declaration line (source line 4069, presumably
// `void OverflowTracking::applyFlags(Instruction &I) {`) was lost in
// extraction — confirm against upstream Local.cpp. Reapplies the accumulated
// flags to I after first clearing its optional data.
4070 I.clearSubclassOptionalData();
// NUW/NSW are reapplied only for Add, or for Mul when AllKnownNonZero is
// set; NSW additionally requires AllKnownNonNegative or HasNUW.
4071 if (I.getOpcode() == Instruction::Add ||
4072 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4073 if (HasNUW)
4074 I.setHasNoUnsignedWrap();
4075 if (HasNSW && (AllKnownNonNegative || HasNUW))
4076 I.setHasNoSignedWrap();
4077 }
4078 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4079 DisjointOp->setIsDisjoint(IsDisjoint);
4080 }
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static unsigned getHashValueImpl(SimpleValue Val)
Definition EarlyCSE.cpp:229
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition EarlyCSE.cpp:350
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
SmallDenseMap< BasicBlock *, Value *, 16 > IncomingValueMap
Definition Local.cpp:914
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition Local.cpp:1635
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition Local.cpp:2411
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition Local.cpp:1773
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition Local.cpp:2159
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1611
static void combineMetadata(Instruction *K, const Instruction *J, bool DoesKMove, bool AAOnly=false)
If AAOnly is set, only intersect alias analysis metadata and preserve other known metadata.
Definition Local.cpp:2934
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition Local.cpp:2189
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition Local.cpp:2338
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition Local.cpp:2343
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2201
static DIExpression * dropInitialDeref(const DIExpression *DIExpr)
Definition Local.cpp:1671
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition Local.cpp:979
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2133
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, BasicBlock *&CommonPred)
Definition Local.cpp:1022
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2260
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition Local.cpp:848
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition Local.cpp:665
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition Local.cpp:1767
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition Local.cpp:624
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
static void gatherIncomingValuesToPhi(PHINode *PN, const PredBlockVector &BBPreds, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition Local.cpp:956
static bool markAliveBlocks(Function &F, SmallVectorImpl< bool > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition Local.cpp:2661
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition Local.cpp:2235
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3769
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition Local.cpp:3549
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1392
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition Local.cpp:857
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition Local.cpp:1975
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1428
SmallVector< BasicBlock *, 16 > PredBlockVector
Definition Local.cpp:913
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition Local.cpp:1660
static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ)
Check whether removing BB will make the phis in its Succ have too many incoming entries.
Definition Local.cpp:1055
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition Local.cpp:928
static const unsigned BitPartRecursionMaxDepth
Definition Local.cpp:121
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition Local.cpp:1087
static cl::opt< unsigned > MaxPhiEntriesIncreaseAfterRemovingEmptyBlock("max-phi-entries-increase-after-removing-empty-block", cl::init(1000), cl::Hidden, cl::desc("Stop removing an empty block if removing it will introduce more " "than this number of phi entries in its successor"))
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3780
static void salvageDbgAssignAddress(T *Assign)
Definition Local.cpp:2017
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1408
Class for arbitrary precision integers.
Definition APInt.h:78
std::optional< uint64_t > tryZExtValue() const
Get zero extended value if possible.
Definition APInt.h:1567
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition APInt.h:1421
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1685
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition APInt.h:576
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition APInt.h:1589
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
an instruction to allocate memory on the stack
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Value handle that asserts if the Value is deleted.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:462
unsigned getNumber() const
Definition BasicBlock.h:95
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:449
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:518
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
const Instruction & back() const
Definition BasicBlock.h:474
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:675
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
LLVM_ABI bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:472
LLVM_ABI void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
size_t size() const
Definition BasicBlock.h:470
LLVM_ABI bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition BasicBlock.h:647
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition Constants.h:1065
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
CallingConv::ID getCallingConv() const
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
bool isSigned() const
Definition InstrTypes.h:930
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
Conditional Branch instruction.
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1291
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI void destroyConstant()
Called if some element of this constant is no longer valid.
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition DIBuilder.h:940
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
ArrayRef< uint64_t > getElements() const
LLVM_ABI std::optional< uint64_t > getActiveBits(DIVariable *Var)
Return the number of bits that have an active value, i.e.
uint64_t getElement(unsigned I) const
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
LLVM_ABI void removeFromParent()
LLVM_ABI Module * getModule()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
LLVM_ABI unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable.
LLVM_ABI DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
static DebugLoc getTemporary()
Definition DebugLoc.h:160
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
unsigned size() const
Definition DenseMap.h:110
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:299
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
LLVM_ABI void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
const BasicBlock & getEntryBlock() const
Definition Function.h:809
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
bool hasDomTree() const
Returns true if it holds a DomTreeT.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
bool isBBPendingDeletion(BasicBlockT *DelBB) const
Returns true if DelBB is awaiting deletion.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Value * getPointerOperand()
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition MDBuilder.cpp:38
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition MDBuilder.cpp:96
Metadata node.
Definition Metadata.h:1080
static LLVM_ABI MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedCallsiteMetadata(MDNode *A, MDNode *B)
static LLVM_ABI CaptureComponents toCaptureComponents(const MDNode *MD)
Convert !captures metadata to CaptureComponents. MD may be nullptr.
static LLVM_ABI MDNode * getMergedCalleeTypeMetadata(const MDNode *A, const MDNode *B)
static LLVM_ABI MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoaliasAddrspace(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
static LLVM_ABI MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericRange(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedMemProfMetadata(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * intersect(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoFPClass(MDNode *A, MDNode *B)
LLVMContext & getContext() const
Definition Metadata.h:1244
static LLVM_ABI MDNode * fromCaptureComponents(LLVMContext &Ctx, CaptureComponents CC)
Convert CaptureComponents to !captures metadata.
static LLVM_ABI MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
iterator end()
Definition MapVector.h:67
iterator find(const KeyT &Key)
Definition MapVector.h:154
bool empty() const
Definition MapVector.h:77
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
LLVM_ABI void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
LLVM_ABI void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition SetVector.h:94
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
value_type pop_back_val()
Definition SetVector.h:279
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:236
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
Unconditional Branch instruction.
static UncondBrInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
value_op_iterator value_op_end()
Definition User.h:288
Value * getOperand(unsigned i) const
Definition User.h:207
value_op_iterator value_op_begin()
Definition User.h:285
iterator_range< value_op_iterator > operand_values()
Definition User.h:291
Value wrapper in the Metadata hierarchy.
Definition Metadata.h:459
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
iterator find(const KeyT &Val)
Definition ValueMap.h:160
iterator end()
Definition ValueMap.h:139
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:427
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
bool isUsedByMetadata() const
Return true if there is metadata referencing this value.
Definition Value.h:569
bool use_empty() const
Definition Value.h:347
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition Value.h:831
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
iterator_range< use_iterator > uses()
Definition Value.h:381
user_iterator_impl< User > user_iterator
Definition Value.h:392
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition DenseSet.h:96
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
Changed
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2500
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
bool succ_empty(const Instruction *I)
Definition CFG.h:153
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition Local.cpp:2618
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
Definition Local.cpp:3280
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
LLVM_ABI unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition Local.cpp:3244
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1725
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition Local.cpp:2594
LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
LLVM_ABI void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition Local.cpp:3119
LLVM_ABI void InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1723
constexpr from_range_t from_range
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or less items.
Definition STLExtras.h:2659
LLVM_ABI void remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst)
Remap the operands of the debug records attached to Inst, and the operands of Inst itself if it's a d...
Definition Local.cpp:3480
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
auto cast_or_null(const Y &Val)
Definition Casting.h:714
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition Local.cpp:723
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
LLVM_ABI void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition Local.cpp:1894
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2483
LLVM_ABI bool canSimplifyInvokeNoUnwind(const Function *F)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
LLVM_ABI bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition Local.cpp:1155
LLVM_ABI SmallVector< uint32_t > fitWeights(ArrayRef< uint64_t > Weights)
Push the weights right to fit in uint32_t.
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition Local.cpp:3785
LLVM_ABI MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition Local.cpp:1581
LLVM_ABI bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition Local.cpp:410
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1810
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition Local.cpp:3438
LLVM_ABI CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition Local.cpp:2568
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1677
LLVM_ABI Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition Local.cpp:2863
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition Local.cpp:3180
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2052
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition Local.cpp:3259
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2528
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2429
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2289
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition Local.cpp:3110
LLVM_ABI void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition Local.cpp:3385
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!).
Definition Local.cpp:763
LLVM_ABI bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition Local.cpp:612
LLVM_ABI void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructions to the insertion point.
Definition Local.cpp:3392
LLVM_ABI void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition Local.cpp:3361
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI DebugLoc getDebugValueLoc(DbgVariableRecord *DVR)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
LLVM_ABI void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition Local.cpp:3336
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition Local.cpp:3899
DWARFExpression::Operation Op
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca they describe is replaced with a new value.
Definition Local.cpp:1997
LLVM_ABI Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to PrefAlign.
Definition Local.cpp:1532
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not trivially dead; these will be ignored.
Definition Local.cpp:550
constexpr unsigned BitWidth
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Definition STLExtras.h:2019
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI void combineAAMetadata(Instruction *K, const Instruction *J)
Combine metadata of two instructions, where instruction J is a memory access that has been merged into K.
Definition Local.cpp:3115
LLVM_ABI bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use nodes that either forms a cycle or is terminated by a trivially dead instruction, delete it.
Definition Local.cpp:643
LLVM_ABI bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize the maximal set of inferred attributes.
Definition Local.cpp:4023
LLVM_ABI Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition Local.cpp:3989
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
LLVM_ABI void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks from BB.
LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef< uint64_t > Weights, bool IsExpected, bool ElideAllZero=false)
Variant of setBranchWeights where the Weights will be fit first to uint32_t by shifting right.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so in order to avoid libcall simplification.
Definition Local.cpp:3889
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition Local.cpp:2901
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition Local.cpp:1524
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition Local.cpp:3307
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1957
LLVM_ABI void extractFromBranchWeightMD64(const MDNode *ProfileData, SmallVectorImpl< uint64_t > &Weights)
Faster version of extractBranchWeights() that skips checks and must only be called with "branch_weights" metadata nodes.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
#define NDEBUG
Definition regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given value type T.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:258
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
std::optional< unsigned > Opcode
Opcode of merged instructions.
Definition Local.h:591
LLVM_ABI void mergeFlags(Instruction &I)
Merge in the no-wrap flags from I.
Definition Local.cpp:4053
LLVM_ABI void applyFlags(Instruction &I)
Apply the no-wrap flags to I if applicable.
Definition Local.cpp:4069
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276