Local.cpp (LLVM 19.0.0git)
1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/IntrinsicsWebAssembly.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
73#include "llvm/Support/Debug.h"
79#include <algorithm>
80#include <cassert>
81#include <cstdint>
82#include <iterator>
83#include <map>
84#include <optional>
85#include <utility>
86
87using namespace llvm;
88using namespace llvm::PatternMatch;
89
90extern cl::opt<bool> UseNewDbgInfoFormat;
91
92#define DEBUG_TYPE "local"
93
94STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
95STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
96
98 "phicse-debug-hash",
99#ifdef EXPENSIVE_CHECKS
100 cl::init(true),
101#else
102 cl::init(false),
103#endif
104 cl::Hidden,
105 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
106 "function is well-behaved w.r.t. its isEqual predicate"));
107
109 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
110 cl::desc(
111 "When the basic block contains not more than this number of PHI nodes, "
112 "perform a (faster!) exhaustive search instead of set-driven one."));
113
114// Max recursion depth for collectBitParts used when detecting bswap and
115// bitreverse idioms.
116static const unsigned BitPartRecursionMaxDepth = 48;
117
118//===----------------------------------------------------------------------===//
119// Local constant propagation.
120//
121
122/// ConstantFoldTerminator - If a terminator instruction is predicated on a
123/// constant value, convert it into an unconditional branch to the constant
124/// destination. This is a nontrivial operation because the successors of this
125/// basic block must have their PHI nodes updated.
126/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
127/// conditions and indirectbr addresses this might make dead if
128/// DeleteDeadConditions is true.
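///
/// For example, a conditional branch whose condition has been folded to a
/// constant, such as
///   br i1 true, label %taken, label %dead
/// is rewritten to
///   br label %taken
/// and %dead has this block removed from its PHI nodes' predecessor lists.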
129bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
130 const TargetLibraryInfo *TLI,
131 DomTreeUpdater *DTU) {
132 Instruction *T = BB->getTerminator();
133 IRBuilder<> Builder(T);
134
135 // Branch - See if we are conditional jumping on constant
136 if (auto *BI = dyn_cast<BranchInst>(T)) {
137 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
138
139 BasicBlock *Dest1 = BI->getSuccessor(0);
140 BasicBlock *Dest2 = BI->getSuccessor(1);
141
142 if (Dest2 == Dest1) { // Conditional branch to same location?
143 // This branch matches something like this:
144 // br bool %cond, label %Dest, label %Dest
145 // and changes it into: br label %Dest
146
147 // Let the basic block know that we are letting go of one copy of it.
148 assert(BI->getParent() && "Terminator not inserted in block!");
149 Dest1->removePredecessor(BI->getParent());
150
151 // Replace the conditional branch with an unconditional one.
152 BranchInst *NewBI = Builder.CreateBr(Dest1);
153
154 // Transfer the metadata to the new branch instruction.
155 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
156 LLVMContext::MD_annotation});
157
158 Value *Cond = BI->getCondition();
159 BI->eraseFromParent();
160 if (DeleteDeadConditions)
161 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
162 return true;
163 }
164
165 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
166 // Are we branching on constant?
167 // YES. Change to unconditional branch...
168 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
169 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
170
171 // Let the basic block know that we are letting go of it. Based on this,
172 // it will adjust its PHI nodes.
173 OldDest->removePredecessor(BB);
174
175 // Replace the conditional branch with an unconditional one.
176 BranchInst *NewBI = Builder.CreateBr(Destination);
177
178 // Transfer the metadata to the new branch instruction.
179 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
180 LLVMContext::MD_annotation});
181
182 BI->eraseFromParent();
183 if (DTU)
184 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
185 return true;
186 }
187
188 return false;
189 }
190
191 if (auto *SI = dyn_cast<SwitchInst>(T)) {
192 // If we are switching on a constant, we can convert the switch to an
193 // unconditional branch.
194 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
195 BasicBlock *DefaultDest = SI->getDefaultDest();
196 BasicBlock *TheOnlyDest = DefaultDest;
197
198 // If the default is unreachable, ignore it when searching for TheOnlyDest.
199 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
200 SI->getNumCases() > 0) {
201 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
202 }
203
204 bool Changed = false;
205
206 // Figure out which case it goes to.
207 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
208 // Found case matching a constant operand?
209 if (It->getCaseValue() == CI) {
210 TheOnlyDest = It->getCaseSuccessor();
211 break;
212 }
213
214 // Check to see if this branch is going to the same place as the default
215 // dest. If so, eliminate it as an explicit compare.
216 if (It->getCaseSuccessor() == DefaultDest) {
217 MDNode *MD = getValidBranchWeightMDNode(*SI);
218 unsigned NCases = SI->getNumCases();
219 // Fold the case metadata into the default if there will be any branches
220 // left, unless the metadata doesn't match the switch.
221 if (NCases > 1 && MD) {
222 // Collect branch weights into a vector.
223 SmallVector<uint32_t, 8> Weights;
224 extractBranchWeights(MD, Weights);
225
226 // Merge weight of this case to the default weight.
227 unsigned Idx = It->getCaseIndex();
228 // TODO: Add overflow check.
229 Weights[0] += Weights[Idx + 1];
230 // Remove weight for this case.
231 std::swap(Weights[Idx + 1], Weights.back());
232 Weights.pop_back();
233 setBranchWeights(*SI, Weights);
234 }
235 // Remove this entry.
236 BasicBlock *ParentBB = SI->getParent();
237 DefaultDest->removePredecessor(ParentBB);
238 It = SI->removeCase(It);
239 End = SI->case_end();
240
241 // Removing this case may have made the condition constant. In that
242 // case, update CI and restart iteration through the cases.
243 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
244 CI = NewCI;
245 It = SI->case_begin();
246 }
247
248 Changed = true;
249 continue;
250 }
251
252 // Otherwise, check to see if the switch only branches to one destination.
253 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
254 // destinations.
255 if (It->getCaseSuccessor() != TheOnlyDest)
256 TheOnlyDest = nullptr;
257
258 // Increment this iterator as we haven't removed the case.
259 ++It;
260 }
261
262 if (CI && !TheOnlyDest) {
263 // Branching on a constant, but not any of the cases, go to the default
264 // successor.
265 TheOnlyDest = SI->getDefaultDest();
266 }
267
268 // If we found a single destination that we can fold the switch into, do so
269 // now.
270 if (TheOnlyDest) {
271 // Insert the new branch.
272 Builder.CreateBr(TheOnlyDest);
273 BasicBlock *BB = SI->getParent();
274
275 SmallSet<BasicBlock *, 8> RemovedSuccessors;
276
277 // Remove entries from PHI nodes which we no longer branch to...
278 BasicBlock *SuccToKeep = TheOnlyDest;
279 for (BasicBlock *Succ : successors(SI)) {
280 if (DTU && Succ != TheOnlyDest)
281 RemovedSuccessors.insert(Succ);
282 // Found case matching a constant operand?
283 if (Succ == SuccToKeep) {
284 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
285 } else {
286 Succ->removePredecessor(BB);
287 }
288 }
289
290 // Delete the old switch.
291 Value *Cond = SI->getCondition();
292 SI->eraseFromParent();
293 if (DeleteDeadConditions)
294 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
295 if (DTU) {
296 std::vector<DominatorTree::UpdateType> Updates;
297 Updates.reserve(RemovedSuccessors.size());
298 for (auto *RemovedSuccessor : RemovedSuccessors)
299 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
300 DTU->applyUpdates(Updates);
301 }
302 return true;
303 }
304
305 if (SI->getNumCases() == 1) {
306 // Otherwise, we can fold this switch into a conditional branch
307 // instruction if it has only one non-default destination.
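      // For example, a switch such as
      //   switch i32 %x, label %default [ i32 7, label %case ]
      // becomes
      //   %cond = icmp eq i32 %x, 7
      //   br i1 %cond, label %case, label %default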
308 auto FirstCase = *SI->case_begin();
309 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
310 FirstCase.getCaseValue(), "cond");
311
312 // Insert the new branch.
313 BranchInst *NewBr = Builder.CreateCondBr(Cond,
314 FirstCase.getCaseSuccessor(),
315 SI->getDefaultDest());
316 SmallVector<uint32_t> Weights;
317 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
318 uint32_t DefWeight = Weights[0];
319 uint32_t CaseWeight = Weights[1];
320 // The TrueWeight should be the weight for the single case of SI.
321 NewBr->setMetadata(LLVMContext::MD_prof,
322 MDBuilder(BB->getContext())
323 .createBranchWeights(CaseWeight, DefWeight));
324 }
325
326 // Update make.implicit metadata to the newly-created conditional branch.
327 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
328 if (MakeImplicitMD)
329 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
330
331 // Delete the old switch.
332 SI->eraseFromParent();
333 return true;
334 }
335 return Changed;
336 }
337
338 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
339 // indirectbr blockaddress(@F, @BB) -> br label @BB
340 if (auto *BA =
341 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
342 BasicBlock *TheOnlyDest = BA->getBasicBlock();
343 SmallSet<BasicBlock *, 8> RemovedSuccessors;
344
345 // Insert the new branch.
346 Builder.CreateBr(TheOnlyDest);
347
348 BasicBlock *SuccToKeep = TheOnlyDest;
349 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
350 BasicBlock *DestBB = IBI->getDestination(i);
351 if (DTU && DestBB != TheOnlyDest)
352 RemovedSuccessors.insert(DestBB);
353 if (IBI->getDestination(i) == SuccToKeep) {
354 SuccToKeep = nullptr;
355 } else {
356 DestBB->removePredecessor(BB);
357 }
358 }
359 Value *Address = IBI->getAddress();
360 IBI->eraseFromParent();
361 if (DeleteDeadConditions)
362 // Delete pointer cast instructions.
363 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
364
365 // Also zap the blockaddress constant if there are no users remaining,
366 // otherwise the destination is still marked as having its address taken.
367 if (BA->use_empty())
368 BA->destroyConstant();
369
370 // If we didn't find our destination in the IBI successor list, then we
371 // have undefined behavior. Replace the unconditional branch with an
372 // 'unreachable' instruction.
373 if (SuccToKeep) {
374 BB->getTerminator()->eraseFromParent();
375 new UnreachableInst(BB->getContext(), BB);
376 }
377
378 if (DTU) {
379 std::vector<DominatorTree::UpdateType> Updates;
380 Updates.reserve(RemovedSuccessors.size());
381 for (auto *RemovedSuccessor : RemovedSuccessors)
382 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
383 DTU->applyUpdates(Updates);
384 }
385 return true;
386 }
387 }
388
389 return false;
390}
391
392//===----------------------------------------------------------------------===//
393// Local dead code elimination.
394//
395
396/// isInstructionTriviallyDead - Return true if the result produced by the
397/// instruction is not used, and the instruction has no side effects.
398///
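///
/// A typical caller pattern looks like this (sketch):
///   if (isInstructionTriviallyDead(I, TLI))
///     I->eraseFromParent();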
399bool llvm::isInstructionTriviallyDead(Instruction *I,
400 const TargetLibraryInfo *TLI) {
401 if (!I->use_empty())
402 return false;
403 return wouldInstructionBeTriviallyDead(I, TLI);
404}
405
406bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
407 Instruction *I, const TargetLibraryInfo *TLI) {
408 // Instructions that are "markers" and have implied meaning on code around
409 // them (without explicit uses), are not dead on unused paths.
410 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
411 if (II->getIntrinsicID() == Intrinsic::stacksave ||
412 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
413 II->isLifetimeStartOrEnd())
414 return false;
415 return wouldInstructionBeTriviallyDead(I, TLI);
416}
417
418bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
419 const TargetLibraryInfo *TLI) {
420 if (I->isTerminator())
421 return false;
422
423 // We don't want the landingpad-like instructions removed by anything this
424 // general.
425 if (I->isEHPad())
426 return false;
427
428 // We don't want debug info removed by anything this general.
429 if (isa<DbgVariableIntrinsic>(I))
430 return false;
431
432 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
433 if (DLI->getLabel())
434 return false;
435 return true;
436 }
437
438 if (auto *CB = dyn_cast<CallBase>(I))
439 if (isRemovableAlloc(CB, TLI))
440 return true;
441
442 if (!I->willReturn()) {
443 auto *II = dyn_cast<IntrinsicInst>(I);
444 if (!II)
445 return false;
446
447 switch (II->getIntrinsicID()) {
448 case Intrinsic::experimental_guard: {
449 // Guards on true are operationally no-ops. In the future we can
450 // consider more sophisticated tradeoffs for guards considering potential
451 // for check widening, but for now we keep things simple.
452 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
453 return Cond && Cond->isOne();
454 }
455 // TODO: These intrinsics are not safe to remove, because this may remove
456 // a well-defined trap.
457 case Intrinsic::wasm_trunc_signed:
458 case Intrinsic::wasm_trunc_unsigned:
459 case Intrinsic::ptrauth_auth:
460 case Intrinsic::ptrauth_resign:
461 return true;
462 default:
463 return false;
464 }
465 }
466
467 if (!I->mayHaveSideEffects())
468 return true;
469
470 // Special case intrinsics that "may have side effects" but can be deleted
471 // when dead.
472 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
473 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
474 if (II->getIntrinsicID() == Intrinsic::stacksave ||
475 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
476 return true;
477
478 // Intrinsics declare side effects to prevent them from moving, but they are
479 // nops without users.
480 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
481 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
482 return true;
483
484 if (II->isLifetimeStartOrEnd()) {
485 auto *Arg = II->getArgOperand(1);
486 // Lifetime intrinsics are dead when their right-hand operand is undef.
487 if (isa<UndefValue>(Arg))
488 return true;
489 // If the right-hand operand is an alloca, global, or argument and its only
490 // uses are lifetime intrinsics, then the intrinsics are dead.
491 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
492 return llvm::all_of(Arg->uses(), [](Use &Use) {
493 if (IntrinsicInst *IntrinsicUse =
494 dyn_cast<IntrinsicInst>(Use.getUser()))
495 return IntrinsicUse->isLifetimeStartOrEnd();
496 return false;
497 });
498 return false;
499 }
500
501 // Assumptions are dead if their condition is trivially true.
502 if (II->getIntrinsicID() == Intrinsic::assume &&
503 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
504 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
505 return !Cond->isZero();
506
507 return false;
508 }
509
510 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
511 std::optional<fp::ExceptionBehavior> ExBehavior =
512 FPI->getExceptionBehavior();
513 return *ExBehavior != fp::ebStrict;
514 }
515 }
516
517 if (auto *Call = dyn_cast<CallBase>(I)) {
518 if (Value *FreedOp = getFreedOperand(Call, TLI))
519 if (Constant *C = dyn_cast<Constant>(FreedOp))
520 return C->isNullValue() || isa<UndefValue>(C);
521 if (isMathLibCallNoop(Call, TLI))
522 return true;
523 }
524
525 // Non-volatile atomic loads from constants can be removed.
526 if (auto *LI = dyn_cast<LoadInst>(I))
527 if (auto *GV = dyn_cast<GlobalVariable>(
528 LI->getPointerOperand()->stripPointerCasts()))
529 if (!LI->isVolatile() && GV->isConstant())
530 return true;
531
532 return false;
533}
534
535/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
536/// trivially dead instruction, delete it. If that makes any of its operands
537/// trivially dead, delete them too, recursively. Return true if any
538/// instructions were deleted.
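///
/// Typical use (sketch, where NewV stands for whatever replacement value the
/// caller computed):
///   I->replaceAllUsesWith(NewV);
///   RecursivelyDeleteTriviallyDeadInstructions(I, TLI);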
539bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
540 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
541 std::function<void(Value *)> AboutToDeleteCallback) {
542 Instruction *I = dyn_cast<Instruction>(V);
543 if (!I || !isInstructionTriviallyDead(I, TLI))
544 return false;
545
546 SmallVector<WeakTrackingVH, 16> DeadInsts;
547 DeadInsts.push_back(I);
548 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
549 AboutToDeleteCallback);
550
551 return true;
552}
553
554bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
555 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
556 MemorySSAUpdater *MSSAU,
557 std::function<void(Value *)> AboutToDeleteCallback) {
558 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
559 for (; S != E; ++S) {
560 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
561 if (!I || !isInstructionTriviallyDead(I)) {
562 DeadInsts[S] = nullptr;
563 ++Alive;
564 }
565 }
566 if (Alive == E)
567 return false;
568 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
569 AboutToDeleteCallback);
570 return true;
571}
572
573void llvm::RecursivelyDeleteTriviallyDeadInstructions(
574 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
575 MemorySSAUpdater *MSSAU,
576 std::function<void(Value *)> AboutToDeleteCallback) {
577 // Process the dead instruction list until empty.
578 while (!DeadInsts.empty()) {
579 Value *V = DeadInsts.pop_back_val();
580 Instruction *I = cast_or_null<Instruction>(V);
581 if (!I)
582 continue;
584 "Live instruction found in dead worklist!");
585 assert(I->use_empty() && "Instructions with uses are not dead.");
586
587 // Don't lose the debug info while deleting the instructions.
588 salvageDebugInfo(*I);
589
590 if (AboutToDeleteCallback)
591 AboutToDeleteCallback(I);
592
593 // Null out all of the instruction's operands to see if any operand becomes
594 // dead as we go.
595 for (Use &OpU : I->operands()) {
596 Value *OpV = OpU.get();
597 OpU.set(nullptr);
598
599 if (!OpV->use_empty())
600 continue;
601
602 // If the operand is an instruction that became dead as we nulled out the
603 // operand, and if it is 'trivially' dead, delete it in a future loop
604 // iteration.
605 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
606 if (isInstructionTriviallyDead(OpI, TLI))
607 DeadInsts.push_back(OpI);
608 }
609 if (MSSAU)
610 MSSAU->removeMemoryAccess(I);
611
612 I->eraseFromParent();
613 }
614}
615
616bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
617 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
618 SmallVector<DbgVariableRecord *, 1> DPUsers;
619 findDbgUsers(DbgUsers, I, &DPUsers);
620 for (auto *DII : DbgUsers)
621 DII->setKillLocation();
622 for (auto *DVR : DPUsers)
623 DVR->setKillLocation();
624 return !DbgUsers.empty() || !DPUsers.empty();
625}
626
627/// areAllUsesEqual - Check whether the uses of a value are all the same.
628/// This is similar to Instruction::hasOneUse() except this will also return
629/// true when there are no uses or multiple uses that all refer to the same
630/// value.
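///
/// For example, an instruction whose three uses are all operands of one and
/// the same user satisfies this, as does an instruction with no uses at all.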
631static bool areAllUsesEqual(Instruction *I) {
632 Value::user_iterator UI = I->user_begin();
633 Value::user_iterator UE = I->user_end();
634 if (UI == UE)
635 return true;
636
637 User *TheUse = *UI;
638 for (++UI; UI != UE; ++UI) {
639 if (*UI != TheUse)
640 return false;
641 }
642 return true;
643}
644
645/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
646/// dead PHI node, due to being a def-use chain of single-use nodes that
647/// either forms a cycle or is terminated by a trivially dead instruction,
648/// delete it. If that makes any of its operands trivially dead, delete them
649/// too, recursively. Return true if a change was made.
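///
/// For example, a PHI whose only user is a second PHI whose only user is the
/// first again forms a dead single-use cycle; both nodes (and any operands
/// that become trivially dead) are removed.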
650bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
651 const TargetLibraryInfo *TLI,
652 llvm::MemorySSAUpdater *MSSAU) {
653 SmallPtrSet<Instruction *, 4> Visited;
654 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
655 I = cast<Instruction>(*I->user_begin())) {
656 if (I->use_empty())
657 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
658
659 // If we find an instruction more than once, we're on a cycle that
660 // won't prove fruitful.
661 if (!Visited.insert(I).second) {
662 // Break the cycle and delete the instruction and its operands.
663 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
664 RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
665 return true;
666 }
667 }
668 return false;
669}
670
671static bool
672simplifyAndDCEInstruction(Instruction *I,
673 SmallSetVector<Instruction *, 16> &WorkList,
674 const DataLayout &DL,
675 const TargetLibraryInfo *TLI) {
676 if (isInstructionTriviallyDead(I, TLI)) {
677 salvageDebugInfo(*I);
678
679 // Null out all of the instruction's operands to see if any operand becomes
680 // dead as we go.
681 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
682 Value *OpV = I->getOperand(i);
683 I->setOperand(i, nullptr);
684
685 if (!OpV->use_empty() || I == OpV)
686 continue;
687
688 // If the operand is an instruction that became dead as we nulled out the
689 // operand, and if it is 'trivially' dead, delete it in a future loop
690 // iteration.
691 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
692 if (isInstructionTriviallyDead(OpI, TLI))
693 WorkList.insert(OpI);
694 }
695
696 I->eraseFromParent();
697
698 return true;
699 }
700
701 if (Value *SimpleV = simplifyInstruction(I, DL)) {
702 // Add the users to the worklist. CAREFUL: an instruction can use itself,
703 // in the case of a phi node.
704 for (User *U : I->users()) {
705 if (U != I) {
706 WorkList.insert(cast<Instruction>(U));
707 }
708 }
709
710 // Replace the instruction with its simplified value.
711 bool Changed = false;
712 if (!I->use_empty()) {
713 I->replaceAllUsesWith(SimpleV);
714 Changed = true;
715 }
716 if (isInstructionTriviallyDead(I, TLI)) {
717 I->eraseFromParent();
718 Changed = true;
719 }
720 return Changed;
721 }
722 return false;
723}
724
725/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
726/// simplify any instructions in it and recursively delete dead instructions.
727///
728/// This returns true if it changed the code, note that it can delete
729/// instructions in other blocks as well in this block.
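///
/// Typical use (sketch, with F the enclosing Function):
///   for (BasicBlock &BB : F)
///     Changed |= SimplifyInstructionsInBlock(&BB, TLI);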
730bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
731 const TargetLibraryInfo *TLI) {
732 bool MadeChange = false;
733 const DataLayout &DL = BB->getModule()->getDataLayout();
734
735#ifndef NDEBUG
736 // In debug builds, ensure that the terminator of the block is never replaced
737 // or deleted by these simplifications. The idea of simplification is that it
738 // cannot introduce new instructions, and there is no way to replace the
739 // terminator of a block without introducing a new instruction.
740 AssertingVH<Instruction> TerminatorVH(&BB->back());
741#endif
742
743 SmallSetVector<Instruction *, 16> WorkList;
744 // Iterate over the original function, only adding insts to the worklist
745 // if they actually need to be revisited. This avoids having to pre-init
746 // the worklist with the entire function's worth of instructions.
747 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
748 BI != E;) {
749 assert(!BI->isTerminator());
750 Instruction *I = &*BI;
751 ++BI;
752
753 // We're visiting this instruction now, so make sure it's not in the
754 // worklist from an earlier visit.
755 if (!WorkList.count(I))
756 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
757 }
758
759 while (!WorkList.empty()) {
760 Instruction *I = WorkList.pop_back_val();
761 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
762 }
763 return MadeChange;
764}
765
766//===----------------------------------------------------------------------===//
767// Control Flow Graph Restructuring.
768//
769
770void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
771 DomTreeUpdater *DTU) {
772
773 // If BB has single-entry PHI nodes, fold them.
774 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
775 Value *NewVal = PN->getIncomingValue(0);
776 // Replace self referencing PHI with poison, it must be dead.
777 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
778 PN->replaceAllUsesWith(NewVal);
779 PN->eraseFromParent();
780 }
781
782 BasicBlock *PredBB = DestBB->getSinglePredecessor();
783 assert(PredBB && "Block doesn't have a single predecessor!");
784
785 bool ReplaceEntryBB = PredBB->isEntryBlock();
786
787 // DTU updates: Collect all the edges that enter
788 // PredBB. These dominator edges will be redirected to DestBB.
789 SmallVector<DominatorTree::UpdateType, 32> Updates;
790
791 if (DTU) {
792 // To avoid processing the same predecessor more than once.
793 SmallPtrSet<BasicBlock *, 2> SeenPreds;
794 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
795 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
796 // This predecessor of PredBB may already have DestBB as a successor.
797 if (PredOfPredBB != PredBB)
798 if (SeenPreds.insert(PredOfPredBB).second)
799 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
800 SeenPreds.clear();
801 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
802 if (SeenPreds.insert(PredOfPredBB).second)
803 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
804 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
805 }
806
807 // Zap anything that took the address of DestBB. Not doing this will give the
808 // address an invalid value.
809 if (DestBB->hasAddressTaken()) {
810 BlockAddress *BA = BlockAddress::get(DestBB);
811 Constant *Replacement =
812 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
813 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
814 BA->getType()));
815 BA->destroyConstant();
816 }
817
818 // Anything that branched to PredBB now branches to DestBB.
819 PredBB->replaceAllUsesWith(DestBB);
820
821 // Splice all the instructions from PredBB to DestBB.
822 PredBB->getTerminator()->eraseFromParent();
823 DestBB->splice(DestBB->begin(), PredBB);
824 new UnreachableInst(PredBB->getContext(), PredBB);
825
826 // If the PredBB is the entry block of the function, move DestBB up to
827 // become the entry block after we erase PredBB.
828 if (ReplaceEntryBB)
829 DestBB->moveAfter(PredBB);
830
831 if (DTU) {
832 assert(PredBB->size() == 1 &&
833 isa<UnreachableInst>(PredBB->getTerminator()) &&
834 "The successor list of PredBB isn't empty before "
835 "applying corresponding DTU updates.");
836 DTU->applyUpdatesPermissive(Updates);
837 DTU->deleteBB(PredBB);
838 // Recalculation of DomTree is needed when updating a forward DomTree and
839 // the Entry BB is replaced.
840 if (ReplaceEntryBB && DTU->hasDomTree()) {
841 // The entry block was removed and there is no external interface for
842 // the dominator tree to be notified of this change. In this corner-case
843 // we recalculate the entire tree.
844 DTU->recalculate(*(DestBB->getParent()));
845 }
846 }
847
848 else {
849 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
850 }
851}
852
853/// Return true if we can choose one of these values to use in place of the
854/// other. Note that we will always choose the non-undef value to keep.
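/// For example, the pair (undef, %x) is mergeable and the non-undef value %x
/// is the one that will be kept, while two distinct non-undef values are not
/// mergeable.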
855static bool CanMergeValues(Value *First, Value *Second) {
856 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
857}
858
859/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
860/// branch to Succ, into Succ.
861///
862/// Assumption: Succ is the single successor for BB.
863static bool
864CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
865 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
866 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
867
868 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
869 << Succ->getName() << "\n");
870 // Shortcut: if there is only a single predecessor, it must be BB and merging
871 // is always safe
872 if (Succ->getSinglePredecessor())
873 return true;
874
875 // Look at all the phi nodes in Succ, to see if they present a conflict when
876 // merging these blocks
877 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
878 PHINode *PN = cast<PHINode>(I);
879
880 // If the incoming value from BB is again a PHINode in
881 // BB which has the same incoming value for *PI as PN does, we can
882 // merge the phi nodes and then the blocks can still be merged
883 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
884 if (BBPN && BBPN->getParent() == BB) {
885 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
886 BasicBlock *IBB = PN->getIncomingBlock(PI);
887 if (BBPreds.count(IBB) &&
888 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
889 PN->getIncomingValue(PI))) {
890 LLVM_DEBUG(dbgs()
891 << "Can't fold, phi node " << PN->getName() << " in "
892 << Succ->getName() << " is conflicting with "
893 << BBPN->getName() << " with regard to common predecessor "
894 << IBB->getName() << "\n");
895 return false;
896 }
897 }
898 } else {
899 Value* Val = PN->getIncomingValueForBlock(BB);
900 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
901 // See if the incoming value for the common predecessor is equal to the
902 // one for BB, in which case this phi node will not prevent the merging
903 // of the block.
904 BasicBlock *IBB = PN->getIncomingBlock(PI);
905 if (BBPreds.count(IBB) &&
906 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
907 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
908 << " in " << Succ->getName()
909 << " is conflicting with regard to common "
910 << "predecessor " << IBB->getName() << "\n");
911 return false;
912 }
913 }
914 }
915 }
916
917 return true;
918}
919
920using PredBlockVector = SmallVector<BasicBlock *, 16>;
921using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
922
923/// Determines the value to use as the phi node input for a block.
924///
925/// Select between \p OldVal any value that we know flows from \p BB
926/// to a particular phi on the basis of which one (if either) is not
927/// undef. Update IncomingValues based on the selected value.
928///
929/// \param OldVal The value we are considering selecting.
930/// \param BB The block that the value flows in from.
931/// \param IncomingValues A map from block-to-value for other phi inputs
932/// that we have examined.
933///
934/// \returns the selected value.
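///
/// For example, if \p OldVal is undef but a non-undef value has already been
/// recorded for \p BB in \p IncomingValues, the recorded value is returned;
/// otherwise \p OldVal is returned unchanged.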
935static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
936 IncomingValueMap &IncomingValues) {
937 if (!isa<UndefValue>(OldVal)) {
938 assert((!IncomingValues.count(BB) ||
939 IncomingValues.find(BB)->second == OldVal) &&
940 "Expected OldVal to match incoming value from BB!");
941
942 IncomingValues.insert(std::make_pair(BB, OldVal));
943 return OldVal;
944 }
945
946 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
947 if (It != IncomingValues.end()) return It->second;
948
949 return OldVal;
950}
951
952/// Create a map from block to value for the operands of a
953/// given phi.
954///
955/// Create a map from block to value for each non-undef value flowing
956/// into \p PN.
957///
958/// \param PN The phi we are collecting the map for.
959/// \param IncomingValues [out] The map from block to value for this phi.
960static void gatherIncomingValuesToPhi(PHINode *PN,
961 IncomingValueMap &IncomingValues) {
962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
963 BasicBlock *BB = PN->getIncomingBlock(i);
964 Value *V = PN->getIncomingValue(i);
965
966 if (!isa<UndefValue>(V))
967 IncomingValues.insert(std::make_pair(BB, V));
968 }
969}
970
971/// Replace the incoming undef values to a phi with the values
972/// from a block-to-value map.
973///
974/// \param PN The phi we are replacing the undefs in.
975/// \param IncomingValues A map from block to value.
976static void replaceUndefValuesInPhi(PHINode *PN,
977 const IncomingValueMap &IncomingValues) {
978 SmallVector<unsigned> TrueUndefOps;
979 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
980 Value *V = PN->getIncomingValue(i);
981
982 if (!isa<UndefValue>(V)) continue;
983
984 BasicBlock *BB = PN->getIncomingBlock(i);
985 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
986
987 // Keep track of undef/poison incoming values. Those must match, so we fix
988 // them up below if needed.
989 // Note: this is conservatively correct, but we could try harder and group
990 // the undef values per incoming basic block.
991 if (It == IncomingValues.end()) {
992 TrueUndefOps.push_back(i);
993 continue;
994 }
995
996 // There is a defined value for this incoming block, so map this undef
997 // incoming value to the defined value.
998 PN->setIncomingValue(i, It->second);
999 }
1000
1001 // If there are both undef and poison values incoming, then convert those
1002 // values to undef. It is invalid to have different values for the same
1003 // incoming block.
1004 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1005 return isa<PoisonValue>(PN->getIncomingValue(i));
1006 });
1007 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1008 for (unsigned i : TrueUndefOps)
1009 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
1010 }
1011}
1012
1013// Return true only when BB and Succ share a single common predecessor.
1014// Only handles cases when BB can't be merged while its predecessors can be
1015// redirected.
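// For example: if both BB and Succ contain PHI nodes, BB has several
// predecessors, and the two blocks share at most one predecessor, then BB
// cannot simply be folded into Succ, but its non-shared predecessors can
// still be redirected to branch to Succ directly.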
1016static bool
1017CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1018 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1019 const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
1020 BasicBlock *&CommonPred) {
1021
1022 // There must be phis in BB, otherwise BB will be merged into Succ directly
1023 if (BB->phis().empty() || Succ->phis().empty())
1024 return false;
1025
1026 // BB must have predecessors not shared that can be redirected to Succ
1027 if (!BB->hasNPredecessorsOrMore(2))
1028 return false;
1029
1030 // Get single common predecessors of both BB and Succ
1031 for (BasicBlock *SuccPred : SuccPreds) {
1032 if (BBPreds.count(SuccPred)) {
1033 if (CommonPred)
1034 return false;
1035 CommonPred = SuccPred;
1036 }
1037 }
1038
1039 return true;
1040}
1041
1042/// Replace a value flowing from a block to a phi with
1043/// potentially multiple instances of that value flowing from the
1044/// block's predecessors to the phi.
1045///
1046/// \param BB The block with the value flowing into the phi.
1047/// \param BBPreds The predecessors of BB.
1048/// \param PN The phi that we are updating.
1049/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1051 const PredBlockVector &BBPreds,
1052 PHINode *PN,
1053 BasicBlock *CommonPred) {
1054 Value *OldVal = PN->removeIncomingValue(BB, false);
1055 assert(OldVal && "No entry in PHI for Pred BB!");
1056
1057 IncomingValueMap IncomingValues;
1058
1059 // We are merging two blocks - BB, and the block containing PN - and
1060 // as a result we need to redirect edges from the predecessors of BB
1061 // to go to the block containing PN, and update PN
1062 // accordingly. Since we allow merging blocks in the case where the
1063 // predecessor and successor blocks both share some predecessors,
1064 // and where some of those common predecessors might have undef
1065 // values flowing into PN, we want to rewrite those values to be
1066 // consistent with the non-undef values.
1067
1068 gatherIncomingValuesToPhi(PN, IncomingValues);
1069
1070 // If this incoming value is one of the PHI nodes in BB, the new entries
1071 // in the PHI node are the entries from the old PHI.
1072 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1073 PHINode *OldValPN = cast<PHINode>(OldVal);
1074 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1075 // Note that, since we are merging phi nodes and BB and Succ might
1076 // have common predecessors, we could end up with a phi node with
1077 // identical incoming branches. This will be cleaned up later (and
1078 // will trigger asserts if we try to clean it up now, without also
1079 // simplifying the corresponding conditional branch).
1080 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1081
1082 if (PredBB == CommonPred)
1083 continue;
1084
1085 Value *PredVal = OldValPN->getIncomingValue(i);
1086 Value *Selected =
1087 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1088
1089 // And add a new incoming value for this predecessor for the
1090 // newly retargeted branch.
1091 PN->addIncoming(Selected, PredBB);
1092 }
1093 if (CommonPred)
1094 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1095
1096 } else {
1097 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1098 // Update existing incoming values in PN for this
1099 // predecessor of BB.
1100 BasicBlock *PredBB = BBPreds[i];
1101
1102 if (PredBB == CommonPred)
1103 continue;
1104
1105 Value *Selected =
1106 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1107
1108 // And add a new incoming value for this predecessor for the
1109 // newly retargeted branch.
1110 PN->addIncoming(Selected, PredBB);
1111 }
1112 if (CommonPred)
1113 PN->addIncoming(OldVal, BB);
1114 }
1115
1116 replaceUndefValuesInPhi(PN, IncomingValues);
1117}
1118
1119bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1120 DomTreeUpdater *DTU) {
1121 assert(BB != &BB->getParent()->getEntryBlock() &&
1122 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1123
1124 // We can't simplify infinite loops.
1125 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1126 if (BB == Succ)
1127 return false;
1128
1129 SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
1130 SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));
1131
1132 // The single common predecessor of BB and Succ when BB cannot be killed
1133 BasicBlock *CommonPred = nullptr;
1134
1135 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1136
1137 // Even if we cannot fold BB into Succ, we may be able to redirect the
1138 // predecessors of BB to Succ.
1139 bool BBPhisMergeable =
1140 BBKillable ||
1141 CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);
1142
1143 if (!BBKillable && !BBPhisMergeable)
1144 return false;
1145
1146 // Check to see if merging these blocks/phis would cause conflicts for any of
1147 // the phi nodes in BB or Succ. If not, we can safely merge.
1148
1149 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1150 // has uses which will not disappear when the PHI nodes are merged. It is
1151 // possible to handle such cases, but difficult: it requires checking whether
1152 // BB dominates Succ, which is non-trivial to calculate in the case where
1153 // Succ has multiple predecessors. Also, it requires checking whether
1154 // constructing the necessary self-referential PHI node doesn't introduce any
1155 // conflicts; this isn't too difficult, but the previous code for doing this
1156 // was incorrect.
1157 //
1158 // Note that if this check finds a live use, BB dominates Succ, so BB is
1159 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1160 // folding the branch isn't profitable in that case anyway.
1161 if (!Succ->getSinglePredecessor()) {
1162 BasicBlock::iterator BBI = BB->begin();
1163 while (isa<PHINode>(*BBI)) {
1164 for (Use &U : BBI->uses()) {
1165 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1166 if (PN->getIncomingBlock(U) != BB)
1167 return false;
1168 } else {
1169 return false;
1170 }
1171 }
1172 ++BBI;
1173 }
1174 }
1175
1176 if (BBPhisMergeable && CommonPred)
1177 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1178 << " and " << Succ->getName() << " : "
1179 << CommonPred->getName() << "\n");
1180
1181 // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
1182 // metadata.
1183 //
1184 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1185 // current status (that loop metadata is implemented as metadata attached to
1186 // the branch instruction in the loop latch block). To quote from review
1187 // comments, "the current representation of loop metadata (using a loop latch
1188 // terminator attachment) is known to be fundamentally broken. Loop latches
1189 // are not uniquely associated with loops (both in that a latch can be part of
1190 // multiple loops and a loop may have multiple latches). Loop headers are. The
1191 // solution to this problem is also known: Add support for basic block
1192 // metadata, and attach loop metadata to the loop header."
1193 //
1194 // Why bail out:
1195 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1196 // the latch for the inner loop (see reason below), so bail out to preserve
1197 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1198 // to this inner-loop.
1199 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1200 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1201 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1202 // one self-looping basic block, which is contradictory with the assumption.
1203 //
1204 // To illustrate how inner-loop metadata is dropped:
1205 //
1206 // CFG Before
1207 //
1208 // BB is while.cond.exit, attached with loop metadata md2.
1209 // BB->Pred is for.body, attached with loop metadata md1.
1210 //
1211 // entry
1212 // |
1213 // v
1214 // ---> while.cond -------------> while.end
1215 // | |
1216 // | v
1217 // | while.body
1218 // | |
1219 // | v
1220 // | for.body <---- (md1)
1221 // | | |______|
1222 // | v
1223 // | while.cond.exit (md2)
1224 // | |
1225 // |_______|
1226 //
1227 // CFG After
1228 //
1229 // while.cond1 is the merge of while.cond.exit and while.cond above.
1230 // for.body is attached with md2, and md1 is dropped.
1231 // If LoopSimplify runs later (as part of a loop pass), it could create
1232 // dedicated exits for the inner loop (essentially adding `while.cond.exit`
1233 // back), but it won't see 'md1' nor restore it for the inner loop.
1234 //
1235 // entry
1236 // |
1237 // v
1238 // ---> while.cond1 -------------> while.end
1239 // | |
1240 // | v
1241 // | while.body
1242 // | |
1243 // | v
1244 // | for.body <---- (md2)
1245 // |_______| |______|
1246 if (Instruction *TI = BB->getTerminator())
1247 if (TI->hasMetadata(LLVMContext::MD_loop))
1248 for (BasicBlock *Pred : predecessors(BB))
1249 if (Instruction *PredTI = Pred->getTerminator())
1250 if (PredTI->hasMetadata(LLVMContext::MD_loop))
1251 return false;
1252
1253 if (BBKillable)
1254 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1255 else if (BBPhisMergeable)
1256 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1257
1258 SmallVector<DominatorTree::UpdateType, 32> Updates;
1259
1260 if (DTU) {
1261 // To avoid processing the same predecessor more than once.
1262 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1263 // All predecessors of BB (except the common predecessor) will be moved to
1264 // Succ.
1265 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1266
1267 for (auto *PredOfBB : predecessors(BB)) {
1268 // Do not modify those common predecessors of BB and Succ
1269 if (!SuccPreds.contains(PredOfBB))
1270 if (SeenPreds.insert(PredOfBB).second)
1271 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1272 }
1273
1274 SeenPreds.clear();
1275
1276 for (auto *PredOfBB : predecessors(BB))
1277 // When BB cannot be killed, do not remove the edge between BB and
1278 // CommonPred.
1279 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1280 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1281
1282 if (BBKillable)
1283 Updates.push_back({DominatorTree::Delete, BB, Succ});
1284 }
1285
1286 if (isa<PHINode>(Succ->begin())) {
1287 // If there is more than one pred of succ, and there are PHI nodes in
1288 // the successor, then we need to add incoming edges for the PHI nodes
1289 //
1290 const PredBlockVector BBPreds(predecessors(BB));
1291
1292 // Loop over all of the PHI nodes in the successor of BB.
1293 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1294 PHINode *PN = cast<PHINode>(I);
1295 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1296 }
1297 }
1298
1299 if (Succ->getSinglePredecessor()) {
1300 // BB is the only predecessor of Succ, so Succ will end up with exactly
1301 // the same predecessors BB had.
1302 // Copy over any phi, debug or lifetime instruction.
1303 BB->getTerminator()->eraseFromParent();
1304 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1305 } else {
1306 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1307 // We explicitly check for such uses for merging phis.
1308 assert(PN->use_empty() && "There shouldn't be any uses here!");
1309 PN->eraseFromParent();
1310 }
1311 }
1312
1313 // If the unconditional branch we replaced contains llvm.loop metadata, we
1314 // add the metadata to the branch instructions in the predecessors.
1315 if (Instruction *TI = BB->getTerminator())
1316 if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
1317 for (BasicBlock *Pred : predecessors(BB))
1318 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1319
1320 if (BBKillable) {
1321 // Everything that jumped to BB now goes to Succ.
1322 BB->replaceAllUsesWith(Succ);
1323
1324 if (!Succ->hasName())
1325 Succ->takeName(BB);
1326
1327 // Clear the successor list of BB to match updates applying to DTU later.
1328 if (BB->getTerminator())
1329 BB->back().eraseFromParent();
1330
1331 new UnreachableInst(BB->getContext(), BB);
1332 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1333 "applying corresponding DTU updates.");
1334 } else if (BBPhisMergeable) {
1335 // Everything except CommonPred that jumped to BB now goes to Succ.
1336 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1337 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1338 return UseInst->getParent() != CommonPred &&
1339 BBPreds.contains(UseInst->getParent());
1340 return false;
1341 });
1342 }
1343
1344 if (DTU)
1345 DTU->applyUpdates(Updates);
1346
1347 if (BBKillable)
1348 DeleteDeadBlock(BB, DTU);
1349
1350 return true;
1351}
1352
1353static bool
1354EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1355 SmallPtrSetImpl<PHINode *> &ToRemove) {
1356 // This implementation doesn't currently consider undef operands
1357 // specially. Theoretically, two phis which are identical except for
1358 // one having an undef where the other doesn't could be collapsed.
1359
1360 bool Changed = false;
1361
1362 // Examine each PHI.
1363 // Note that increment of I must *NOT* be in the iteration_expression, since
1364 // we don't want to immediately advance when we restart from the beginning.
1365 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1366 ++I;
1367 // Is there an identical PHI node in this basic block?
1368 // Note that we only look at the PHIs that come after PN (the upper
1369 // triangle); earlier PHIs (the lower triangle) were already checked.
1370 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1371 if (ToRemove.contains(DuplicatePN))
1372 continue;
1373 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1374 continue;
1375 // A duplicate. Replace this PHI with the base PHI.
1376 ++NumPHICSEs;
1377 DuplicatePN->replaceAllUsesWith(PN);
1378 ToRemove.insert(DuplicatePN);
1379 Changed = true;
1380
1381 // The RAUW can change PHIs that we already visited.
1382 I = BB->begin();
1383 break; // Start over from the beginning.
1384 }
1385 }
1386 return Changed;
1387}
1388
1389static bool
1390EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1391 SmallPtrSetImpl<PHINode *> &ToRemove) {
1392 // This implementation doesn't currently consider undef operands
1393 // specially. Theoretically, two phis which are identical except for
1394 // one having an undef where the other doesn't could be collapsed.
1395
1396 struct PHIDenseMapInfo {
1397 static PHINode *getEmptyKey() {
1398 return DenseMapInfo<PHINode *>::getEmptyKey();
1399 }
1400
1401 static PHINode *getTombstoneKey() {
1402 return DenseMapInfo<PHINode *>::getTombstoneKey();
1403 }
1404
1405 static bool isSentinel(PHINode *PN) {
1406 return PN == getEmptyKey() || PN == getTombstoneKey();
1407 }
1408
1409 // WARNING: this logic must be kept in sync with
1410 // Instruction::isIdenticalToWhenDefined()!
1411 static unsigned getHashValueImpl(PHINode *PN) {
1412 // Compute a hash value on the operands. Instcombine will likely have
1413 // sorted them, which helps expose duplicates, but we have to check all
1414 // the operands to be safe in case instcombine hasn't run.
1415 return static_cast<unsigned>(hash_combine(
1416 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1417 hash_combine_range(PN->block_begin(), PN->block_end())));
1418 }
1419
1420 static unsigned getHashValue(PHINode *PN) {
1421#ifndef NDEBUG
1422 // If -phicse-debug-hash was specified, return a constant -- this
1423 // will force all hashing to collide, so we'll exhaustively search
1424 // the table for a match, and the assertion in isEqual will fire if
1425 // there's a bug causing equal keys to hash differently.
1426 if (PHICSEDebugHash)
1427 return 0;
1428#endif
1429 return getHashValueImpl(PN);
1430 }
1431
1432 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1433 if (isSentinel(LHS) || isSentinel(RHS))
1434 return LHS == RHS;
1435 return LHS->isIdenticalTo(RHS);
1436 }
1437
1438 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1439 // These comparisons are nontrivial, so assert that equality implies
1440 // hash equality (DenseMap demands this as an invariant).
1441 bool Result = isEqualImpl(LHS, RHS);
1442 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1443 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1444 return Result;
1445 }
1446 };
1447
1448 // Set of unique PHINodes.
1449 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1450 PHISet.reserve(4 * PHICSENumPHISmallSize);
1451
1452 // Examine each PHI.
1453 bool Changed = false;
1454 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1455 if (ToRemove.contains(PN))
1456 continue;
1457 auto Inserted = PHISet.insert(PN);
1458 if (!Inserted.second) {
1459 // A duplicate. Replace this PHI with its duplicate.
1460 ++NumPHICSEs;
1461 PN->replaceAllUsesWith(*Inserted.first);
1462 ToRemove.insert(PN);
1463 Changed = true;
1464
1465 // The RAUW can change PHIs that we already visited. Start over from the
1466 // beginning.
1467 PHISet.clear();
1468 I = BB->begin();
1469 }
1470 }
1471
1472 return Changed;
1473}
1474
1475bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1476 SmallPtrSetImpl<PHINode *> &ToRemove) {
1477 if (
1478#ifndef NDEBUG
1479 !PHICSEDebugHash &&
1480#endif
1481 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1482 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1483 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1484}
1485
1486bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1487 SmallPtrSet<PHINode *, 8> ToRemove;
1488 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1489 for (PHINode *PN : ToRemove)
1490 PN->eraseFromParent();
1491 return Changed;
1492}
1493
1494static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1495 const DataLayout &DL) {
1496 V = V->stripPointerCasts();
1497
1498 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1499 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1500 // than the current alignment, as the known bits calculation should have
1501 // already taken it into account. However, this is not always the case,
1502 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1503 // doesn't.
1504 Align CurrentAlign = AI->getAlign();
1505 if (PrefAlign <= CurrentAlign)
1506 return CurrentAlign;
1507
1508 // If the preferred alignment is greater than the natural stack alignment
1509 // then don't round up. This avoids dynamic stack realignment.
1510 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1511 return CurrentAlign;
1512 AI->setAlignment(PrefAlign);
1513 return PrefAlign;
1514 }
1515
1516 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1517 // TODO: as above, this shouldn't be necessary.
1518 Align CurrentAlign = GO->getPointerAlignment(DL);
1519 if (PrefAlign <= CurrentAlign)
1520 return CurrentAlign;
1521
1522 // If there is a large requested alignment and we can, bump up the alignment
1523 // of the global. If the memory we set aside for the global may not be the
1524 // memory used by the final program then it is impossible for us to reliably
1525 // enforce the preferred alignment.
1526 if (!GO->canIncreaseAlignment())
1527 return CurrentAlign;
1528
1529 if (GO->isThreadLocal()) {
1530 unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1531 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1532 PrefAlign = Align(MaxTLSAlign);
1533 }
1534
1535 GO->setAlignment(PrefAlign);
1536 return PrefAlign;
1537 }
1538
1539 return Align(1);
1540}
1541
1542Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1543 const DataLayout &DL,
1544 const Instruction *CxtI,
1545 AssumptionCache *AC,
1546 const DominatorTree *DT) {
1547 assert(V->getType()->isPointerTy() &&
1548 "getOrEnforceKnownAlignment expects a pointer!");
1549
1550 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1551 unsigned TrailZ = Known.countMinTrailingZeros();
1552
1553 // Avoid trouble with ridiculously large TrailZ values, such as
1554 // those computed from a null pointer.
1555 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1556 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1557
1558 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
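  // For example, if the low four bits of V are known to be zero, TrailZ is 4
  // and the alignment computed here is 1 << 4 = 16 bytes.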
1559
1560 if (PrefAlign && *PrefAlign > Alignment)
1561 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1562
1563 // We don't need to make any adjustment.
1564 return Alignment;
1565}
1566
1567///===---------------------------------------------------------------------===//
1568/// Dbg Intrinsic utilities
1569///
1570
1571/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1572static bool PhiHasDebugValue(DILocalVariable *DIVar,
1573 DIExpression *DIExpr,
1574 PHINode *APN) {
1575 // Since we can't guarantee that the original dbg.declare intrinsic
1576 // is removed by LowerDbgDeclare(), we need to make sure that we are
1577 // not inserting the same dbg.value intrinsic over and over.
1578 SmallVector<DbgValueInst *, 1> DbgValues;
1579 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1580 findDbgValues(DbgValues, APN, &DbgVariableRecords);
1581 for (auto *DVI : DbgValues) {
1582 assert(is_contained(DVI->getValues(), APN));
1583 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1584 return true;
1585 }
1586 for (auto *DVR : DbgVariableRecords) {
1587 assert(is_contained(DVR->location_ops(), APN));
1588 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1589 return true;
1590 }
1591 return false;
1592}
1593
1594/// Check if the alloc size of \p ValTy is large enough to cover the variable
1595/// (or fragment of the variable) described by \p DII.
1596///
1597/// This is primarily intended as a helper for the different
1598/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1599/// describes an alloca'd variable, so we need to use the alloc size of the
1600/// value when doing the comparison. E.g. an i1 value will be identified as
1601/// covering an n-bit fragment, if the store size of i1 is at least n bits.
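///
/// For example, an i32 value covers a described fragment of 32 bits or fewer,
/// but not a 64-bit fragment.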
1602static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1603 const DataLayout &DL = DII->getModule()->getDataLayout();
1604 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1605 if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
1606 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1607
1608 // We can't always calculate the size of the DI variable (e.g. if it is a
1609 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1610 // instead.
1611 if (DII->isAddressOfVariable()) {
1612 // DII should have exactly 1 location when it is an address.
1613 assert(DII->getNumVariableLocationOps() == 1 &&
1614 "address of variable must have exactly 1 location operand.");
1615 if (auto *AI =
1616 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1617 if (std::optional<TypeSize> FragmentSize =
1618 AI->getAllocationSizeInBits(DL)) {
1619 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1620 }
1621 }
1622 }
1623 // Could not determine size of variable. Conservatively return false.
1624 return false;
1625}
1626// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
1627// the replacement for dbg.values.
1628static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1629 const DataLayout &DL = DVR->getModule()->getDataLayout();
1630 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1631 if (std::optional<uint64_t> FragmentSize = DVR->getFragmentSizeInBits())
1632 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1633
1634 // We can't always calculate the size of the DI variable (e.g. if it is a
1635 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1636 // instead.
1637 if (DVR->isAddressOfVariable()) {
1638 // DVR should have exactly 1 location when it is an address.
1639 assert(DVR->getNumVariableLocationOps() == 1 &&
1640 "address of variable must have exactly 1 location operand.");
1641 if (auto *AI =
1642 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
1643 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1644 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1645 }
1646 }
1647 }
1648 // Could not determine size of variable. Conservatively return false.
1649 return false;
1650}
1651
1652static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1653 DILocalVariable *DIVar,
1654 DIExpression *DIExpr,
1655 const DebugLoc &NewLoc,
1656 BasicBlock::iterator Instr) {
1657 if (!UseNewDbgInfoFormat) {
1658 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1659 (Instruction *)nullptr);
1660 DbgVal.get<Instruction *>()->insertBefore(Instr);
1661 } else {
1662 // RemoveDIs: if we're using the new debug-info format, allocate a
1663 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1664 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1665 DbgVariableRecord *DV =
1666 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1667 Instr->getParent()->insertDbgRecordBefore(DV, Instr);
1668 }
1669}
1670
1671static void insertDbgValueOrDbgVariableRecordAfter(
1672 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1673 const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
1674 if (!UseNewDbgInfoFormat) {
1675 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1676 (Instruction *)nullptr);
1677 DbgVal.get<Instruction *>()->insertAfter(&*Instr);
1678 } else {
1679 // RemoveDIs: if we're using the new debug-info format, allocate a
1680 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1681 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1682 DbgVariableRecord *DV =
1683 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1684 Instr->getParent()->insertDbgRecordAfter(DV, &*Instr);
1685 }
1686}
1687
1688/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1689/// that has an associated llvm.dbg.declare intrinsic.
1690 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1691 StoreInst *SI, DIBuilder &Builder) {
1692 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1693 auto *DIVar = DII->getVariable();
1694 assert(DIVar && "Missing variable");
1695 auto *DIExpr = DII->getExpression();
1696 Value *DV = SI->getValueOperand();
1697
1698 DebugLoc NewLoc = getDebugValueLoc(DII);
1699
1700 // If the alloca describes the variable itself, i.e. the expression in the
1701 // dbg.declare doesn't start with a dereference, we can perform the
1702 // conversion if the value covers the entire fragment of DII.
1703 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1704 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1705 // We conservatively ignore other dereferences, because the following two are
1706 // not equivalent:
1707 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1708 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1709 // The former is adding 2 to the address of the variable, whereas the latter
1710 // is adding 2 to the value of the variable. As such, we insist on just a
1711 // deref expression.
1712 bool CanConvert =
1713 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1714 valueCoversEntireFragment(DV->getType(), DII));
1715 if (CanConvert) {
1716 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1717 SI->getIterator());
1718 return;
1719 }
1720
1721 // FIXME: If storing to a part of the variable described by the dbg.declare,
1722 // then we want to insert a dbg.value for the corresponding fragment.
1723 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1724 << '\n');
1725 // For now, when there is a store to parts of the variable (but we do not
1726 // know which part) we insert a dbg.value intrinsic to indicate that we
1727 // know nothing about the variable's content.
1728 DV = UndefValue::get(DV->getType());
1729 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1730 SI->getIterator());
1731}
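// Rough IR sketch of the store case handled above (%x.addr, %v and !var are
// placeholder names):
//
//   call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !var,
//                               metadata !DIExpression())
//   store i32 %v, ptr %x.addr
//
// is augmented, just before the store, with approximately:
//
//   call void @llvm.dbg.value(metadata i32 %v, metadata !var,
//                             metadata !DIExpression())
//
// (or the equivalent record in the new debug-info format), so the variable's
// value stays described even if the alloca is later removed.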
1732
1733/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1734/// that has an associated llvm.dbg.declare intrinsic.
1735 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1736 LoadInst *LI, DIBuilder &Builder) {
1737 auto *DIVar = DII->getVariable();
1738 auto *DIExpr = DII->getExpression();
1739 assert(DIVar && "Missing variable");
1740
1741 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1742 // FIXME: If only referring to a part of the variable described by the
1743 // dbg.declare, then we want to insert a dbg.value for the corresponding
1744 // fragment.
1745 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1746 << *DII << '\n');
1747 return;
1748 }
1749
1750 DebugLoc NewLoc = getDebugValueLoc(DII);
1751
1752 // We are now tracking the loaded value instead of the address. In the
1753 // future if multi-location support is added to the IR, it might be
1754 // preferable to keep tracking both the loaded value and the original
1755 // address in case the alloca can not be elided.
1756 insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc,
1757 LI->getIterator());
1758}
1759
1760 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1761 StoreInst *SI, DIBuilder &Builder) {
1762 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1763 auto *DIVar = DVR->getVariable();
1764 assert(DIVar && "Missing variable");
1765 auto *DIExpr = DVR->getExpression();
1766 Value *DV = SI->getValueOperand();
1767
1768 DebugLoc NewLoc = getDebugValueLoc(DVR);
1769
1770 // If the alloca describes the variable itself, i.e. the expression in the
1771 // dbg.declare doesn't start with a dereference, we can perform the
1772 // conversion if the value covers the entire fragment of DVR.
1773 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1774 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1775 // We conservatively ignore other dereferences, because the following two are
1776 // not equivalent:
1777 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1778 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1779 // The former is adding 2 to the address of the variable, whereas the latter
1780 // is adding 2 to the value of the variable. As such, we insist on just a
1781 // deref expression.
1782 bool CanConvert =
1783 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1784 valueCoversEntireFragment(DV->getType(), DVR));
1785 if (CanConvert) {
1786 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1787 SI->getIterator());
1788 return;
1789 }
1790
1791 // FIXME: If storing to a part of the variable described by the dbg.declare,
1792 // then we want to insert a dbg.value for the corresponding fragment.
1793 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1794 << '\n');
1796
1797 // For now, when there is a store to parts of the variable (but we do not
1798 // know which part) we insert a dbg.value intrinsic to indicate that we
1799 // know nothing about the variable's content.
1800 DV = UndefValue::get(DV->getType());
1801 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1802 DbgVariableRecord *NewDVR =
1803 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1804 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1805}
1806
1807/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1808/// llvm.dbg.declare intrinsic.
1809 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1810 PHINode *APN, DIBuilder &Builder) {
1811 auto *DIVar = DII->getVariable();
1812 auto *DIExpr = DII->getExpression();
1813 assert(DIVar && "Missing variable");
1814
1815 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1816 return;
1817
1818 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1819 // FIXME: If only referring to a part of the variable described by the
1820 // dbg.declare, then we want to insert a dbg.value for the corresponding
1821 // fragment.
1822 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1823 << *DII << '\n');
1824 return;
1825 }
1826
1827 BasicBlock *BB = APN->getParent();
1828 auto InsertionPt = BB->getFirstInsertionPt();
1829
1830 DebugLoc NewLoc = getDebugValueLoc(DII);
1831
1832 // The block may be a catchswitch block, which does not have a valid
1833 // insertion point.
1834 // FIXME: Insert dbg.value markers in the successors when appropriate.
1835 if (InsertionPt != BB->end()) {
1836 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1837 InsertionPt);
1838 }
1839}
1840
1841 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1842 DIBuilder &Builder) {
1843 auto *DIVar = DVR->getVariable();
1844 auto *DIExpr = DVR->getExpression();
1845 assert(DIVar && "Missing variable");
1846
1847 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1848 // FIXME: If only referring to a part of the variable described by the
1849 // dbg.declare, then we want to insert a DbgVariableRecord for the
1850 // corresponding fragment.
1851 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1852 << *DVR << '\n');
1853 return;
1854 }
1855
1856 DebugLoc NewLoc = getDebugValueLoc(DVR);
1857
1858 // We are now tracking the loaded value instead of the address. In the
1859 // future if multi-location support is added to the IR, it might be
1860 // preferable to keep tracking both the loaded value and the original
1861 // address in case the alloca can not be elided.
1863
1864 // Create a DbgVariableRecord directly and insert.
1865 ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1866 DbgVariableRecord *DV =
1867 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1868 LI->getParent()->insertDbgRecordAfter(DV, LI);
1869}
1870
1871/// Determine whether this alloca is either a VLA or an array.
1872static bool isArray(AllocaInst *AI) {
1873 return AI->isArrayAllocation() ||
1874 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1875}
1876
1877/// Determine whether this alloca is a structure.
1878static bool isStructure(AllocaInst *AI) {
1879 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1880}
1881 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1882 DIBuilder &Builder) {
1883 auto *DIVar = DVR->getVariable();
1884 auto *DIExpr = DVR->getExpression();
1885 assert(DIVar && "Missing variable");
1886
1887 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1888 return;
1889
1890 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1891 // FIXME: If only referring to a part of the variable described by the
1892 // dbg.declare, then we want to insert a DbgVariableRecord for the
1893 // corresponding fragment.
1894 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1895 << *DVR << '\n');
1896 return;
1897 }
1898
1899 BasicBlock *BB = APN->getParent();
1900 auto InsertionPt = BB->getFirstInsertionPt();
1901
1902 DebugLoc NewLoc = getDebugValueLoc(DVR);
1903
1904 // The block may be a catchswitch block, which does not have a valid
1905 // insertion point.
1906 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1907 if (InsertionPt != BB->end()) {
1908 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1909 InsertionPt);
1910 }
1911}
1912
1913/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1914/// of llvm.dbg.value intrinsics.
1915 bool llvm::LowerDbgDeclare(Function &F) {
1916 bool Changed = false;
1917 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1918 SmallVector<DbgDeclareInst *, 4> Dbgs;
1919 SmallVector<DbgVariableRecord *> DVRs;
1920 for (auto &FI : F) {
1921 for (Instruction &BI : FI) {
1922 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1923 Dbgs.push_back(DDI);
1924 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1925 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1926 DVRs.push_back(&DVR);
1927 }
1928 }
1929 }
1930
1931 if (Dbgs.empty() && DVRs.empty())
1932 return Changed;
1933
1934 auto LowerOne = [&](auto *DDI) {
1935 AllocaInst *AI =
1936 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1937 // If this is an alloca for a scalar variable, insert a dbg.value
1938 // at each load and store to the alloca and erase the dbg.declare.
1939 // The dbg.values allow tracking a variable even if it is not
1940 // stored on the stack, while the dbg.declare can only describe
1941 // the stack slot (and at a lexical-scope granularity). Later
1942 // passes will attempt to elide the stack slot.
1943 if (!AI || isArray(AI) || isStructure(AI))
1944 return;
1945
1946 // A volatile load/store means that the alloca can't be elided anyway.
1947 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1948 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1949 return LI->isVolatile();
1950 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1951 return SI->isVolatile();
1952 return false;
1953 }))
1954 return;
1955
1956 SmallVector<const Value *, 8> WorkList;
1957 WorkList.push_back(AI);
1958 while (!WorkList.empty()) {
1959 const Value *V = WorkList.pop_back_val();
1960 for (const auto &AIUse : V->uses()) {
1961 User *U = AIUse.getUser();
1962 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1963 if (AIUse.getOperandNo() == 1)
1964 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1965 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1966 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1967 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1968 // This is a call by-value or some other instruction that takes a
1969 // pointer to the variable. Insert a *value* intrinsic that describes
1970 // the variable by dereferencing the alloca.
1971 if (!CI->isLifetimeStartOrEnd()) {
1972 DebugLoc NewLoc = getDebugValueLoc(DDI);
1973 auto *DerefExpr =
1974 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1975 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1976 DerefExpr, NewLoc,
1977 CI->getIterator());
1978 }
1979 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1980 if (BI->getType()->isPointerTy())
1981 WorkList.push_back(BI);
1982 }
1983 }
1984 }
1985 DDI->eraseFromParent();
1986 Changed = true;
1987 };
1988
1989 for_each(Dbgs, LowerOne);
1990 for_each(DVRs, LowerOne);
1991
1992 if (Changed)
1993 for (BasicBlock &BB : F)
1994 RemoveRedundantDbgInstrs(&BB);
1995
1996 return Changed;
1997}
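// Rough before/after sketch of LowerDbgDeclare on a scalar alloca (names are
// placeholders, IR abbreviated):
//
//   %x = alloca i32
//   call void @llvm.dbg.declare(metadata ptr %x, metadata !var, ...)
//   store i32 %a, ptr %x
//   %b = load i32, ptr %x
//
// becomes approximately:
//
//   %x = alloca i32
//   call void @llvm.dbg.value(metadata i32 %a, metadata !var, ...)
//   store i32 %a, ptr %x
//   %b = load i32, ptr %x
//   call void @llvm.dbg.value(metadata i32 %b, metadata !var, ...)
//
// with the dbg.declare erased; arrays, structures and allocas with volatile
// users are deliberately left alone, as guarded above.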
1998
1999// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
2000// debug-info out of the block's DbgVariableRecords rather than dbg.value
2001// intrinsics.
2002 static void
2003 insertDbgVariableRecordsForPHIs(BasicBlock *BB,
2004 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2005 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
2006 if (InsertedPHIs.size() == 0)
2007 return;
2008
2009 // Map existing PHI nodes to their DbgVariableRecords.
2010 DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
2011 for (auto &I : *BB) {
2012 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
2013 for (Value *V : DVR.location_ops())
2014 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2015 DbgValueMap.insert({Loc, &DVR});
2016 }
2017 }
2018 if (DbgValueMap.size() == 0)
2019 return;
2020
2021 // Map a pair of the destination BB and old DbgVariableRecord to the new
2022 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
2023 // more than one of the inserted PHIs in the same destination BB, we can
2024 // update the same DbgVariableRecord with all the new PHIs instead of creating
2025 // one copy for each.
2026 MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
2027 NewDbgValueMap;
2028 // Then iterate through the new PHIs and look to see if they use one of the
2029 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
2030 // propagate the info through the new PHI. If we use more than one new PHI in
2031 // a single destination BB with the same old dbg.value, merge the updates so
2032 // that we get a single new DbgVariableRecord with all the new PHIs.
2033 for (auto PHI : InsertedPHIs) {
2034 BasicBlock *Parent = PHI->getParent();
2035 // Avoid inserting a debug-info record into an EH block.
2036 if (Parent->getFirstNonPHI()->isEHPad())
2037 continue;
2038 for (auto VI : PHI->operand_values()) {
2039 auto V = DbgValueMap.find(VI);
2040 if (V != DbgValueMap.end()) {
2041 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
2042 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2043 if (NewDI == NewDbgValueMap.end()) {
2044 DbgVariableRecord *NewDbgII = DbgII->clone();
2045 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2046 }
2047 DbgVariableRecord *NewDbgII = NewDI->second;
2048 // If PHI contains VI as an operand more than once, we may have
2049 // already replaced it in NewDbgII; confirm that it is still present.
2050 if (is_contained(NewDbgII->location_ops(), VI))
2051 NewDbgII->replaceVariableLocationOp(VI, PHI);
2052 }
2053 }
2054 }
2055 // Insert the new DbgVariableRecords into their destination blocks.
2056 for (auto DI : NewDbgValueMap) {
2057 BasicBlock *Parent = DI.first.first;
2058 DbgVariableRecord *NewDbgII = DI.second;
2059 auto InsertionPt = Parent->getFirstInsertionPt();
2060 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2061
2062 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
2063 }
2064}
2065
2066/// Propagate dbg.value intrinsics through the newly inserted PHIs.
2067 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2068 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2069 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2070 if (InsertedPHIs.size() == 0)
2071 return;
2072
2073 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);
2074
2075 // Map existing PHI nodes to their dbg.values.
2076 ValueToValueMapTy DbgValueMap;
2077 for (auto &I : *BB) {
2078 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
2079 for (Value *V : DbgII->location_ops())
2080 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2081 DbgValueMap.insert({Loc, DbgII});
2082 }
2083 }
2084 if (DbgValueMap.size() == 0)
2085 return;
2086
2087 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2088 // so that if a dbg.value is being rewritten to use more than one of the
2089 // inserted PHIs in the same destination BB, we can update the same dbg.value
2090 // with all the new PHIs instead of creating one copy for each.
2091 MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
2092 DbgVariableIntrinsic *>
2093 NewDbgValueMap;
2094 // Then iterate through the new PHIs and look to see if they use one of the
2095 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2096 // propagate the info through the new PHI. If we use more than one new PHI in
2097 // a single destination BB with the same old dbg.value, merge the updates so
2098 // that we get a single new dbg.value with all the new PHIs.
2099 for (auto *PHI : InsertedPHIs) {
2100 BasicBlock *Parent = PHI->getParent();
2101 // Avoid inserting an intrinsic into an EH block.
2102 if (Parent->getFirstNonPHI()->isEHPad())
2103 continue;
2104 for (auto *VI : PHI->operand_values()) {
2105 auto V = DbgValueMap.find(VI);
2106 if (V != DbgValueMap.end()) {
2107 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
2108 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2109 if (NewDI == NewDbgValueMap.end()) {
2110 auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
2111 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2112 }
2113 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2114 // If PHI contains VI as an operand more than once, we may have
2115 // already replaced it in NewDbgII; confirm that it is still present.
2116 if (is_contained(NewDbgII->location_ops(), VI))
2117 NewDbgII->replaceVariableLocationOp(VI, PHI);
2118 }
2119 }
2120 }
2121 // Insert the new dbg.values into their destination blocks.
2122 for (auto DI : NewDbgValueMap) {
2123 BasicBlock *Parent = DI.first.first;
2124 auto *NewDbgII = DI.second;
2125 auto InsertionPt = Parent->getFirstInsertionPt();
2126 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2127 NewDbgII->insertBefore(&*InsertionPt);
2128 }
2129}
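// Sketch of the propagation above, with placeholder names: if %old is a PHI
// described by dbg.value(%old, !var, !DIExpression()) and SSA updating adds
// %new = phi ... [ %old, ... ] in a successor block, a clone of that
// dbg.value (or DbgVariableRecord) is placed at the successor's first
// insertion point with %old replaced by %new, so !var stays described along
// the new path; several new PHIs feeding the same block are merged into one
// cloned record rather than duplicated.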
2130
2131bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2132 DIBuilder &Builder, uint8_t DIExprFlags,
2133 int Offset) {
2134 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
2135 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address);
2136
2137 auto ReplaceOne = [&](auto *DII) {
2138 assert(DII->getVariable() && "Missing variable");
2139 auto *DIExpr = DII->getExpression();
2140 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
2141 DII->setExpression(DIExpr);
2142 DII->replaceVariableLocationOp(Address, NewAddress);
2143 };
2144
2145 for_each(DbgDeclares, ReplaceOne);
2146 for_each(DVRDeclares, ReplaceOne);
2147
2148 return !DbgDeclares.empty() || !DVRDeclares.empty();
2149}
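// Hypothetical caller-side sketch (the names and constants are only for
// illustration): a pass that folds an alloca into a larger frame object might
// call
//
//   replaceDbgDeclare(OldAlloca, FrameBase, DIB,
//                     DIExpression::ApplyOffset, /*Offset=*/8);
//
// so every dbg.declare / declare record now points at FrameBase with a +8
// offset prepended to its expression.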
2150
2151 static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2152 DILocalVariable *DIVar,
2153 DIExpression *DIExpr, Value *NewAddress,
2154 DbgValueInst *DVI,
2155 DbgVariableRecord *DVR,
2156 DIBuilder &Builder, int Offset) {
2157 assert(DIVar && "Missing variable");
2158
2159 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2160 // should do with the alloca pointer is dereference it. Otherwise we don't
2161 // know how to handle it and give up.
2162 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2163 DIExpr->getElement(0) != dwarf::DW_OP_deref)
2164 return;
2165
2166 // Insert the offset before the first deref.
2167 if (Offset)
2168 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
2169
2170 if (DVI) {
2171 DVI->setExpression(DIExpr);
2172 DVI->replaceVariableLocationOp(0u, NewAddress);
2173 } else {
2174 assert(DVR);
2175 DVR->setExpression(DIExpr);
2176 DVR->replaceVariableLocationOp(0u, NewAddress);
2177 }
2178}
2179
2180 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2181 DIBuilder &Builder, int Offset) {
2182 SmallVector<DbgValueInst *, 1> DbgUsers;
2183 SmallVector<DbgVariableRecord *, 1> DPUsers;
2184 findDbgValues(DbgUsers, AI, &DPUsers);
2185
2186 // Attempt to replace dbg.values that use this alloca.
2187 for (auto *DVI : DbgUsers)
2188 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
2189 DVI->getExpression(), NewAllocaAddress, DVI,
2190 nullptr, Builder, Offset);
2191
2192 // Replace any DbgVariableRecords that use this alloca.
2193 for (DbgVariableRecord *DVR : DPUsers)
2194 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2195 DVR->getExpression(), NewAllocaAddress, nullptr,
2196 DVR, Builder, Offset);
2197}
2198
2199/// Where possible to salvage debug information for \p I do so.
2200/// If not possible mark undef.
2201 void llvm::salvageDebugInfo(Instruction &I) {
2202 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2203 SmallVector<DbgVariableRecord *, 1> DPUsers;
2204 findDbgUsers(DbgUsers, &I, &DPUsers);
2205 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
2206}
2207
2208template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2209 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2210 // Only instructions can be salvaged at the moment.
2211 if (!I)
2212 return;
2213
2214 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2215 "address-expression shouldn't have fragment info");
2216
2217 // The address component of a dbg.assign cannot be variadic.
2218 uint64_t CurrentLocOps = 0;
2219 SmallVector<Value *, 4> AdditionalValues;
2220 SmallVector<uint64_t, 16> Ops;
2221 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2222
2223 // Check if the salvage failed.
2224 if (!NewV)
2225 return;
2226
2227 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2228 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2229 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2230 "address-expression shouldn't have fragment info");
2231
2232 // Salvage succeeds if no additional values are required.
2233 if (AdditionalValues.empty()) {
2234 Assign->setAddress(NewV);
2235 Assign->setAddressExpression(SalvagedExpr);
2236 } else {
2237 Assign->setKillAddress();
2238 }
2239}
2240
2241 void llvm::salvageDebugInfoForDbgValues(
2242 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2243 ArrayRef<DbgVariableRecord *> DPUsers) {
2244 // These are arbitrarily chosen limits on the maximum number of values and the
2245 // maximum size of a debug expression we can salvage up to, used for
2246 // performance reasons.
2247 const unsigned MaxDebugArgs = 16;
2248 const unsigned MaxExpressionSize = 128;
2249 bool Salvaged = false;
2250
2251 for (auto *DII : DbgUsers) {
2252 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
2253 if (DAI->getAddress() == &I) {
2254 salvageDbgAssignAddress(DAI);
2255 Salvaged = true;
2256 }
2257 if (DAI->getValue() != &I)
2258 continue;
2259 }
2260
2261 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2262 // pointing out the value as a DWARF memory location description.
2263 bool StackValue = isa<DbgValueInst>(DII);
2264 auto DIILocation = DII->location_ops();
2265 assert(
2266 is_contained(DIILocation, &I) &&
2267 "DbgVariableIntrinsic must use salvaged instruction as its location");
2268 SmallVector<Value *, 4> AdditionalValues;
2269 // `I` may appear more than once in DII's location ops, and each use of `I`
2270 // must be updated in the DIExpression and potentially have additional
2271 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2272 // DIILocation.
2273 Value *Op0 = nullptr;
2274 DIExpression *SalvagedExpr = DII->getExpression();
2275 auto LocItr = find(DIILocation, &I);
2276 while (SalvagedExpr && LocItr != DIILocation.end()) {
2277 SmallVector<uint64_t, 16> Ops;
2278 unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
2279 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2280 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2281 if (!Op0)
2282 break;
2283 SalvagedExpr =
2284 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2285 LocItr = std::find(++LocItr, DIILocation.end(), &I);
2286 }
2287 // salvageDebugInfoImpl should either fail when examining the first element
2288 // of DbgUsers, or not fail at all.
2289 if (!Op0)
2290 break;
2291
2292 DII->replaceVariableLocationOp(&I, Op0);
2293 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2294 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2295 DII->setExpression(SalvagedExpr);
2296 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
2297 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2298 MaxDebugArgs) {
2299 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2300 } else {
2301 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2302 // supported in those instructions. Also do not salvage if the resulting
2303 // DIArgList would contain an unreasonably large number of values.
2304 DII->setKillLocation();
2305 }
2306 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2307 Salvaged = true;
2308 }
2309 // Duplicate of above block for DbgVariableRecords.
2310 for (auto *DVR : DPUsers) {
2311 if (DVR->isDbgAssign()) {
2312 if (DVR->getAddress() == &I) {
2313 salvageDbgAssignAddress(DVR);
2314 Salvaged = true;
2315 }
2316 if (DVR->getValue() != &I)
2317 continue;
2318 }
2319
2320 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2321 // are implicitly pointing out the value as a DWARF memory location
2322 // description.
2323 bool StackValue =
2324 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2325 auto DVRLocation = DVR->location_ops();
2326 assert(
2327 is_contained(DVRLocation, &I) &&
2328 "DbgVariableIntrinsic must use salvaged instruction as its location");
2329 SmallVector<Value *, 4> AdditionalValues;
2330 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2331 // must be updated in the DIExpression and potentially have additional
2332 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2333 // DVRLocation.
2334 Value *Op0 = nullptr;
2335 DIExpression *SalvagedExpr = DVR->getExpression();
2336 auto LocItr = find(DVRLocation, &I);
2337 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2338 SmallVector<uint64_t, 16> Ops;
2339 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2340 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2341 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2342 if (!Op0)
2343 break;
2344 SalvagedExpr =
2345 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2346 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2347 }
2348 // salvageDebugInfoImpl should either fail when examining the first element
2349 // of DbgUsers, or not fail at all.
2350 if (!Op0)
2351 break;
2352
2353 DVR->replaceVariableLocationOp(&I, Op0);
2354 bool IsValidSalvageExpr =
2355 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2356 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2357 DVR->setExpression(SalvagedExpr);
2358 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2359 IsValidSalvageExpr &&
2360 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2361 MaxDebugArgs) {
2362 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2363 } else {
2364 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2365 // currently only valid for stack value expressions.
2366 // Also do not salvage if the resulting DIArgList would contain an
2367 // unreasonably large number of values.
2368 DVR->setKillLocation();
2369 }
2370 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2371 Salvaged = true;
2372 }
2373
2374 if (Salvaged)
2375 return;
2376
2377 for (auto *DII : DbgUsers)
2378 DII->setKillLocation();
2379
2380 for (auto *DVR : DPUsers)
2381 DVR->setKillLocation();
2382}
2383
2384 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2385 uint64_t CurrentLocOps,
2386 SmallVectorImpl<uint64_t> &Opcodes,
2387 SmallVectorImpl<Value *> &AdditionalValues) {
2388 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2389 // Rewrite a GEP into a DIExpression.
2390 MapVector<Value *, APInt> VariableOffsets;
2391 APInt ConstantOffset(BitWidth, 0);
2392 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2393 return nullptr;
2394 if (!VariableOffsets.empty() && !CurrentLocOps) {
2395 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2396 CurrentLocOps = 1;
2397 }
2398 for (const auto &Offset : VariableOffsets) {
2399 AdditionalValues.push_back(Offset.first);
2400 assert(Offset.second.isStrictlyPositive() &&
2401 "Expected strictly positive multiplier for offset.");
2402 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2403 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2404 dwarf::DW_OP_plus});
2405 }
2406 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2407 return GEP->getOperand(0);
2408}
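// Worked example (a sketch; the scale depends on the DataLayout): salvaging
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// appends roughly {DW_OP_LLVM_arg <n>, DW_OP_constu 4, DW_OP_mul,
// DW_OP_plus} to the expression, adds %i to AdditionalValues, folds any
// constant offset in via DIExpression::appendOffset, and returns %base as
// the remaining location operand.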
2409
2410 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2411 switch (Opcode) {
2412 case Instruction::Add:
2413 return dwarf::DW_OP_plus;
2414 case Instruction::Sub:
2415 return dwarf::DW_OP_minus;
2416 case Instruction::Mul:
2417 return dwarf::DW_OP_mul;
2418 case Instruction::SDiv:
2419 return dwarf::DW_OP_div;
2420 case Instruction::SRem:
2421 return dwarf::DW_OP_mod;
2422 case Instruction::Or:
2423 return dwarf::DW_OP_or;
2424 case Instruction::And:
2425 return dwarf::DW_OP_and;
2426 case Instruction::Xor:
2427 return dwarf::DW_OP_xor;
2428 case Instruction::Shl:
2429 return dwarf::DW_OP_shl;
2430 case Instruction::LShr:
2431 return dwarf::DW_OP_shr;
2432 case Instruction::AShr:
2433 return dwarf::DW_OP_shra;
2434 default:
2435 // TODO: Salvage from each kind of binop we know about.
2436 return 0;
2437 }
2438}
2439
2440 static void handleSSAValueOperands(uint64_t CurrentLocOps,
2441 SmallVectorImpl<uint64_t> &Opcodes,
2442 SmallVectorImpl<Value *> &AdditionalValues,
2443 Instruction *I) {
2444 if (!CurrentLocOps) {
2445 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2446 CurrentLocOps = 1;
2447 }
2448 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2449 AdditionalValues.push_back(I->getOperand(1));
2450}
2451
2452 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2453 SmallVectorImpl<uint64_t> &Opcodes,
2454 SmallVectorImpl<Value *> &AdditionalValues) {
2455 // Handle binary operations with constant integer operands as a special case.
2456 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2457 // Values wider than 64 bits cannot be represented within a DIExpression.
2458 if (ConstInt && ConstInt->getBitWidth() > 64)
2459 return nullptr;
2460
2461 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2462 // Push any Constant Int operand onto the expression stack.
2463 if (ConstInt) {
2464 uint64_t Val = ConstInt->getSExtValue();
2465 // Add or Sub Instructions with a constant operand can potentially be
2466 // simplified.
2467 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2468 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2469 DIExpression::appendOffset(Opcodes, Offset);
2470 return BI->getOperand(0);
2471 }
2472 Opcodes.append({dwarf::DW_OP_constu, Val});
2473 } else {
2474 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2475 }
2476
2477 // Add salvaged binary operator to expression stack, if it has a valid
2478 // representation in a DIExpression.
2479 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2480 if (!DwarfBinOp)
2481 return nullptr;
2482 Opcodes.push_back(DwarfBinOp);
2483 return BI->getOperand(0);
2484}
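// Worked example (sketch): for `%a = add i64 %x, 16` the expression simply
// gains a +16 offset via DIExpression::appendOffset and %x is returned;
// `sub` becomes a -16 offset. With a non-constant right-hand side the
// operand is added as an extra location (DW_OP_LLVM_arg) and combined with
// the matching DWARF opcode, e.g. DW_OP_mul for `mul`.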
2485
2487 // The signedness of the operation is implicit in the typed stack, signed and
2488 // unsigned instructions map to the same DWARF opcode.
2489 switch (Pred) {
2490 case CmpInst::ICMP_EQ:
2491 return dwarf::DW_OP_eq;
2492 case CmpInst::ICMP_NE:
2493 return dwarf::DW_OP_ne;
2494 case CmpInst::ICMP_UGT:
2495 case CmpInst::ICMP_SGT:
2496 return dwarf::DW_OP_gt;
2497 case CmpInst::ICMP_UGE:
2498 case CmpInst::ICMP_SGE:
2499 return dwarf::DW_OP_ge;
2500 case CmpInst::ICMP_ULT:
2501 case CmpInst::ICMP_SLT:
2502 return dwarf::DW_OP_lt;
2503 case CmpInst::ICMP_ULE:
2504 case CmpInst::ICMP_SLE:
2505 return dwarf::DW_OP_le;
2506 default:
2507 return 0;
2508 }
2509}
2510
2511 Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2512 SmallVectorImpl<uint64_t> &Opcodes,
2513 SmallVectorImpl<Value *> &AdditionalValues) {
2514 // Handle icmp operations with constant integer operands as a special case.
2515 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2516 // Values wider than 64 bits cannot be represented within a DIExpression.
2517 if (ConstInt && ConstInt->getBitWidth() > 64)
2518 return nullptr;
2519 // Push any Constant Int operand onto the expression stack.
2520 if (ConstInt) {
2521 if (Icmp->isSigned())
2522 Opcodes.push_back(dwarf::DW_OP_consts);
2523 else
2524 Opcodes.push_back(dwarf::DW_OP_constu);
2525 uint64_t Val = ConstInt->getSExtValue();
2526 Opcodes.push_back(Val);
2527 } else {
2528 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2529 }
2530
2531 // Add salvaged binary operator to expression stack, if it has a valid
2532 // representation in a DIExpression.
2533 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2534 if (!DwarfIcmpOp)
2535 return nullptr;
2536 Opcodes.push_back(DwarfIcmpOp);
2537 return Icmp->getOperand(0);
2538}
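// Worked example (sketch): salvaging `%c = icmp sgt i32 %x, 7` appends
// {DW_OP_consts, 7, DW_OP_gt} and returns %x; an unsigned predicate would
// push the constant with DW_OP_constu but share the same DW_OP_gt
// comparison opcode.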
2539
2540 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2541 SmallVectorImpl<uint64_t> &Ops,
2542 SmallVectorImpl<Value *> &AdditionalValues) {
2543 auto &M = *I.getModule();
2544 auto &DL = M.getDataLayout();
2545
2546 if (auto *CI = dyn_cast<CastInst>(&I)) {
2547 Value *FromValue = CI->getOperand(0);
2548 // No-op casts are irrelevant for debug info.
2549 if (CI->isNoopCast(DL)) {
2550 return FromValue;
2551 }
2552
2553 Type *Type = CI->getType();
2554 if (Type->isPointerTy())
2555 Type = DL.getIntPtrType(Type);
2556 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2557 if (Type->isVectorTy() ||
2558 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2559 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2560 return nullptr;
2561
2562 llvm::Type *FromType = FromValue->getType();
2563 if (FromType->isPointerTy())
2564 FromType = DL.getIntPtrType(FromType);
2565
2566 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2567 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2568
2569 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2570 isa<SExtInst>(&I));
2571 Ops.append(ExtOps.begin(), ExtOps.end());
2572 return FromValue;
2573 }
2574
2575 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2576 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2577 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2578 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2579 if (auto *IC = dyn_cast<ICmpInst>(&I))
2580 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2581
2582 // *Not* to do: we should not attempt to salvage load instructions,
2583 // because the validity and lifetime of a dbg.value containing
2584 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2585 return nullptr;
2586}
2587
2588/// A replacement for a dbg.value expression.
2589using DbgValReplacement = std::optional<DIExpression *>;
2590
2591/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2592/// possibly moving/undefing users to prevent use-before-def. Returns true if
2593/// changes are made.
2594 static bool rewriteDebugUsers(
2595 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2596 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2597 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2598 // Find debug users of From.
2599 SmallVector<DbgVariableIntrinsic *, 1> Users;
2600 SmallVector<DbgVariableRecord *, 1> DPUsers;
2601 findDbgUsers(Users, &From, &DPUsers);
2602 if (Users.empty() && DPUsers.empty())
2603 return false;
2604
2605 // Prevent use-before-def of To.
2606 bool Changed = false;
2607
2608 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2609 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2610 if (isa<Instruction>(&To)) {
2611 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2612
2613 for (auto *DII : Users) {
2614 // It's common to see a debug user between From and DomPoint. Move it
2615 // after DomPoint to preserve the variable update without any reordering.
2616 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2617 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2618 DII->moveAfter(&DomPoint);
2619 Changed = true;
2620
2621 // Users which otherwise aren't dominated by the replacement value must
2622 // be salvaged or deleted.
2623 } else if (!DT.dominates(&DomPoint, DII)) {
2624 UndefOrSalvage.insert(DII);
2625 }
2626 }
2627
2628 // DbgVariableRecord implementation of the above.
2629 for (auto *DVR : DPUsers) {
2630 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2631 Instruction *NextNonDebug = MarkedInstr;
2632 // The next instruction might still be a dbg.declare, skip over it.
2633 if (isa<DbgVariableIntrinsic>(NextNonDebug))
2634 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2635
2636 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2637 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2638 DVR->removeFromParent();
2639 // Ensure there's a marker.
2640 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2641 Changed = true;
2642 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2643 UndefOrSalvageDVR.insert(DVR);
2644 }
2645 }
2646 }
2647
2648 // Update debug users without use-before-def risk.
2649 for (auto *DII : Users) {
2650 if (UndefOrSalvage.count(DII))
2651 continue;
2652
2653 DbgValReplacement DVRepl = RewriteExpr(*DII);
2654 if (!DVRepl)
2655 continue;
2656
2657 DII->replaceVariableLocationOp(&From, &To);
2658 DII->setExpression(*DVRepl);
2659 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2660 Changed = true;
2661 }
2662 for (auto *DVR : DPUsers) {
2663 if (UndefOrSalvageDVR.count(DVR))
2664 continue;
2665
2666 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2667 if (!DVRepl)
2668 continue;
2669
2670 DVR->replaceVariableLocationOp(&From, &To);
2671 DVR->setExpression(*DVRepl);
2672 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2673 Changed = true;
2674 }
2675
2676 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
2677 // Try to salvage the remaining debug users.
2678 salvageDebugInfo(From);
2679 Changed = true;
2680 }
2681
2682 return Changed;
2683}
2684
2685/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2686/// losslessly preserve the bits and semantics of the value. This predicate is
2687/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2688///
2689/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2690/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2691/// and also does not allow lossless pointer <-> integer conversions.
2692 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2693 Type *ToTy) {
2694 // Trivially compatible types.
2695 if (FromTy == ToTy)
2696 return true;
2697
2698 // Handle compatible pointer <-> integer conversions.
2699 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2700 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2701 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2702 !DL.isNonIntegralPointerType(ToTy);
2703 return SameSize && LosslessConversion;
2704 }
2705
2706 // TODO: This is not exhaustive.
2707 return false;
2708}
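// Examples (a sketch, assuming a target with 64-bit integral pointers):
// i64 <-> ptr qualifies, since the sizes match and neither side is a
// non-integral pointer; <2 x i64> -> <4 x i32> or float -> i32 do not, and
// fall through to the conservative `return false` above.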
2709
2710 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2711 Instruction &DomPoint, DominatorTree &DT) {
2712 // Exit early if From has no debug users.
2713 if (!From.isUsedByMetadata())
2714 return false;
2715
2716 assert(&From != &To && "Can't replace something with itself");
2717
2718 Type *FromTy = From.getType();
2719 Type *ToTy = To.getType();
2720
2721 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2722 return DII.getExpression();
2723 };
2724 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2725 return DVR.getExpression();
2726 };
2727
2728 // Handle no-op conversions.
2729 Module &M = *From.getModule();
2730 const DataLayout &DL = M.getDataLayout();
2731 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2732 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2733
2734 // Handle integer-to-integer widening and narrowing.
2735 // FIXME: Use DW_OP_convert when it's available everywhere.
2736 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2737 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2738 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2739 assert(FromBits != ToBits && "Unexpected no-op conversion");
2740
2741 // When the width of the result grows, assume that a debugger will only
2742 // access the low `FromBits` bits when inspecting the source variable.
2743 if (FromBits < ToBits)
2744 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2745
2746 // The width of the result has shrunk. Use sign/zero extension to describe
2747 // the source variable's high bits.
2748 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2749 DILocalVariable *Var = DII.getVariable();
2750
2751 // Without knowing signedness, sign/zero extension isn't possible.
2752 auto Signedness = Var->getSignedness();
2753 if (!Signedness)
2754 return std::nullopt;
2755
2756 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2757 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2758 Signed);
2759 };
2760 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
2761 // than on dbg.value intrinsics.
2762 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2763 DILocalVariable *Var = DVR.getVariable();
2764
2765 // Without knowing signedness, sign/zero extension isn't possible.
2766 auto Signedness = Var->getSignedness();
2767 if (!Signedness)
2768 return std::nullopt;
2769
2770 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2771 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2772 Signed);
2773 };
2774 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
2775 SignOrZeroExtDVR);
2776 }
2777
2778 // TODO: Floating-point conversions, vectors.
2779 return false;
2780}
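// Sketch of the narrowing case above, with placeholder names: when
// `%t = trunc i64 %wide to i32` is taking over the debug uses of %wide,
// each dbg.value / record is retargeted to %t and, provided the variable's
// signedness is known, DIExpression::appendExt adds a zero- or sign-extension
// back to 64 bits so a debugger can still reconstruct the full-width value.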
2781
2782 bool llvm::handleUnreachableTerminator(
2783 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2784 bool Changed = false;
2785 // RemoveDIs: erase debug-info on this instruction manually.
2786 I->dropDbgRecords();
2787 for (Use &U : I->operands()) {
2788 Value *Op = U.get();
2789 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2790 U.set(PoisonValue::get(Op->getType()));
2791 PoisonedValues.push_back(Op);
2792 Changed = true;
2793 }
2794 }
2795
2796 return Changed;
2797}
2798
2799std::pair<unsigned, unsigned>
2800 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2801 unsigned NumDeadInst = 0;
2802 unsigned NumDeadDbgInst = 0;
2803 // Delete the instructions backwards; this reduces the number of def-use
2804 // and use-def chains that have to be updated.
2805 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2808
2809 while (EndInst != &BB->front()) {
2810 // Delete the next to last instruction.
2811 Instruction *Inst = &*--EndInst->getIterator();
2812 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2813 Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2814 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2815 // EHPads can't have DbgVariableRecords attached to them, but it might be
2816 // possible for things with token type.
2817 Inst->dropDbgRecords();
2818 EndInst = Inst;
2819 continue;
2820 }
2821 if (isa<DbgInfoIntrinsic>(Inst))
2822 ++NumDeadDbgInst;
2823 else
2824 ++NumDeadInst;
2825 // RemoveDIs: erasing debug-info must be done manually.
2826 Inst->dropDbgRecords();
2827 Inst->eraseFromParent();
2828 }
2829 return {NumDeadInst, NumDeadDbgInst};
2830}
2831
2832unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2833 DomTreeUpdater *DTU,
2834 MemorySSAUpdater *MSSAU) {
2835 BasicBlock *BB = I->getParent();
2836
2837 if (MSSAU)
2838 MSSAU->changeToUnreachable(I);
2839
2840 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2841
2842 // Loop over all of the successors, removing BB's entry from any PHI
2843 // nodes.
2844 for (BasicBlock *Successor : successors(BB)) {
2845 Successor->removePredecessor(BB, PreserveLCSSA);
2846 if (DTU)
2847 UniqueSuccessors.insert(Successor);
2848 }
2849 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2850 UI->setDebugLoc(I->getDebugLoc());
2851
2852 // All instructions after this are dead.
2853 unsigned NumInstrsRemoved = 0;
2854 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2855 while (BBI != BBE) {
2856 if (!BBI->use_empty())
2857 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2858 BBI++->eraseFromParent();
2859 ++NumInstrsRemoved;
2860 }
2861 if (DTU) {
2863 Updates.reserve(UniqueSuccessors.size());
2864 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2865 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2866 DTU->applyUpdates(Updates);
2867 }
2869 return NumInstrsRemoved;
2870}
2871
2872 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2873 SmallVector<Value *, 8> Args(II->args());
2874 SmallVector<OperandBundleDef, 1> OpBundles;
2875 II->getOperandBundlesAsDefs(OpBundles);
2876 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2877 II->getCalledOperand(), Args, OpBundles);
2878 NewCall->setCallingConv(II->getCallingConv());
2879 NewCall->setAttributes(II->getAttributes());
2880 NewCall->setDebugLoc(II->getDebugLoc());
2881 NewCall->copyMetadata(*II);
2882
2883 // If the invoke had profile metadata, try converting them for CallInst.
2884 uint64_t TotalWeight;
2885 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2886 // Set the total weight if it fits into i32, otherwise reset.
2887 MDBuilder MDB(NewCall->getContext());
2888 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2889 ? nullptr
2890 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2891 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2892 }
2893
2894 return NewCall;
2895}
2896
2897// changeToCall - Convert the specified invoke into a normal call.
2898 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2899 CallInst *NewCall = createCallMatchingInvoke(II);
2900 NewCall->takeName(II);
2901 NewCall->insertBefore(II);
2902 II->replaceAllUsesWith(NewCall);
2903
2904 // Follow the call by a branch to the normal destination.
2905 BasicBlock *NormalDestBB = II->getNormalDest();
2906 BranchInst::Create(NormalDestBB, II->getIterator());
2907
2908 // Update PHI nodes in the unwind destination
2909 BasicBlock *BB = II->getParent();
2910 BasicBlock *UnwindDestBB = II->getUnwindDest();
2911 UnwindDestBB->removePredecessor(BB);
2912 II->eraseFromParent();
2913 if (DTU)
2914 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2915 return NewCall;
2916}
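// Sketch of the CFG rewrite performed by changeToCall (labels are
// placeholders):
//
//   invoke void @f() to label %normal unwind label %lpad
//
// becomes
//
//   call void @f()
//   br label %normal
//
// and %lpad loses this block as a predecessor (plus a matching
// DominatorTree::Delete edge update when a DomTreeUpdater is supplied).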
2917
2918 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2919 BasicBlock *UnwindEdge,
2920 DomTreeUpdater *DTU) {
2921 BasicBlock *BB = CI->getParent();
2922
2923 // Convert this function call into an invoke instruction. First, split the
2924 // basic block.
2925 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2926 CI->getName() + ".noexc");
2927
2928 // Delete the unconditional branch inserted by SplitBlock
2929 BB->back().eraseFromParent();
2930
2931 // Create the new invoke instruction.
2932 SmallVector<Value *, 8> InvokeArgs(CI->args());
2933 SmallVector<OperandBundleDef, 1> OpBundles;
2934
2935 CI->getOperandBundlesAsDefs(OpBundles);
2936
2937 // Note: we're round tripping operand bundles through memory here, and that
2938 // can potentially be avoided with a cleverer API design that we do not have
2939 // as of this time.
2940
2941 InvokeInst *II =
2942 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2943 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2944 II->setDebugLoc(CI->getDebugLoc());
2945 II->setCallingConv(CI->getCallingConv());
2946 II->setAttributes(CI->getAttributes());
2947 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2948
2949 if (DTU)
2950 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2951
2952 // Make sure that anything using the call now uses the invoke! This also
2953 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2954 CI->replaceAllUsesWith(II);
2955
2956 // Delete the original call
2957 Split->front().eraseFromParent();
2958 return Split;
2959}
2960
2961 static bool markAliveBlocks(Function &F,
2962 SmallPtrSetImpl<BasicBlock *> &Reachable,
2963 DomTreeUpdater *DTU = nullptr) {
2964 SmallVector<BasicBlock *, 128> Worklist;
2965 BasicBlock *BB = &F.front();
2966 Worklist.push_back(BB);
2967 Reachable.insert(BB);
2968 bool Changed = false;
2969 do {
2970 BB = Worklist.pop_back_val();
2971
2972 // Do a quick scan of the basic block, turning any obviously unreachable
2973 // instructions into LLVM unreachable insts. The instruction combining pass
2974 // canonicalizes unreachable insts into stores to null or undef.
2975 for (Instruction &I : *BB) {
2976 if (auto *CI = dyn_cast<CallInst>(&I)) {
2977 Value *Callee = CI->getCalledOperand();
2978 // Handle intrinsic calls.
2979 if (Function *F = dyn_cast<Function>(Callee)) {
2980 auto IntrinsicID = F->getIntrinsicID();
2981 // Assumptions that are known to be false are equivalent to
2982 // unreachable. Also, if the condition is undefined, then we make the
2983 // choice most beneficial to the optimizer, and choose that to also be
2984 // unreachable.
2985 if (IntrinsicID == Intrinsic::assume) {
2986 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2987 // Don't insert a call to llvm.trap right before the unreachable.
2988 changeToUnreachable(CI, false, DTU);
2989 Changed = true;
2990 break;
2991 }
2992 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2993 // A call to the guard intrinsic bails out of the current
2994 // compilation unit if the predicate passed to it is false. If the
2995 // predicate is a constant false, then we know the guard will bail
2996 // out of the current compile unconditionally, so all code following
2997 // it is dead.
2998 //
2999 // Note: unlike in llvm.assume, it is not "obviously profitable" for
3000 // guards to treat `undef` as `false` since a guard on `undef` can
3001 // still be useful for widening.
3002 if (match(CI->getArgOperand(0), m_Zero()))
3003 if (!isa<UnreachableInst>(CI->getNextNode())) {
3004 changeToUnreachable(CI->getNextNode(), false, DTU);
3005 Changed = true;
3006 break;
3007 }
3008 }
3009 } else if ((isa<ConstantPointerNull>(Callee) &&
3010 !NullPointerIsDefined(CI->getFunction(),
3011 cast<PointerType>(Callee->getType())
3012 ->getAddressSpace())) ||
3013 isa<UndefValue>(Callee)) {
3014 changeToUnreachable(CI, false, DTU);
3015 Changed = true;
3016 break;
3017 }
3018 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
3019 // If we found a call to a no-return function, insert an unreachable
3020 // instruction after it. Make sure there isn't *already* one there
3021 // though.
3022 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
3023 // Don't insert a call to llvm.trap right before the unreachable.
3024 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
3025 Changed = true;
3026 }
3027 break;
3028 }
3029 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
3030 // Store to undef and store to null are undefined and used to signal
3031 // that they should be changed to unreachable by passes that can't
3032 // modify the CFG.
3033
3034 // Don't touch volatile stores.
3035 if (SI->isVolatile()) continue;
3036
3037 Value *Ptr = SI->getOperand(1);
3038
3039 if (isa<UndefValue>(Ptr) ||
3040 (isa<ConstantPointerNull>(Ptr) &&
3041 !NullPointerIsDefined(SI->getFunction(),
3042 SI->getPointerAddressSpace()))) {
3043 changeToUnreachable(SI, false, DTU);
3044 Changed = true;
3045 break;
3046 }
3047 }
3048 }
3049
3050 Instruction *Terminator = BB->getTerminator();
3051 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3052 // Turn invokes that call 'nounwind' functions into ordinary calls.
3053 Value *Callee = II->getCalledOperand();
3054 if ((isa<ConstantPointerNull>(Callee) &&
3055 !NullPointerIsDefined(BB->getParent())) ||
3056 isa<UndefValue>(Callee)) {
3057 changeToUnreachable(II, false, DTU);
3058 Changed = true;
3059 } else {
3060 if (II->doesNotReturn() &&
3061 !isa<UnreachableInst>(II->getNormalDest()->front())) {
3062 // If we found an invoke of a no-return function,
3063 // create a new empty basic block with an `unreachable` terminator,
3064 // and set it as the normal destination for the invoke,
3065 // unless that is already the case.
3066 // Note that the original normal destination could have other uses.
3067 BasicBlock *OrigNormalDest = II->getNormalDest();
3068 OrigNormalDest->removePredecessor(II->getParent());
3069 LLVMContext &Ctx = II->getContext();
3070 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3071 Ctx, OrigNormalDest->getName() + ".unreachable",
3072 II->getFunction(), OrigNormalDest);
3073 new UnreachableInst(Ctx, UnreachableNormalDest);
3074 II->setNormalDest(UnreachableNormalDest);
3075 if (DTU)
3076 DTU->applyUpdates(
3077 {{DominatorTree::Delete, BB, OrigNormalDest},
3078 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3079 Changed = true;
3080 }
3081 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3082 if (II->use_empty() && !II->mayHaveSideEffects()) {
3083 // jump to the normal destination branch.
3084 BasicBlock *NormalDestBB = II->getNormalDest();
3085 BasicBlock *UnwindDestBB = II->getUnwindDest();
3086 BranchInst::Create(NormalDestBB, II->getIterator());
3087 UnwindDestBB->removePredecessor(II->getParent());
3088 II->eraseFromParent();
3089 if (DTU)
3090 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3091 } else
3092 changeToCall(II, DTU);
3093 Changed = true;
3094 }
3095 }
3096 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
3097 // Remove catchpads which cannot be reached.
3098 struct CatchPadDenseMapInfo {
3099 static CatchPadInst *getEmptyKey() {
3100 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
3101 }
3102
3103 static CatchPadInst *getTombstoneKey() {
3104 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
3105 }
3106
3107 static unsigned getHashValue(CatchPadInst *CatchPad) {
3108 return static_cast<unsigned>(hash_combine_range(
3109 CatchPad->value_op_begin(), CatchPad->value_op_end()));
3110 }
3111
3112 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
3113 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
3114 RHS == getEmptyKey() || RHS == getTombstoneKey())
3115 return LHS == RHS;
3116 return LHS->isIdenticalTo(RHS);
3117 }
3118 };
3119
3120 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
3121 // Set of unique CatchPads.
3122 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3123 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3124 HandlerSet;
3125 detail::DenseSetEmpty Empty;
3126 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3127 E = CatchSwitch->handler_end();
3128 I != E; ++I) {
3129 BasicBlock *HandlerBB = *I;
3130 if (DTU)
3131 ++NumPerSuccessorCases[HandlerBB];
3132 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
3133 if (!HandlerSet.insert({CatchPad, Empty}).second) {
3134 if (DTU)
3135 --NumPerSuccessorCases[HandlerBB];
3136 CatchSwitch->removeHandler(I);
3137 --I;
3138 --E;
3139 Changed = true;
3140 }
3141 }
3142 if (DTU) {
3143 std::vector<DominatorTree::UpdateType> Updates;
3144 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3145 if (I.second == 0)
3146 Updates.push_back({DominatorTree::Delete, BB, I.first});
3147 DTU->applyUpdates(Updates);
3148 }
3149 }
3150
3151 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
3152 for (BasicBlock *Successor : successors(BB))
3153 if (Reachable.insert(Successor).second)
3154 Worklist.push_back(Successor);
3155 } while (!Worklist.empty());
3156 return Changed;
3157}
3158
3159 Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3160 Instruction *TI = BB->getTerminator();
3161
3162 if (auto *II = dyn_cast<InvokeInst>(TI))
3163 return changeToCall(II, DTU);
3164
3165 Instruction *NewTI;
3166 BasicBlock *UnwindDest;
3167
3168 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3169 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
3170 UnwindDest = CRI->getUnwindDest();
3171 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
3172 auto *NewCatchSwitch = CatchSwitchInst::Create(
3173 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
3174 CatchSwitch->getName(), CatchSwitch->getIterator());
3175 for (BasicBlock *PadBB : CatchSwitch->handlers())
3176 NewCatchSwitch->addHandler(PadBB);
3177
3178 NewTI = NewCatchSwitch;
3179 UnwindDest = CatchSwitch->getUnwindDest();
3180 } else {
3181 llvm_unreachable("Could not find unwind successor");
3182 }
3183
3184 NewTI->takeName(TI);
3185 NewTI->setDebugLoc(TI->getDebugLoc());
3186 UnwindDest->removePredecessor(BB);
3187 TI->replaceAllUsesWith(NewTI);
3188 TI->eraseFromParent();
3189 if (DTU)
3190 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3191 return NewTI;
3192}
3193
3194/// removeUnreachableBlocks - Remove blocks that are not reachable, even
3195/// if they are in a dead cycle. Return true if a change was made, false
3196/// otherwise.
3197 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3198 MemorySSAUpdater *MSSAU) {
3199 SmallPtrSet<BasicBlock *, 16> Reachable;
3200 bool Changed = markAliveBlocks(F, Reachable, DTU);
3201
3202 // If there are unreachable blocks in the CFG...
3203 if (Reachable.size() == F.size())
3204 return Changed;
3205
3206 assert(Reachable.size() < F.size());
3207
3208 // Are there any blocks left to actually delete?
3209 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3210 for (BasicBlock &BB : F) {
3211 // Skip reachable basic blocks
3212 if (Reachable.count(&BB))
3213 continue;
3214 // Skip already-deleted blocks
3215 if (DTU && DTU->isBBPendingDeletion(&BB))
3216 continue;
3217 BlocksToRemove.insert(&BB);
3218 }
3219
3220 if (BlocksToRemove.empty())
3221 return Changed;
3222
3223 Changed = true;
3224 NumRemoved += BlocksToRemove.size();
3225
3226 if (MSSAU)
3227 MSSAU->removeBlocks(BlocksToRemove);
3228
3229 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3230
3231 return Changed;
3232}
3233
3234 void llvm::combineMetadata(Instruction *K, const Instruction *J,
3235 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3236 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3237 K->dropUnknownNonDebugMetadata(KnownIDs);
3238 K->getAllMetadataOtherThanDebugLoc(Metadata);
3239 for (const auto &MD : Metadata) {
3240 unsigned Kind = MD.first;
3241 MDNode *JMD = J->getMetadata(Kind);
3242 MDNode *KMD = MD.second;
3243
3244 switch (Kind) {
3245 default:
3246 K->setMetadata(Kind, nullptr); // Remove unknown metadata
3247 break;
3248 case LLVMContext::MD_dbg:
3249 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3250 case LLVMContext::MD_DIAssignID:
3251 K->mergeDIAssignID(J);
3252 break;
3253 case LLVMContext::MD_tbaa:
3254 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
3255 break;
3256 case LLVMContext::MD_alias_scope:
3257 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3258 break;
3259 case LLVMContext::MD_noalias:
3260 case LLVMContext::MD_mem_parallel_loop_access:
3261 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
3262 break;
3263 case LLVMContext::MD_access_group:
3264 K->setMetadata(LLVMContext::MD_access_group,
3265 intersectAccessGroups(K, J));
3266 break;
3267 case LLVMContext::MD_range:
3268 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3269 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
3270 break;
3271 case LLVMContext::MD_fpmath:
3272 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3273 break;
3274 case LLVMContext::MD_invariant_load:
3275 // If K moves, only set the !invariant.load if it is present in both
3276 // instructions.
3277 if (DoesKMove)
3278 K->setMetadata(Kind, JMD);
3279 break;
3280 case LLVMContext::MD_nonnull:
3281 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3282 K->setMetadata(Kind, JMD);
3283 break;
3284 case LLVMContext::MD_invariant_group:
3285 // Preserve !invariant.group in K.
3286 break;
3287 case LLVMContext::MD_align:
3288 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3289      K->setMetadata(
3290          Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3291      break;
3292 case LLVMContext::MD_dereferenceable:
3293 case LLVMContext::MD_dereferenceable_or_null:
3294 if (DoesKMove)
3295      K->setMetadata(Kind,
3296          MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3297      break;
3298 case LLVMContext::MD_preserve_access_index:
3299 // Preserve !preserve.access.index in K.
3300 break;
3301 case LLVMContext::MD_noundef:
3302 // If K does move, keep noundef if it is present in both instructions.
3303 if (DoesKMove)
3304 K->setMetadata(Kind, JMD);
3305 break;
3306 case LLVMContext::MD_nontemporal:
3307 // Preserve !nontemporal if it is present on both instructions.
3308 K->setMetadata(Kind, JMD);
3309 break;
3310 case LLVMContext::MD_prof:
3311 if (DoesKMove)
3312 K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3313 break;
3314 }
3315 }
3316 // Set !invariant.group from J if J has it. If both instructions have it
3317 // then we will just pick it from J - even when they are different.
3318  // Also make sure that K is a load or store - e.g. combining bitcast with load
3319 // could produce bitcast with invariant.group metadata, which is invalid.
3320 // FIXME: we should try to preserve both invariant.group md if they are
3321 // different, but right now instruction can only have one invariant.group.
3322 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3323 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3324 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3325}
3326
3327void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3328                                 bool KDominatesJ) {
3329 unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3330 LLVMContext::MD_alias_scope,
3331 LLVMContext::MD_noalias,
3332 LLVMContext::MD_range,
3333 LLVMContext::MD_fpmath,
3334 LLVMContext::MD_invariant_load,
3335 LLVMContext::MD_nonnull,
3336 LLVMContext::MD_invariant_group,
3337 LLVMContext::MD_align,
3338 LLVMContext::MD_dereferenceable,
3339 LLVMContext::MD_dereferenceable_or_null,
3340 LLVMContext::MD_access_group,
3341 LLVMContext::MD_preserve_access_index,
3342 LLVMContext::MD_prof,
3343 LLVMContext::MD_nontemporal,
3344 LLVMContext::MD_noundef};
3345 combineMetadata(K, J, KnownIDs, KDominatesJ);
3346}
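// Illustrative usage sketch, not part of Local.cpp: a CSE-style transform that
// keeps an earlier instruction K and deletes a duplicate J should intersect
// metadata before rewriting uses. K and J are assumed to be known-identical
// instructions, with K dominating J.
//
//   combineMetadataForCSE(K, J, /*KDominatesJ=*/true);
//   J->replaceAllUsesWith(K);
//   J->eraseFromParent();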
3347
3348void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3349  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3350  Source.getAllMetadata(MD);
3351 MDBuilder MDB(Dest.getContext());
3352 Type *NewType = Dest.getType();
3353 const DataLayout &DL = Source.getModule()->getDataLayout();
3354 for (const auto &MDPair : MD) {
3355 unsigned ID = MDPair.first;
3356 MDNode *N = MDPair.second;
3357 // Note, essentially every kind of metadata should be preserved here! This
3358 // routine is supposed to clone a load instruction changing *only its type*.
3359 // The only metadata it makes sense to drop is metadata which is invalidated
3360 // when the pointer type changes. This should essentially never be the case
3361 // in LLVM, but we explicitly switch over only known metadata to be
3362 // conservatively correct. If you are adding metadata to LLVM which pertains
3363 // to loads, you almost certainly want to add it here.
3364 switch (ID) {
3365 case LLVMContext::MD_dbg:
3366 case LLVMContext::MD_tbaa:
3367 case LLVMContext::MD_prof:
3368 case LLVMContext::MD_fpmath:
3369 case LLVMContext::MD_tbaa_struct:
3370 case LLVMContext::MD_invariant_load:
3371 case LLVMContext::MD_alias_scope:
3372 case LLVMContext::MD_noalias:
3373 case LLVMContext::MD_nontemporal:
3374 case LLVMContext::MD_mem_parallel_loop_access:
3375 case LLVMContext::MD_access_group:
3376 case LLVMContext::MD_noundef:
3377 // All of these directly apply.
3378 Dest.setMetadata(ID, N);
3379 break;
3380
3381 case LLVMContext::MD_nonnull:
3382 copyNonnullMetadata(Source, N, Dest);
3383 break;
3384
3385 case LLVMContext::MD_align:
3386 case LLVMContext::MD_dereferenceable:
3387 case LLVMContext::MD_dereferenceable_or_null:
3388 // These only directly apply if the new type is also a pointer.
3389 if (NewType->isPointerTy())
3390 Dest.setMetadata(ID, N);
3391 break;
3392
3393 case LLVMContext::MD_range:
3394 copyRangeMetadata(DL, Source, N, Dest);
3395 break;
3396 }
3397 }
3398}
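// Illustrative usage sketch, not part of Local.cpp: re-emitting a load at a
// different type while keeping whatever metadata still applies. 'Builder',
// 'OldLoad' and 'NewTy' are assumptions for the example only.
//
//   LoadInst *NewLoad = Builder.CreateAlignedLoad(
//       NewTy, OldLoad->getPointerOperand(), OldLoad->getAlign());
//   copyMetadataForLoad(*NewLoad, *OldLoad);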
3399
3400void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3401  auto *ReplInst = dyn_cast<Instruction>(Repl);
3402 if (!ReplInst)
3403 return;
3404
3405 // Patch the replacement so that it is not more restrictive than the value
3406 // being replaced.
3407 WithOverflowInst *UnusedWO;
3408 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3409 // overflowing binary operator, nuw/nsw flags may no longer hold.
3410 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3411 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3412 ReplInst->dropPoisonGeneratingFlags();
3413 // Note that if 'I' is a load being replaced by some operation,
3414 // for example, by an arithmetic operation, then andIRFlags()
3415 // would just erase all math flags from the original arithmetic
3416 // operation, which is clearly not wanted and not needed.
3417 else if (!isa<LoadInst>(I))
3418 ReplInst->andIRFlags(I);
3419
3420 // FIXME: If both the original and replacement value are part of the
3421 // same control-flow region (meaning that the execution of one
3422 // guarantees the execution of the other), then we can combine the
3423 // noalias scopes here and do better than the general conservative
3424 // answer used in combineMetadata().
3425
3426 // In general, GVN unifies expressions over different control-flow
3427 // regions, and so we need a conservative combination of the noalias
3428 // scopes.
3429 combineMetadataForCSE(ReplInst, I, false);
3430}
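// Illustrative usage sketch, not part of Local.cpp: a GVN-style replacement
// patches flags and metadata on the surviving value before rewriting uses.
// 'I' is the redundant instruction and 'Repl' the value standing in for it.
//
//   patchReplacementInstruction(I, Repl);
//   I->replaceAllUsesWith(Repl);
//   I->eraseFromParent();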
3431
3432template <typename RootType, typename DominatesFn>
3433static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3434                                         const RootType &Root,
3435 const DominatesFn &Dominates) {
3436 assert(From->getType() == To->getType());
3437
3438 unsigned Count = 0;
3439 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3440 if (!Dominates(Root, U))
3441 continue;
3442 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3443 From->printAsOperand(dbgs());
3444 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3445 U.set(To);
3446 ++Count;
3447 }
3448 return Count;
3449}
3450
3451unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3452  assert(From->getType() == To->getType());
3453 auto *BB = From->getParent();
3454 unsigned Count = 0;
3455
3456 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3457 auto *I = cast<Instruction>(U.getUser());
3458 if (I->getParent() == BB)
3459 continue;
3460 U.set(To);
3461 ++Count;
3462 }
3463 return Count;
3464}
3465
3466unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3467                                        DominatorTree &DT,
3468 const BasicBlockEdge &Root) {
3469 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3470 return DT.dominates(Root, U);
3471 };
3472 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
3473}
3474
3475unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3476                                        DominatorTree &DT,
3477 const BasicBlock *BB) {
3478 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3479 return DT.dominates(BB, U);
3480 };
3481 return ::replaceDominatedUsesWith(From, To, BB, Dominates);
3482}
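// Illustrative usage sketch, not part of Local.cpp: after proving that %x
// equals constant C on the true edge of a conditional branch 'BI', only the
// uses dominated by that edge may be rewritten. 'BI', 'X', 'C' and 'DT' are
// assumptions for the example only.
//
//   BasicBlockEdge TrueEdge(BI->getParent(), BI->getSuccessor(0));
//   unsigned NumReplaced = replaceDominatedUsesWith(X, C, DT, TrueEdge);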
3483
3484bool llvm::callsGCLeafFunction(const CallBase *Call,
3485                               const TargetLibraryInfo &TLI) {
3486 // Check if the function is specifically marked as a gc leaf function.
3487 if (Call->hasFnAttr("gc-leaf-function"))
3488 return true;
3489 if (const Function *F = Call->getCalledFunction()) {
3490 if (F->hasFnAttribute("gc-leaf-function"))
3491 return true;
3492
3493 if (auto IID = F->getIntrinsicID()) {
3494 // Most LLVM intrinsics do not take safepoints.
3495 return IID != Intrinsic::experimental_gc_statepoint &&
3496 IID != Intrinsic::experimental_deoptimize &&
3497 IID != Intrinsic::memcpy_element_unordered_atomic &&
3498 IID != Intrinsic::memmove_element_unordered_atomic;
3499 }
3500 }
3501
3502 // Lib calls can be materialized by some passes, and won't be
3503 // marked as 'gc-leaf-function.' All available Libcalls are
3504 // GC-leaf.
3505 LibFunc LF;
3506 if (TLI.getLibFunc(*Call, LF)) {
3507 return TLI.has(LF);
3508 }
3509
3510 return false;
3511}
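// Illustrative usage sketch, not part of Local.cpp: statepoint-style rewriting
// uses this query to decide which call sites need a safepoint. 'Call', 'TLI'
// and the 'NeedsSafepoint' worklist are assumptions for the example only.
//
//   if (!callsGCLeafFunction(Call, TLI))
//     NeedsSafepoint.push_back(Call);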
3512
3513void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3514                               LoadInst &NewLI) {
3515 auto *NewTy = NewLI.getType();
3516
3517 // This only directly applies if the new type is also a pointer.
3518 if (NewTy->isPointerTy()) {
3519 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3520 return;
3521 }
3522
3523 // The only other translation we can do is to integral loads with !range
3524 // metadata.
3525 if (!NewTy->isIntegerTy())
3526 return;
3527
3528 MDBuilder MDB(NewLI.getContext());
3529 const Value *Ptr = OldLI.getPointerOperand();
3530 auto *ITy = cast<IntegerType>(NewTy);
3531 auto *NullInt = ConstantExpr::getPtrToInt(
3532 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3533 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3534 NewLI.setMetadata(LLVMContext::MD_range,
3535 MDB.createRange(NonNullInt, NullInt));
3536}
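// Illustrative IR-level sketch (assumed example, not a test from this file):
// when a !nonnull pointer load is re-emitted as a 64-bit integer load, the
// nonnull fact survives as a wrapped !range that excludes zero:
//
//   %p = load ptr, ptr %a, !nonnull !0
//     -->
//   %i = load i64, ptr %a, !range !{i64 1, i64 0}   ; [1, 0) == "anything but 0"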
3537
3538void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3539                             MDNode *N, LoadInst &NewLI) {
3540 auto *NewTy = NewLI.getType();
3541 // Simply copy the metadata if the type did not change.
3542 if (NewTy == OldLI.getType()) {
3543 NewLI.setMetadata(LLVMContext::MD_range, N);
3544 return;
3545 }
3546
3547 // Give up unless it is converted to a pointer where there is a single very
3548 // valuable mapping we can do reliably.
3549 // FIXME: It would be nice to propagate this in more ways, but the type
3550 // conversions make it hard.
3551 if (!NewTy->isPointerTy())
3552 return;
3553
3554 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3555  if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3556      !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3557    MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
3558 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3559 }
3560}
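// Illustrative IR-level sketch (assumed example, the reverse of the one
// above): an integer !range that excludes zero becomes !nonnull once the load
// produces a pointer again; other range translations are given up on.
//
//   %i = load i64, ptr %a, !range !{i64 1, i64 0}
//     -->
//   %p = load ptr, ptr %a, !nonnull !1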
3561
3562void llvm::dropDebugUsers(Instruction &I) {
3563  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3564  SmallVector<DbgVariableRecord *, 1> DPUsers;
3565  findDbgUsers(DbgUsers, &I, &DPUsers);
3566 for (auto *DII : DbgUsers)
3567 DII->eraseFromParent();
3568 for (auto *DVR : DPUsers)
3569 DVR->eraseFromParent();
3570}
3571
3572void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3573                                    BasicBlock *BB) {
3574  // Since we are moving the instructions out of their basic block, we do not
3575 // retain their original debug locations (DILocations) and debug intrinsic
3576 // instructions.
3577 //
3578 // Doing so would degrade the debugging experience and adversely affect the
3579 // accuracy of profiling information.
3580 //
3581 // Currently, when hoisting the instructions, we take the following actions:
3582 // - Remove their debug intrinsic instructions.
3583 // - Set their debug locations to the values from the insertion point.
3584 //
3585 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3586 // need to be deleted, is because there will not be any instructions with a
3587 // DILocation in either branch left after performing the transformation. We
3588 // can only insert a dbg.value after the two branches are joined again.
3589 //
3590 // See PR38762, PR39243 for more details.
3591 //
3592 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3593 // encode predicated DIExpressions that yield different results on different
3594 // code paths.
3595
3596 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3597 Instruction *I = &*II;
3598 I->dropUBImplyingAttrsAndMetadata();
3599 if (I->isUsedByMetadata())
3600 dropDebugUsers(*I);
3601 // RemoveDIs: drop debug-info too as the following code does.
3602 I->dropDbgRecords();
3603 if (I->isDebugOrPseudoInst()) {
3604 // Remove DbgInfo and pseudo probe Intrinsics.
3605 II = I->eraseFromParent();
3606 continue;
3607 }
3608 I->setDebugLoc(InsertPt->getDebugLoc());
3609 ++II;
3610 }
3611 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3612 BB->getTerminator()->getIterator());
3613}
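// Illustrative usage sketch, not part of Local.cpp: if-conversion hoists both
// arms of a hammock up in front of the branch that guarded them. 'DomBB' holds
// the conditional branch 'BI', and 'ThenBB'/'ElseBB' are its two single-use
// successors; all four names are assumptions for the example only.
//
//   hoistAllInstructionsInto(DomBB, BI, ThenBB);
//   hoistAllInstructionsInto(DomBB, BI, ElseBB);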
3614
3615DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3616                                             Type &Ty) {
3617 // Create integer constant expression.
3618 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3619 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3620 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3621 return InitIntOpt ? DIB.createConstantValueExpression(
3622 static_cast<uint64_t>(*InitIntOpt))
3623 : nullptr;
3624 };
3625
3626 if (isa<ConstantInt>(C))
3627 return createIntegerExpression(C);
3628
3629 auto *FP = dyn_cast<ConstantFP>(&C);
3630 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3631 const APFloat &APF = FP->getValueAPF();
3632 APInt const &API = APF.bitcastToAPInt();
3633 if (auto Temp = API.getZExtValue())
3634 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3635 return DIB.createConstantValueExpression(*API.getRawData());
3636 }
3637
3638 if (!Ty.isPointerTy())
3639 return nullptr;
3640
3641 if (isa<ConstantPointerNull>(C))
3642 return DIB.createConstantValueExpression(0);
3643
3644 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3645 if (CE->getOpcode() == Instruction::IntToPtr) {
3646 const Value *V = CE->getOperand(0);
3647 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3648 return createIntegerExpression(*CI);
3649 }
3650 return nullptr;
3651}
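// Illustrative usage sketch, not part of Local.cpp: describing a variable that
// was optimized down to a constant. 'DIB', 'CI' (a ConstantInt), 'Var', 'Loc'
// and 'InsertPt' are assumptions for the example only.
//
//   if (DIExpression *Expr = getExpressionForConstant(DIB, *CI, *CI->getType()))
//     DIB.insertDbgValueIntrinsic(CI, Var, Expr, Loc, InsertPt);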
3652
3653namespace {
3654
3655/// A potential constituent of a bitreverse or bswap expression. See
3656/// collectBitParts for a fuller explanation.
3657struct BitPart {
3658 BitPart(Value *P, unsigned BW) : Provider(P) {
3659 Provenance.resize(BW);
3660 }
3661
3662 /// The Value that this is a bitreverse/bswap of.
3663 Value *Provider;
3664
3665 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3666 /// in Provider becomes bit B in the result of this expression.
3667 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3668
3669 enum { Unset = -1 };
3670};
3671
3672} // end anonymous namespace
3673
3674/// Analyze the specified subexpression and see if it is capable of providing
3675/// pieces of a bswap or bitreverse. The subexpression provides a potential
3676/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3677/// the output of the expression came from a corresponding bit in some other
3678/// value. This function is recursive, and the end result is a mapping of
3679/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3680/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3681///
3682/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3683/// that the expression deposits the low byte of %X into the high byte of the
3684/// result and that all other bits are zero. This expression is accepted and a
3685/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3686/// [0-7].
3687///
3688/// For vector types, all analysis is performed at the per-element level. No
3689/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3690/// constant masks must be splatted across all elements.
3691///
3692/// To avoid revisiting values, the BitPart results are memoized into the
3693/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3694/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3695/// store BitParts objects, not pointers. As we need the concept of a nullptr
3696/// BitParts (Value has been analyzed and the analysis failed), we use an Optional
3697/// type instead to provide the same functionality.
3698///
3699/// Because we pass around references into \c BPS, we must use a container that
3700/// does not invalidate internal references (std::map instead of DenseMap).
3701static const std::optional<BitPart> &
3702collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3703 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3704 bool &FoundRoot) {
3705 auto I = BPS.find(V);
3706 if (I != BPS.end())
3707 return I->second;
3708
3709 auto &Result = BPS[V] = std::nullopt;
3710 auto BitWidth = V->getType()->getScalarSizeInBits();
3711
3712 // Can't do integer/elements > 128 bits.
3713 if (BitWidth > 128)
3714 return Result;
3715
3716  // Prevent stack overflow by limiting the recursion depth
3717  if (Depth == BitPartRecursionMaxDepth) {
3718    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3719 return Result;
3720 }
3721
3722 if (auto *I = dyn_cast<Instruction>(V)) {
3723 Value *X, *Y;
3724 const APInt *C;
3725
3726 // If this is an or instruction, it may be an inner node of the bswap.
3727 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3728 // Check we have both sources and they are from the same provider.
3729 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3730 Depth + 1, FoundRoot);
3731 if (!A || !A->Provider)
3732 return Result;
3733
3734 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3735 Depth + 1, FoundRoot);
3736 if (!B || A->Provider != B->Provider)
3737 return Result;
3738
3739 // Try and merge the two together.
3740 Result = BitPart(A->Provider, BitWidth);
3741 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3742 if (A->Provenance[BitIdx] != BitPart::Unset &&
3743 B->Provenance[BitIdx] != BitPart::Unset &&
3744 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3745 return Result = std::nullopt;
3746
3747 if (A->Provenance[BitIdx] == BitPart::Unset)
3748 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3749 else
3750 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3751 }
3752
3753 return Result;
3754 }
3755
3756 // If this is a logical shift by a constant, recurse then shift the result.
3757 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3758 const APInt &BitShift = *C;
3759
3760 // Ensure the shift amount is defined.
3761 if (BitShift.uge(BitWidth))
3762 return Result;
3763
3764 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3765 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3766 return Result;
3767
3768 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3769 Depth + 1, FoundRoot);
3770 if (!Res)
3771 return Result;
3772 Result = Res;
3773
3774 // Perform the "shift" on BitProvenance.
3775 auto &P = Result->Provenance;
3776 if (I->getOpcode() == Instruction::Shl) {
3777 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3778 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3779 } else {
3780 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3781 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3782 }
3783
3784 return Result;
3785 }
3786
3787 // If this is a logical 'and' with a mask that clears bits, recurse then
3788 // unset the appropriate bits.
3789 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3790 const APInt &AndMask = *C;
3791
3792 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3793 // early exit.
3794 unsigned NumMaskedBits = AndMask.popcount();
3795 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3796 return Result;
3797
3798 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3799 Depth + 1, FoundRoot);
3800 if (!Res)
3801 return Result;
3802 Result = Res;
3803
3804 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3805 // If the AndMask is zero for this bit, clear the bit.
3806 if (AndMask[BitIdx] == 0)
3807 Result->Provenance[BitIdx] = BitPart::Unset;
3808 return Result;
3809 }
3810
3811 // If this is a zext instruction zero extend the result.
3812 if (match(V, m_ZExt(m_Value(X)))) {
3813 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3814 Depth + 1, FoundRoot);
3815 if (!Res)
3816 return Result;
3817
3818 Result = BitPart(Res->Provider, BitWidth);
3819 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3820 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3821 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3822 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3823 Result->Provenance[BitIdx] = BitPart::Unset;
3824 return Result;
3825 }
3826
3827 // If this is a truncate instruction, extract the lower bits.
3828 if (match(V, m_Trunc(m_Value(X)))) {
3829 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3830 Depth + 1, FoundRoot);
3831 if (!Res)
3832 return Result;
3833
3834 Result = BitPart(Res->Provider, BitWidth);
3835 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3836 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3837 return Result;
3838 }
3839
3840    // BITREVERSE - most likely due to us previously matching a partial
3841 // bitreverse.
3842 if (match(V, m_BitReverse(m_Value(X)))) {
3843 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3844 Depth + 1, FoundRoot);
3845 if (!Res)
3846 return Result;
3847
3848 Result = BitPart(Res->Provider, BitWidth);
3849 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3850 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3851 return Result;
3852 }
3853
3854    // BSWAP - most likely due to us previously matching a partial bswap.
3855 if (match(V, m_BSwap(m_Value(X)))) {
3856 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3857 Depth + 1, FoundRoot);
3858 if (!Res)
3859 return Result;
3860
3861 unsigned ByteWidth = BitWidth / 8;
3862 Result = BitPart(Res->Provider, BitWidth);
3863 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3864 unsigned ByteBitOfs = ByteIdx * 8;
3865 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3866 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3867 Res->Provenance[ByteBitOfs + BitIdx];
3868 }
3869 return Result;
3870 }
3871
3872 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3873 // amount (modulo).
3874 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3875 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3876 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3877 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3878 // We can treat fshr as a fshl by flipping the modulo amount.
3879 unsigned ModAmt = C->urem(BitWidth);
3880 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3881 ModAmt = BitWidth - ModAmt;
3882
3883 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3884 if (!MatchBitReversals && (ModAmt % 8) != 0)
3885 return Result;
3886
3887 // Check we have both sources and they are from the same provider.
3888 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3889 Depth + 1, FoundRoot);
3890 if (!LHS || !LHS->Provider)
3891 return Result;
3892
3893 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3894 Depth + 1, FoundRoot);
3895 if (!RHS || LHS->Provider != RHS->Provider)
3896 return Result;
3897
3898 unsigned StartBitRHS = BitWidth - ModAmt;
3899 Result = BitPart(LHS->Provider, BitWidth);
3900 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3901 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3902 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3903 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3904 return Result;
3905 }
3906 }
3907
3908 // If we've already found a root input value then we're never going to merge
3909 // these back together.
3910 if (FoundRoot)
3911 return Result;
3912
3913 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3914 // be the root input value to the bswap/bitreverse.
3915 FoundRoot = true;
3916 Result = BitPart(V, BitWidth);
3917 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3918 Result->Provenance[BitIdx] = BitIdx;
3919 return Result;
3920}
3921
3922static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3923 unsigned BitWidth) {
3924 if (From % 8 != To % 8)
3925 return false;
3926 // Convert from bit indices to byte indices and check for a byte reversal.
3927 From >>= 3;
3928 To >>= 3;
3929 BitWidth >>= 3;
3930 return From == BitWidth - To - 1;
3931}
3932
3933static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3934 unsigned BitWidth) {
3935 return From == BitWidth - To - 1;
3936}
3937
3938bool llvm::recognizeBSwapOrBitReverseIdiom(
3939    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3940 SmallVectorImpl<Instruction *> &InsertedInsts) {
3941 if (!match(I, m_Or(m_Value(), m_Value())) &&
3942 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3943 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3944 !match(I, m_BSwap(m_Value())))
3945 return false;
3946 if (!MatchBSwaps && !MatchBitReversals)
3947 return false;
3948 Type *ITy = I->getType();
3949 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3950 return false; // Can't do integer/elements > 128 bits.
3951
3952 // Try to find all the pieces corresponding to the bswap.
3953 bool FoundRoot = false;
3954 std::map<Value *, std::optional<BitPart>> BPS;
3955 const auto &Res =
3956 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3957 if (!Res)
3958 return false;
3959 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3960 assert(all_of(BitProvenance,
3961 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3962 "Illegal bit provenance index");
3963
3964 // If the upper bits are zero, then attempt to perform as a truncated op.
3965 Type *DemandedTy = ITy;
3966 if (BitProvenance.back() == BitPart::Unset) {
3967 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3968 BitProvenance = BitProvenance.drop_back();
3969 if (BitProvenance.empty())
3970 return false; // TODO - handle null value?
3971 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3972 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3973 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3974 }
3975
3976 // Check BitProvenance hasn't found a source larger than the result type.
3977 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3978 if (DemandedBW > ITy->getScalarSizeInBits())
3979 return false;
3980
3981 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3982 // only byteswap values with an even number of bytes.
3983 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3984 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3985 bool OKForBitReverse = MatchBitReversals;
3986 for (unsigned BitIdx = 0;
3987 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3988 if (BitProvenance[BitIdx] == BitPart::Unset) {
3989 DemandedMask.clearBit(BitIdx);
3990 continue;
3991 }
3992 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3993 DemandedBW);
3994 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3995 BitIdx, DemandedBW);
3996 }
3997
3998 Intrinsic::ID Intrin;
3999 if (OKForBSwap)
4000 Intrin = Intrinsic::bswap;
4001 else if (OKForBitReverse)
4002 Intrin = Intrinsic::bitreverse;
4003 else
4004 return false;
4005
4006 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
4007 Value *Provider = Res->Provider;
4008
4009 // We may need to truncate the provider.
4010 if (DemandedTy != Provider->getType()) {
4011 auto *Trunc =
4012 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
4013 InsertedInsts.push_back(Trunc);
4014 Provider = Trunc;
4015 }
4016
4017 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
4018 InsertedInsts.push_back(Result);
4019
4020 if (!DemandedMask.isAllOnes()) {
4021 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
4022 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
4023 InsertedInsts.push_back(Result);
4024 }
4025
4026 // We may need to zeroextend back to the result type.
4027 if (ITy != Result->getType()) {
4028 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
4029 InsertedInsts.push_back(ExtInst);
4030 }
4031
4032 return true;
4033}
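// Illustrative IR-level sketch (assumed example, not a test from this file):
// the classic i32 byte-swap idiom whose root 'or' this routine is handed, e.g.
// by InstCombine with MatchBSwaps=true:
//
//   %hi  = shl  i32 %x, 24
//   %mh  = shl  i32 %x, 8
//   %mh2 = and  i32 %mh, 16711680        ; 0x00FF0000
//   %ml  = lshr i32 %x, 8
//   %ml2 = and  i32 %ml, 65280           ; 0x0000FF00
//   %lo  = lshr i32 %x, 24
//   %a   = or   i32 %hi, %mh2
//   %b   = or   i32 %a, %ml2
//   %r   = or   i32 %b, %lo              ; --> call i32 @llvm.bswap.i32(i32 %x)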
4034
4035// CodeGen has special handling for some string functions that may replace
4036// them with target-specific intrinsics. Since that'd skip our interceptors
4037// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4038// we mark affected calls as NoBuiltin, which will disable optimization
4039// in CodeGen.
4040void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4041    CallInst *CI, const TargetLibraryInfo *TLI) {
4042 Function *F = CI->getCalledFunction();
4043 LibFunc Func;
4044 if (F && !F->hasLocalLinkage() && F->hasName() &&
4045 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
4046 !F->doesNotAccessMemory())
4047 CI->addFnAttr(Attribute::NoBuiltin);
4048}
4049
4050bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
4051  // We can't have a PHI with a metadata type.
4052 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
4053 return false;
4054
4055 // Early exit.
4056 if (!isa<Constant>(I->getOperand(OpIdx)))
4057 return true;
4058
4059 switch (I->getOpcode()) {
4060 default:
4061 return true;
4062 case Instruction::Call:
4063 case Instruction::Invoke: {
4064 const auto &CB = cast<CallBase>(*I);
4065
4066 // Can't handle inline asm. Skip it.
4067 if (CB.isInlineAsm())
4068 return false;
4069
4070 // Constant bundle operands may need to retain their constant-ness for
4071 // correctness.
4072 if (CB.isBundleOperand(OpIdx))
4073 return false;
4074
4075 if (OpIdx < CB.arg_size()) {
4076 // Some variadic intrinsics require constants in the variadic arguments,
4077 // which currently aren't markable as immarg.
4078 if (isa<IntrinsicInst>(CB) &&
4079 OpIdx >= CB.getFunctionType()->getNumParams()) {
4080 // This is known to be OK for stackmap.
4081 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4082 }
4083
4084 // gcroot is a special case, since it requires a constant argument which
4085 // isn't also required to be a simple ConstantInt.
4086 if (CB.getIntrinsicID() == Intrinsic::gcroot)
4087 return false;
4088
4089 // Some intrinsic operands are required to be immediates.
4090 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
4091 }
4092
4093 // It is never allowed to replace the call argument to an intrinsic, but it
4094 // may be possible for a call.
4095 return !isa<IntrinsicInst>(CB);
4096 }
4097 case Instruction::ShuffleVector:
4098 // Shufflevector masks are constant.
4099 return OpIdx != 2;
4100 case Instruction::Switch:
4101 case Instruction::ExtractValue:
4102 // All operands apart from the first are constant.
4103 return OpIdx == 0;
4104 case Instruction::InsertValue:
4105 // All operands apart from the first and the second are constant.
4106 return OpIdx < 2;
4107 case Instruction::Alloca:
4108 // Static allocas (constant size in the entry block) are handled by
4109 // prologue/epilogue insertion so they're free anyway. We definitely don't
4110 // want to make them non-constant.
4111 return !cast<AllocaInst>(I)->isStaticAlloca();
4112 case Instruction::GetElementPtr:
4113 if (OpIdx == 0)
4114 return true;
4115    gep_type_iterator It = gep_type_begin(I);
4116    for (auto E = std::next(It, OpIdx); It != E; ++It)
4117 if (It.isStruct())
4118 return false;
4119 return true;
4120 }
4121}
4122
4123Value *llvm::invertCondition(Value *Condition) {
4124  // First: Check if it's a constant
4125 if (Constant *C = dyn_cast<Constant>(Condition))
4126 return ConstantExpr::getNot(C);
4127
4128 // Second: If the condition is already inverted, return the original value
4129 Value *NotCondition;
4130 if (match(Condition, m_Not(m_Value(NotCondition))))
4131 return NotCondition;
4132
4133 BasicBlock *Parent = nullptr;
4134 Instruction *Inst = dyn_cast<Instruction>(Condition);
4135 if (Inst)
4136 Parent = Inst->getParent();
4137 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4138 Parent = &Arg->getParent()->getEntryBlock();
4139 assert(Parent && "Unsupported condition to invert");
4140
4141 // Third: Check all the users for an invert
4142 for (User *U : Condition->users())
4143 if (Instruction *I = dyn_cast<Instruction>(U))
4144 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4145 return I;
4146
4147 // Last option: Create a new instruction
4148 auto *Inverted =
4149 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4150 if (Inst && !isa<PHINode>(Inst))
4151 Inverted->insertAfter(Inst);
4152 else
4153 Inverted->insertBefore(&*Parent->getFirstInsertionPt());
4154 return Inverted;
4155}
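// Illustrative usage sketch, not part of Local.cpp: because existing
// 'xor ..., true' users are reused, inverting twice does not keep growing the
// IR. 'Cond' is an arbitrary i1 value, a name assumed for the example only.
//
//   Value *NotCond = invertCondition(Cond);    // may create %cond.inv
//   Value *Back    = invertCondition(NotCond); // returns Cond, no new IR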
4156
4157bool llvm::inferAttributesFromOthers(Function &F) {
4158  // Note: We explicitly check for attributes rather than using cover functions
4159 // because some of the cover functions include the logic being implemented.
4160
4161 bool Changed = false;
4162 // readnone + not convergent implies nosync
4163 if (!F.hasFnAttribute(Attribute::NoSync) &&
4164 F.doesNotAccessMemory() && !F.isConvergent()) {
4165 F.setNoSync();
4166 Changed = true;
4167 }
4168
4169 // readonly implies nofree
4170 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4171 F.setDoesNotFreeMemory();
4172 Changed = true;
4173 }
4174
4175 // willreturn implies mustprogress
4176 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4177 F.setMustProgress();
4178 Changed = true;
4179 }
4180
4181 // TODO: There are a bunch of cases of restrictive memory effects we
4182 // can infer by inspecting arguments of argmemonly-ish functions.
4183
4184 return Changed;
4185}
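// Illustrative sketch (assumed example, not part of Local.cpp): the effect of
// the three rules above on a non-convergent declaration, using current
// attribute spellings. '@pure_fn' is a hypothetical function.
//
//   declare i32 @pure_fn(i32) memory(none) willreturn
//     -->
//   declare i32 @pure_fn(i32) memory(none) willreturn nosync nofree mustprogress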
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
bool End
Definition: ELF_riscv.cpp:480
static unsigned getHashValueImpl(SimpleValue Val)
Definition: EarlyCSE.cpp:235
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition: EarlyCSE.cpp:357
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Rewrite Partial Register Uses
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition: Local.cpp:2961
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition: Local.cpp:1602
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition: Local.cpp:2692
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition: Local.cpp:1878
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition: Local.cpp:2410
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition: Local.cpp:1572
static void gatherIncomingValuesToPhi(PHINode *PN, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition: Local.cpp:960
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition: Local.cpp:2440
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition: Local.cpp:2589
static void insertDbgValueOrDbgVariableRecordAfter(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1671
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2452
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition: Local.cpp:976
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2384
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2511
static unsigned replaceDominatedUsesWith(Value *From, Value *To, const RootType &Root, const DominatesFn &Dominates)
Definition: Local.cpp:3433
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition: Local.cpp:855
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition: Local.cpp:672
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition: Local.cpp:1872
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition: Local.cpp:631
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition: Local.cpp:2486
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3922
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition: Local.cpp:3702
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1354
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition: Local.cpp:864
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1390
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition: Local.cpp:2594
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1652
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition: Local.cpp:935
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgValueInst *DVI, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition: Local.cpp:2151
static void insertDbgVariableRecordsForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Definition: Local.cpp:2003
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, const SmallPtrSetImpl< BasicBlock * > &SuccPreds, BasicBlock *&CommonPred)
Definition: Local.cpp:1017
static const unsigned BitPartRecursionMaxDepth
Definition: Local.cpp:116
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition: Local.cpp:1050
cl::opt< bool > UseNewDbgInfoFormat
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3933
static void salvageDbgAssignAddress(T *Assign)
Definition: Local.cpp:2208
This defines the Use class.
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1385
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
unsigned popcount() const
Count the number of bits set.
Definition: APInt.h:1620
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:349
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition: APInt.h:547
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition: APInt.h:1525
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1199
an instruction to allocate memory on the stack
Definition: Instructions.h:59
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:174
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Value handle that asserts if the Value is deleted.
Definition: ValueHandle.h:264
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:443
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:430
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:499
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:409
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:640
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:367
void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:360
const Instruction & front() const
Definition: BasicBlock.h:453
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:199
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
Definition: BasicBlock.cpp:284
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:452
void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
Definition: BasicBlock.cpp:712
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:276
void insertDbgRecordAfter(DbgRecord *DR, Instruction *I)
Insert a DbgRecord into a block at the position given by I.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:379
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
size_t size() const
Definition: BasicBlock.h:451
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:221
bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
Definition: BasicBlock.cpp:478
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition: BasicBlock.h:613
const Instruction & back() const
Definition: BasicBlock.h:455
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:509
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
Definition: InstrTypes.h:513
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition: Constants.h:889
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1846
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1804
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1851
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1823
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1678
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB, BasicBlock::iterator InsertBefore)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:1022
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:1023
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:1017
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:1021
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
bool isSigned() const
Definition: InstrTypes.h:1265
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1105
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1017
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2126
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2529
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2112
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2535
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1775
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
This is an important base class in LLVM.
Definition: Constant.h:41
void destroyConstant()
Called if some element of this constant is no longer valid.
Definition: Constants.cpp:472
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition: DIBuilder.h:779
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
uint64_t getElement(unsigned I) const
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable.
std::optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
bool isAddressOfVariable() const
Does this describe the address of a local variable.
std::optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
unsigned size() const
Definition: DenseMap.h:99
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
void recalculate(Function &F)
Notify DTU that the entry block was replaced.
bool hasDomTree() const
Returns true if it holds a DominatorTree.
bool isBBPendingDeletion(BasicBlock *DelBB) const
Returns true if DelBB is awaiting deletion.
void applyUpdates(ArrayRef< DominatorTree::UpdateType > Updates)
Submit updates to all available trees.
void deleteBB(BasicBlock *DelBB)
Delete DelBB.
void applyUpdatesPermissive(ArrayRef< DominatorTree::UpdateType > Updates)
Submit updates to all available trees.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
const BasicBlock & getEntryBlock() const
Definition: Function.h:783
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2241
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1120
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2666
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:82
bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
Definition: Metadata.cpp:1745
void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:812
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
bool isIdenticalToWhenDefined(const Instruction *I) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:359
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1636
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:451
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Invoke instruction.
BasicBlock * getUnwindDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
Value * getPointerOperand()
Definition: Instructions.h:280
MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition: MDBuilder.cpp:94
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1067
static MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1135
static MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
static MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
Definition: Metadata.cpp:1217
static MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1167
static MDNode * getMostGenericRange(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1279
static MDNode * intersect(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1122
static MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1351
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
iterator end()
Definition: MapVector.h:71
iterator find(const KeyT &Key)
Definition: MapVector.h:167
bool empty() const
Definition: MapVector.h:79
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: MapVector.h:141
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAccesses in a set of BasicBlocks about to be deleted.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const_block_iterator block_begin() const
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
T get() const
Returns the value of the specified pointer type.
Definition: PointerUnion.h:155
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1827
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition: SetVector.h:87
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
value_type pop_back_val()
Definition: SetVector.h:285
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
size_type size() const
Definition: SmallSet.h:161
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
bool empty() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:249
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
value_op_iterator value_op_end()
Definition: User.h:263
Value * getOperand(unsigned i) const
Definition: User.h:169
value_op_iterator value_op_begin()
Definition: User.h:260
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:495
iterator find(const KeyT &Val)
Definition: ValueMap.h:155
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: ValueMap.h:172
size_type size() const
Definition: ValueMap.h:140
iterator end()
Definition: ValueMap.h:135
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to New if the callback ShouldRep...
Definition: Value.cpp:542
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition: Value.h:806
user_iterator_impl< User > user_iterator
Definition: Value.h:390
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least Size items before resizing again.
Definition: DenseSet.h:90
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1465
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match only the specified value.
Definition: PatternMatch.h:830
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with.overflow intrinsic, capturing it if we match.
Definition: PatternMatch.h:777
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:567
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
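The m_* matchers above compose into declarative patterns. A minimal sketch, assuming only the matchers listed here; the helper name is illustrative:
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Returns true if V is either llvm.bswap(X) or llvm.bitreverse(X) for some X,
// binding X on success.
static bool isByteOrBitSwapOf(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_CombineOr(m_BSwap(m_Value(X)), m_BitReverse(m_Value(X))));
}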
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:146
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:41
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
pred_iterator pred_end(BasicBlock *BB)
Definition: CFG.h:114
@ Offset
Definition: DWP.cpp:456
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1742
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from the given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:539
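A minimal sketch of the usual caller-side pattern, assuming a pass context that supplies TLI (names are illustrative):
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

// After an instruction's uses have been rewritten elsewhere, delete it and any
// operands that become dead as a result.
static void eraseIfTriviallyDead(llvm::Instruction *I,
                                 const llvm::TargetLibraryInfo *TLI) {
  if (llvm::isInstructionTriviallyDead(I, TLI))
    llvm::RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}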
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2918
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:129
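A minimal sketch of driving this over a whole function, assuming it runs inside an LLVM pass; the loop and helper name are illustrative, not code from this file:
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"

static bool foldConstantTerminators(llvm::Function &F,
                                    llvm::DomTreeUpdater *DTU) {
  bool Changed = false;
  for (llvm::BasicBlock &BB : llvm::make_early_inc_range(F))
    // DeleteDeadConditions cleans up branch/switch conditions made dead here.
    Changed |= llvm::ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true,
                                            /*TLI=*/nullptr, DTU);
  return Changed;
}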
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2800
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition: Local.cpp:3451
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2241
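The IR-level entry point salvageDebugInfo(Instruction &), declared in llvm/Transforms/Utils/Local.h, delegates to this. A minimal sketch of the conventional pairing with eraseFromParent, assuming the caller has already rewritten or abandoned the instruction's value uses:
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

static void eraseAndSalvage(llvm::Instruction &I) {
  llvm::salvageDebugInfo(I); // Re-express debug users in terms of I's operands.
  I.eraseFromParent();       // The instruction itself can now be removed.
}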
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:148
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1650
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2898
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition: Local.cpp:3348
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or fewer items.
Definition: STLExtras.h:2511
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition: Local.cpp:730
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable informat...
void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:141
void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition: Local.cpp:2067
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2782
bool canSimplifyInvokeNoUnwind(const Function *F)
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:399
pred_iterator pred_begin(BasicBlock *BB)
Definition: CFG.h:110
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition: Local.cpp:1119
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition: Local.cpp:3938
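A hedged sketch of how this is typically consumed (mirroring the pattern used by aggressive-instcombine-style callers); the substitution step is an assumption, not code from this file:
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

static bool tryFormBSwapOrBitReverse(llvm::Instruction &I) {
  llvm::SmallVector<llvm::Instruction *, 4> InsertedInsts;
  if (!llvm::recognizeBSwapOrBitReverseIdiom(&I, /*MatchBSwaps=*/true,
                                             /*MatchBitReversals=*/true,
                                             InsertedInsts))
    return false;
  // The last inserted instruction produces the recognized value; use it in
  // place of the original idiom.
  I.replaceAllUsesWith(InsertedInsts.back());
  return true;
}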
MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1542
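A minimal sketch, assuming AC and DT are supplied by the surrounding pass (either may be null); the helper name is illustrative:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

// Report the alignment that can be proven (or enforced on an object we
// control) for a load's pointer operand, asking for at least the load's own
// alignment.
static llvm::Align knownAlignOfLoad(llvm::LoadInst &LI,
                                    const llvm::DataLayout &DL,
                                    llvm::AssumptionCache *AC,
                                    llvm::DominatorTree *DT) {
  return llvm::getOrEnforceKnownAlignment(LI.getPointerOperand(), LI.getAlign(),
                                          DL, &LI, AC, DT);
}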
bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition: Local.cpp:406
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1915
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2043
DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition: Local.cpp:3615
CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition: Local.cpp:2872
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition: Local.cpp:3159
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition: Local.cpp:418
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition: Local.cpp:3400
DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
Definition: DebugInfo.cpp:161
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Dbg Intrinsic utilities: inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm.dbg.declare intrinsic.
Definition: Local.cpp:1690
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition: Local.cpp:3466
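A minimal sketch of the typical GVN/CVP-style use, assuming a branch has established 'X == C' along its true edge (all names illustrative):
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/Local.h"

static unsigned propagateEqualityOnTrueEdge(llvm::Value *X, llvm::Value *C,
                                            llvm::BasicBlock *Pred,
                                            llvm::BasicBlock *TrueSucc,
                                            llvm::DominatorTree &DT) {
  // Only uses reachable exclusively through Pred -> TrueSucc are rewritten.
  return llvm::replaceDominatedUsesWith(X, C, DT,
                                        llvm::BasicBlockEdge(Pred, TrueSucc));
}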
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3234
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2832
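A minimal sketch, assuming the caller has proven that executing I is undefined behaviour (for example an unconditional call to a noreturn, nounwind function); DTU may be null:
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

static unsigned cutOffCodeAfter(llvm::Instruction *I,
                                llvm::DomTreeUpdater *DTU) {
  // Inserts 'unreachable' before I, killing I and the rest of its block; the
  // return value is the number of instructions removed.
  return llvm::changeToUnreachable(I, /*PreserveLCSSA=*/false, DTU);
}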
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2710
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2540
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3327
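A hedged sketch of the CSE-style replacement this is meant for; the IR-flag intersection via andIRFlags is a common companion step, not a requirement of this API:
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"

static void replaceWithEquivalent(llvm::Instruction *J, llvm::Instruction *K,
                                  bool KMoves) {
  // KMoves should be true when K is being hoisted/sunk rather than already
  // dominating J; position-dependent metadata is then handled conservatively.
  llvm::combineMetadataForCSE(K, J, /*DoesKMove=*/KMoves);
  K->andIRFlags(J);          // Keep only IR flags valid for both instructions.
  J->replaceAllUsesWith(K);
  J->eraseFromParent();
}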
void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition: Local.cpp:3562
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!...
Definition: Local.cpp:770
bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition: Local.cpp:616
void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition: Local.cpp:3572
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition: Local.cpp:3538
void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition: Local.cpp:3513
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition: Local.cpp:4050
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple llvm.dbg.value instructions when the alloca they describe is replaced with a new val...
Definition: Local.cpp:2180
Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to...
Definition: Local.cpp:1494
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition: Local.cpp:554
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1921
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
gep_type_iterator gep_type_begin(const User *GEP)
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Same as findDbgDeclares, but for DVRDeclares.
Definition: DebugInfo.cpp:66
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition: Local.cpp:650
bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition: Local.cpp:4157
Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition: Local.cpp:4123
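A minimal sketch, assuming the branch is the only user that needs the inverted form; the helper name is illustrative:
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

// Flip a conditional branch without changing its semantics: negate the
// condition (reusing an existing negation if one exists) and swap successors.
static void invertBranch(llvm::BranchInst &BI) {
  if (!BI.isConditional())
    return;
  BI.setCondition(llvm::invertCondition(BI.getCondition()));
  BI.swapSuccessors();
}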
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:613
void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks, which must have no predecessors outside the set.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4040
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that cannot be reached from the function's entry.
Definition: Local.cpp:3197
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition: Local.cpp:1486
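A hedged sketch of a whole-function cleanup assembled from the utilities above; the ordering is illustrative, not a prescribed pipeline:
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"

static bool cleanUpFunction(llvm::Function &F,
                            const llvm::TargetLibraryInfo *TLI,
                            llvm::DomTreeUpdater *DTU) {
  bool Changed = false;
  for (llvm::BasicBlock &BB : F) {
    Changed |= llvm::SimplifyInstructionsInBlock(&BB, TLI); // local folding
    Changed |= llvm::EliminateDuplicatePHINodes(&BB);       // CSE identical PHIs
  }
  // Finally drop any blocks that are no longer reachable from the entry.
  Changed |= llvm::removeUnreachableBlocks(F, DTU);
  return Changed;
}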
bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition: Local.cpp:3484
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:491
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces the llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2131
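A minimal sketch of the usual call site when an alloca's storage is rewritten; the names and byte offset are illustrative:
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"

static void retargetDbgDeclares(llvm::Value *OldAddr, llvm::Value *NewAddr,
                                llvm::Module &M, int ByteOffset) {
  llvm::DIBuilder DIB(M, /*AllowUnresolved=*/false);
  // ApplyOffset folds ByteOffset into each dbg.declare's DIExpression.
  llvm::replaceDbgDeclare(OldAddr, NewAddr, DIB,
                          llvm::DIExpression::ApplyOffset, ByteOffset);
}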
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
#define NDEBUG
Definition: regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:50
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:238
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117