Local.cpp
1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/Local.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/IntrinsicsWebAssembly.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
67#include "llvm/IR/Type.h"
68#include "llvm/IR/Use.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
71#include "llvm/IR/ValueHandle.h"
74#include "llvm/Support/Debug.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
92
93#define DEBUG_TYPE "local"
94
95STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
96STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
97
98static cl::opt<bool> PHICSEDebugHash(
99 "phicse-debug-hash",
100#ifdef EXPENSIVE_CHECKS
101 cl::init(true),
102#else
103 cl::init(false),
104#endif
105 cl::Hidden,
106 cl::desc("Perform extra assertion checking to verify that PHINode's hash "
107 "function is well-behaved w.r.t. its isEqual predicate"));
108
109static cl::opt<unsigned> PHICSENumPHISmallSize(
110 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
111 cl::desc(
112 "When the basic block contains not more than this number of PHI nodes, "
113 "perform a (faster!) exhaustive search instead of set-driven one."));
114
115// Max recursion depth for collectBitParts used when detecting bswap and
116// bitreverse idioms.
117static const unsigned BitPartRecursionMaxDepth = 48;
118
119//===----------------------------------------------------------------------===//
120// Local constant propagation.
121//
122
123/// ConstantFoldTerminator - If a terminator instruction is predicated on a
124/// constant value, convert it into an unconditional branch to the constant
125/// destination. This is a nontrivial operation because the successors of this
126/// basic block must have their PHI nodes updated.
127/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
128/// conditions and indirectbr addresses this might make dead if
129/// DeleteDeadConditions is true.
130bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
131 const TargetLibraryInfo *TLI,
132 DomTreeUpdater *DTU) {
133 Instruction *T = BB->getTerminator();
134 IRBuilder<> Builder(T);
135
136 // Branch - See if we are conditional jumping on constant
137 if (auto *BI = dyn_cast<BranchInst>(T)) {
138 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
139
140 BasicBlock *Dest1 = BI->getSuccessor(0);
141 BasicBlock *Dest2 = BI->getSuccessor(1);
142
143 if (Dest2 == Dest1) { // Conditional branch to same location?
144 // This branch matches something like this:
145 // br bool %cond, label %Dest, label %Dest
146 // and changes it into: br label %Dest
147
148 // Let the basic block know that we are letting go of one copy of it.
149 assert(BI->getParent() && "Terminator not inserted in block!");
150 Dest1->removePredecessor(BI->getParent());
151
152 // Replace the conditional branch with an unconditional one.
153 BranchInst *NewBI = Builder.CreateBr(Dest1);
154
155 // Transfer the metadata to the new branch instruction.
156 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
157 LLVMContext::MD_annotation});
158
159 Value *Cond = BI->getCondition();
160 BI->eraseFromParent();
161 if (DeleteDeadConditions)
162 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
163 return true;
164 }
165
166 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
167 // Are we branching on constant?
168 // YES. Change to unconditional branch...
169 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
170 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
171
172 // Let the basic block know that we are letting go of it. Based on this,
173 // it will adjust its PHI nodes.
174 OldDest->removePredecessor(BB);
175
176 // Replace the conditional branch with an unconditional one.
177 BranchInst *NewBI = Builder.CreateBr(Destination);
178
179 // Transfer the metadata to the new branch instruction.
180 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
181 LLVMContext::MD_annotation});
182
183 BI->eraseFromParent();
184 if (DTU)
185 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
186 return true;
187 }
188
189 return false;
190 }
191
192 if (auto *SI = dyn_cast<SwitchInst>(T)) {
193 // If we are switching on a constant, we can convert the switch to an
194 // unconditional branch.
195 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
196 BasicBlock *DefaultDest = SI->getDefaultDest();
197 BasicBlock *TheOnlyDest = DefaultDest;
198
199 // If the default is unreachable, ignore it when searching for TheOnlyDest.
200 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
201 SI->getNumCases() > 0) {
202 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
203 }
204
205 bool Changed = false;
206
207 // Figure out which case it goes to.
208 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
209 // Found case matching a constant operand?
210 if (It->getCaseValue() == CI) {
211 TheOnlyDest = It->getCaseSuccessor();
212 break;
213 }
214
215 // Check to see if this branch is going to the same place as the default
216 // dest. If so, eliminate it as an explicit compare.
217 if (It->getCaseSuccessor() == DefaultDest) {
218 MDNode *MD = getValidBranchWeightMDNode(*SI);
219 unsigned NCases = SI->getNumCases();
220 // Fold the case metadata into the default if there will be any branches
221 // left, unless the metadata doesn't match the switch.
222 if (NCases > 1 && MD) {
223 // Collect branch weights into a vector.
224 SmallVector<uint32_t, 8> Weights;
225 extractBranchWeights(MD, Weights);
226
227 // Merge weight of this case to the default weight.
228 unsigned Idx = It->getCaseIndex();
229 // TODO: Add overflow check.
230 Weights[0] += Weights[Idx + 1];
231 // Remove weight for this case.
232 std::swap(Weights[Idx + 1], Weights.back());
233 Weights.pop_back();
234 setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD));
235 }
236 // Remove this entry.
237 BasicBlock *ParentBB = SI->getParent();
238 DefaultDest->removePredecessor(ParentBB);
239 It = SI->removeCase(It);
240 End = SI->case_end();
241
242 // Removing this case may have made the condition constant. In that
243 // case, update CI and restart iteration through the cases.
244 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
245 CI = NewCI;
246 It = SI->case_begin();
247 }
248
249 Changed = true;
250 continue;
251 }
252
253 // Otherwise, check to see if the switch only branches to one destination.
254 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
255 // destinations.
256 if (It->getCaseSuccessor() != TheOnlyDest)
257 TheOnlyDest = nullptr;
258
259 // Increment this iterator as we haven't removed the case.
260 ++It;
261 }
262
263 if (CI && !TheOnlyDest) {
264 // Branching on a constant, but not any of the cases, go to the default
265 // successor.
266 TheOnlyDest = SI->getDefaultDest();
267 }
268
269 // If we found a single destination that we can fold the switch into, do so
270 // now.
271 if (TheOnlyDest) {
272 // Insert the new branch.
273 Builder.CreateBr(TheOnlyDest);
274 BasicBlock *BB = SI->getParent();
275
276 SmallSet<BasicBlock *, 8> RemovedSuccessors;
277
278 // Remove entries from PHI nodes which we no longer branch to...
279 BasicBlock *SuccToKeep = TheOnlyDest;
280 for (BasicBlock *Succ : successors(SI)) {
281 if (DTU && Succ != TheOnlyDest)
282 RemovedSuccessors.insert(Succ);
283 // Found case matching a constant operand?
284 if (Succ == SuccToKeep) {
285 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
286 } else {
287 Succ->removePredecessor(BB);
288 }
289 }
290
291 // Delete the old switch.
292 Value *Cond = SI->getCondition();
293 SI->eraseFromParent();
294 if (DeleteDeadConditions)
295 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
296 if (DTU) {
297 std::vector<DominatorTree::UpdateType> Updates;
298 Updates.reserve(RemovedSuccessors.size());
299 for (auto *RemovedSuccessor : RemovedSuccessors)
300 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
301 DTU->applyUpdates(Updates);
302 }
303 return true;
304 }
305
306 if (SI->getNumCases() == 1) {
307 // Otherwise, we can fold this switch into a conditional branch
308 // instruction if it has only one non-default destination.
309 auto FirstCase = *SI->case_begin();
310 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
311 FirstCase.getCaseValue(), "cond");
312
313 // Insert the new branch.
314 BranchInst *NewBr = Builder.CreateCondBr(Cond,
315 FirstCase.getCaseSuccessor(),
316 SI->getDefaultDest());
317 SmallVector<uint32_t> Weights;
318 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
319 uint32_t DefWeight = Weights[0];
320 uint32_t CaseWeight = Weights[1];
321 // The TrueWeight should be the weight for the single case of SI.
322 NewBr->setMetadata(LLVMContext::MD_prof,
323 MDBuilder(BB->getContext())
324 .createBranchWeights(CaseWeight, DefWeight));
325 }
326
327 // Update make.implicit metadata to the newly-created conditional branch.
328 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
329 if (MakeImplicitMD)
330 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
331
332 // Delete the old switch.
333 SI->eraseFromParent();
334 return true;
335 }
336 return Changed;
337 }
338
339 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
340 // indirectbr blockaddress(@F, @BB) -> br label @BB
341 if (auto *BA =
342 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
343 BasicBlock *TheOnlyDest = BA->getBasicBlock();
344 SmallSet<BasicBlock *, 8> RemovedSuccessors;
345
346 // Insert the new branch.
347 Builder.CreateBr(TheOnlyDest);
348
349 BasicBlock *SuccToKeep = TheOnlyDest;
350 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
351 BasicBlock *DestBB = IBI->getDestination(i);
352 if (DTU && DestBB != TheOnlyDest)
353 RemovedSuccessors.insert(DestBB);
354 if (IBI->getDestination(i) == SuccToKeep) {
355 SuccToKeep = nullptr;
356 } else {
357 DestBB->removePredecessor(BB);
358 }
359 }
360 Value *Address = IBI->getAddress();
361 IBI->eraseFromParent();
362 if (DeleteDeadConditions)
363 // Delete pointer cast instructions.
364 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
365
366 // Also zap the blockaddress constant if there are no users remaining,
367 // otherwise the destination is still marked as having its address taken.
368 if (BA->use_empty())
369 BA->destroyConstant();
370
371 // If we didn't find our destination in the IBI successor list, then we
372 // have undefined behavior. Replace the unconditional branch with an
373 // 'unreachable' instruction.
374 if (SuccToKeep) {
375 BB->getTerminator()->eraseFromParent();
376 new UnreachableInst(BB->getContext(), BB);
377 }
378
379 if (DTU) {
380 std::vector<DominatorTree::UpdateType> Updates;
381 Updates.reserve(RemovedSuccessors.size());
382 for (auto *RemovedSuccessor : RemovedSuccessors)
383 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
384 DTU->applyUpdates(Updates);
385 }
386 return true;
387 }
388 }
389
390 return false;
391}
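// Illustrative usage sketch (not part of the original Local.cpp): a CFG
// cleanup can simply try ConstantFoldTerminator on every block of a function.
// The wrapper name below is hypothetical.
static bool foldConstantTerminators(Function &F, const TargetLibraryInfo *TLI,
                                    DomTreeUpdater *DTU) {
  bool Changed = false;
  for (BasicBlock &BB : F)
    // Also clean up branch/switch conditions that become dead.
    Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true, TLI,
                                      DTU);
  return Changed;
}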
392
393//===----------------------------------------------------------------------===//
394// Local dead code elimination.
395//
396
397/// isInstructionTriviallyDead - Return true if the result produced by the
398/// instruction is not used, and the instruction has no side effects.
399///
400bool llvm::isInstructionTriviallyDead(Instruction *I,
401 const TargetLibraryInfo *TLI) {
402 if (!I->use_empty())
403 return false;
404 return wouldInstructionBeTriviallyDead(I, TLI);
405}
406
407bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
408 Instruction *I, const TargetLibraryInfo *TLI) {
409 // Instructions that are "markers" and have implied meaning on code around
410 // them (without explicit uses), are not dead on unused paths.
411 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
412 if (II->getIntrinsicID() == Intrinsic::stacksave ||
413 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
414 II->isLifetimeStartOrEnd())
415 return false;
416 return wouldInstructionBeTriviallyDead(I, TLI);
417}
418
419bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
420 const TargetLibraryInfo *TLI) {
421 if (I->isTerminator())
422 return false;
423
424 // We don't want the landingpad-like instructions removed by anything this
425 // general.
426 if (I->isEHPad())
427 return false;
428
429 // We don't want debug info removed by anything this general.
430 if (isa<DbgVariableIntrinsic>(I))
431 return false;
432
433 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
434 if (DLI->getLabel())
435 return false;
436 return true;
437 }
438
439 if (auto *CB = dyn_cast<CallBase>(I))
440 if (isRemovableAlloc(CB, TLI))
441 return true;
442
443 if (!I->willReturn()) {
444 auto *II = dyn_cast<IntrinsicInst>(I);
445 if (!II)
446 return false;
447
448 switch (II->getIntrinsicID()) {
449 case Intrinsic::experimental_guard: {
450 // Guards on true are operationally no-ops. In the future we can
451 // consider more sophisticated tradeoffs for guards considering potential
452 // for check widening, but for now we keep things simple.
453 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
454 return Cond && Cond->isOne();
455 }
456 // TODO: These intrinsics are not safe to remove, because this may remove
457 // a well-defined trap.
458 case Intrinsic::wasm_trunc_signed:
459 case Intrinsic::wasm_trunc_unsigned:
460 case Intrinsic::ptrauth_auth:
461 case Intrinsic::ptrauth_resign:
462 return true;
463 default:
464 return false;
465 }
466 }
467
468 if (!I->mayHaveSideEffects())
469 return true;
470
471 // Special case intrinsics that "may have side effects" but can be deleted
472 // when dead.
473 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
474 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
475 if (II->getIntrinsicID() == Intrinsic::stacksave ||
476 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
477 return true;
478
479 // Intrinsics declare sideeffects to prevent them from moving, but they are
480 // nops without users.
481 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
482 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
483 return true;
484
485 if (II->isLifetimeStartOrEnd()) {
486 auto *Arg = II->getArgOperand(1);
487 // Lifetime intrinsics are dead when their right-hand is undef.
488 if (isa<UndefValue>(Arg))
489 return true;
490 // If the right-hand is an alloc, global, or argument and the only uses
491 // are lifetime intrinsics then the intrinsics are dead.
492 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
493 return llvm::all_of(Arg->uses(), [](Use &Use) {
494 if (IntrinsicInst *IntrinsicUse =
495 dyn_cast<IntrinsicInst>(Use.getUser()))
496 return IntrinsicUse->isLifetimeStartOrEnd();
497 return false;
498 });
499 return false;
500 }
501
502 // Assumptions are dead if their condition is trivially true.
503 if (II->getIntrinsicID() == Intrinsic::assume &&
504 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
505 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
506 return !Cond->isZero();
507
508 return false;
509 }
510
511 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
512 std::optional<fp::ExceptionBehavior> ExBehavior =
513 FPI->getExceptionBehavior();
514 return *ExBehavior != fp::ebStrict;
515 }
516 }
517
518 if (auto *Call = dyn_cast<CallBase>(I)) {
519 if (Value *FreedOp = getFreedOperand(Call, TLI))
520 if (Constant *C = dyn_cast<Constant>(FreedOp))
521 return C->isNullValue() || isa<UndefValue>(C);
522 if (isMathLibCallNoop(Call, TLI))
523 return true;
524 }
525
526 // Non-volatile atomic loads from constants can be removed.
527 if (auto *LI = dyn_cast<LoadInst>(I))
528 if (auto *GV = dyn_cast<GlobalVariable>(
529 LI->getPointerOperand()->stripPointerCasts()))
530 if (!LI->isVolatile() && GV->isConstant())
531 return true;
532
533 return false;
534}
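// Illustrative sketch (not part of the original Local.cpp): a single forward
// sweep that erases instructions which are already trivially dead. Values
// that only become dead once their users are gone are left for the recursive
// helpers below. The function name is hypothetical.
static bool dropTriviallyDeadOnce(BasicBlock &BB,
                                  const TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (Instruction &I : make_early_inc_range(BB)) {
    if (isInstructionTriviallyDead(&I, TLI)) {
      I.eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}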
535
536/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
537/// trivially dead instruction, delete it. If that makes any of its operands
538/// trivially dead, delete them too, recursively. Return true if any
539/// instructions were deleted.
540bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
541 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
542 std::function<void(Value *)> AboutToDeleteCallback) {
543 Instruction *I = dyn_cast<Instruction>(V);
544 if (!I || !isInstructionTriviallyDead(I, TLI))
545 return false;
546
547 SmallVector<WeakTrackingVH, 16> DeadInsts;
548 DeadInsts.push_back(I);
549 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
550 AboutToDeleteCallback);
551
552 return true;
553}
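// Illustrative sketch (not part of the original Local.cpp): the usual
// replace-then-clean-up pattern this entry point serves. `OldI` and `NewV`
// are hypothetical names for an instruction a transform has rewritten and
// its replacement value.
static void replaceAndClean(Instruction *OldI, Value *NewV,
                            const TargetLibraryInfo *TLI) {
  OldI->replaceAllUsesWith(NewV);
  // OldI is now use-empty; if it has no side effects this erases it and then
  // recursively erases any of its operands that became trivially dead.
  RecursivelyDeleteTriviallyDeadInstructions(OldI, TLI);
}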
554
555bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
556 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
557 MemorySSAUpdater *MSSAU,
558 std::function<void(Value *)> AboutToDeleteCallback) {
559 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
560 for (; S != E; ++S) {
561 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
562 if (!I || !isInstructionTriviallyDead(I)) {
563 DeadInsts[S] = nullptr;
564 ++Alive;
565 }
566 }
567 if (Alive == E)
568 return false;
569 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
570 AboutToDeleteCallback);
571 return true;
572}
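// Illustrative sketch (not part of the original Local.cpp): batching
// candidates as WeakTrackingVHs lets the permissive variant silently skip
// entries that were already erased or turn out to still be live. The helper
// name is hypothetical.
static bool deleteDeadCandidates(
    const SmallVectorImpl<Instruction *> &Candidates,
    const TargetLibraryInfo *TLI) {
  SmallVector<WeakTrackingVH, 16> DeadInsts(Candidates.begin(),
                                            Candidates.end());
  return RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, TLI);
}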
573
574void llvm::RecursivelyDeleteTriviallyDeadInstructions(
575 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
576 MemorySSAUpdater *MSSAU,
577 std::function<void(Value *)> AboutToDeleteCallback) {
578 // Process the dead instruction list until empty.
579 while (!DeadInsts.empty()) {
580 Value *V = DeadInsts.pop_back_val();
581 Instruction *I = cast_or_null<Instruction>(V);
582 if (!I)
583 continue;
584 assert(isInstructionTriviallyDead(I, TLI) &&
585 "Live instruction found in dead worklist!");
586 assert(I->use_empty() && "Instructions with uses are not dead.");
587
588 // Don't lose the debug info while deleting the instructions.
589 salvageDebugInfo(*I);
590
591 if (AboutToDeleteCallback)
592 AboutToDeleteCallback(I);
593
594 // Null out all of the instruction's operands to see if any operand becomes
595 // dead as we go.
596 for (Use &OpU : I->operands()) {
597 Value *OpV = OpU.get();
598 OpU.set(nullptr);
599
600 if (!OpV->use_empty())
601 continue;
602
603 // If the operand is an instruction that became dead as we nulled out the
604 // operand, and if it is 'trivially' dead, delete it in a future loop
605 // iteration.
606 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
607 if (isInstructionTriviallyDead(OpI, TLI))
608 DeadInsts.push_back(OpI);
609 }
610 if (MSSAU)
611 MSSAU->removeMemoryAccess(I);
612
613 I->eraseFromParent();
614 }
615}
616
617bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
618 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
619 SmallVector<DbgVariableRecord *, 1> DPUsers;
620 findDbgUsers(DbgUsers, I, &DPUsers);
621 for (auto *DII : DbgUsers)
622 DII->setKillLocation();
623 for (auto *DVR : DPUsers)
624 DVR->setKillLocation();
625 return !DbgUsers.empty() || !DPUsers.empty();
626}
627
628/// areAllUsesEqual - Check whether the uses of a value are all the same.
629/// This is similar to Instruction::hasOneUse() except this will also return
630/// true when there are no uses or multiple uses that all refer to the same
631/// value.
632static bool areAllUsesEqual(Instruction *I) {
633 Value::user_iterator UI = I->user_begin();
634 Value::user_iterator UE = I->user_end();
635 if (UI == UE)
636 return true;
637
638 User *TheUse = *UI;
639 for (++UI; UI != UE; ++UI) {
640 if (*UI != TheUse)
641 return false;
642 }
643 return true;
644}
645
646/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
647/// dead PHI node, due to being a def-use chain of single-use nodes that
648/// either forms a cycle or is terminated by a trivially dead instruction,
649/// delete it. If that makes any of its operands trivially dead, delete them
650/// too, recursively. Return true if a change was made.
651bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
652 const TargetLibraryInfo *TLI,
653 llvm::MemorySSAUpdater *MSSAU) {
654 SmallPtrSet<Instruction*, 4> Visited;
655 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
656 I = cast<Instruction>(*I->user_begin())) {
657 if (I->use_empty())
658 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
659
660 // If we find an instruction more than once, we're on a cycle that
661 // won't prove fruitful.
662 if (!Visited.insert(I).second) {
663 // Break the cycle and delete the instruction and its operands.
664 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
665 RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
666 return true;
667 }
668 }
669 return false;
670}
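// Illustrative sketch (not part of the original Local.cpp): sweeping the PHI
// nodes of a block and dropping the ones that are only kept alive by a
// single-use chain or cycle. The wrapper name is hypothetical.
static bool pruneDeadPHIs(BasicBlock &BB, const TargetLibraryInfo *TLI) {
  // Collect handles first; deleting one PHI may erase others in its chain.
  SmallVector<WeakTrackingVH, 8> PHIs;
  for (PHINode &PN : BB.phis())
    PHIs.push_back(&PN);
  bool Changed = false;
  for (WeakTrackingVH &VH : PHIs)
    if (auto *PN = dyn_cast_or_null<PHINode>(VH))
      Changed |= RecursivelyDeleteDeadPHINode(PN, TLI);
  return Changed;
}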
671
672static bool
673simplifyAndDCEInstruction(Instruction *I,
674 SmallSetVector<Instruction *, 16> &WorkList,
675 const DataLayout &DL,
676 const TargetLibraryInfo *TLI) {
677 if (isInstructionTriviallyDead(I, TLI)) {
678 salvageDebugInfo(*I);
679
680 // Null out all of the instruction's operands to see if any operand becomes
681 // dead as we go.
682 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
683 Value *OpV = I->getOperand(i);
684 I->setOperand(i, nullptr);
685
686 if (!OpV->use_empty() || I == OpV)
687 continue;
688
689 // If the operand is an instruction that became dead as we nulled out the
690 // operand, and if it is 'trivially' dead, delete it in a future loop
691 // iteration.
692 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
693 if (isInstructionTriviallyDead(OpI, TLI))
694 WorkList.insert(OpI);
695 }
696
697 I->eraseFromParent();
698
699 return true;
700 }
701
702 if (Value *SimpleV = simplifyInstruction(I, DL)) {
703 // Add the users to the worklist. CAREFUL: an instruction can use itself,
704 // in the case of a phi node.
705 for (User *U : I->users()) {
706 if (U != I) {
707 WorkList.insert(cast<Instruction>(U));
708 }
709 }
710
711 // Replace the instruction with its simplified value.
712 bool Changed = false;
713 if (!I->use_empty()) {
714 I->replaceAllUsesWith(SimpleV);
715 Changed = true;
716 }
717 if (isInstructionTriviallyDead(I, TLI)) {
718 I->eraseFromParent();
719 Changed = true;
720 }
721 return Changed;
722 }
723 return false;
724}
725
726/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
727/// simplify any instructions in it and recursively delete dead instructions.
728///
729/// This returns true if it changed the code, note that it can delete
730/// instructions in other blocks as well as in this block.
731bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
732 const TargetLibraryInfo *TLI) {
733 bool MadeChange = false;
734 const DataLayout &DL = BB->getDataLayout();
735
736#ifndef NDEBUG
737 // In debug builds, ensure that the terminator of the block is never replaced
738 // or deleted by these simplifications. The idea of simplification is that it
739 // cannot introduce new instructions, and there is no way to replace the
740 // terminator of a block without introducing a new instruction.
741 AssertingVH<Instruction> TerminatorVH(&BB->back());
742#endif
743
744 SmallSetVector<Instruction *, 16> WorkList;
745 // Iterate over the original function, only adding insts to the worklist
746 // if they actually need to be revisited. This avoids having to pre-init
747 // the worklist with the entire function's worth of instructions.
748 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
749 BI != E;) {
750 assert(!BI->isTerminator());
751 Instruction *I = &*BI;
752 ++BI;
753
754 // We're visiting this instruction now, so make sure it's not in the
755 // worklist from an earlier visit.
756 if (!WorkList.count(I))
757 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
758 }
759
760 while (!WorkList.empty()) {
761 Instruction *I = WorkList.pop_back_val();
762 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
763 }
764 return MadeChange;
765}
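// Illustrative sketch (not part of the original Local.cpp): running the
// block-local simplification over a whole function; the wrapper name is
// hypothetical.
static bool simplifyAllBlocks(Function &F, const TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (BasicBlock &BB : F)
    Changed |= SimplifyInstructionsInBlock(&BB, TLI);
  return Changed;
}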
766
767//===----------------------------------------------------------------------===//
768// Control Flow Graph Restructuring.
769//
770
771void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
772 DomTreeUpdater *DTU) {
773
774 // If BB has single-entry PHI nodes, fold them.
775 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
776 Value *NewVal = PN->getIncomingValue(0);
777 // Replace self referencing PHI with poison, it must be dead.
778 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
779 PN->replaceAllUsesWith(NewVal);
780 PN->eraseFromParent();
781 }
782
783 BasicBlock *PredBB = DestBB->getSinglePredecessor();
784 assert(PredBB && "Block doesn't have a single predecessor!");
785
786 bool ReplaceEntryBB = PredBB->isEntryBlock();
787
788 // DTU updates: Collect all the edges that enter
789 // PredBB. These dominator edges will be redirected to DestBB.
790 SmallVector<DominatorTree::UpdateType, 32> Updates;
791
792 if (DTU) {
793 // To avoid processing the same predecessor more than once.
794 SmallPtrSet<BasicBlock *, 2> SeenPreds;
795 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
796 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
797 // This predecessor of PredBB may already have DestBB as a successor.
798 if (PredOfPredBB != PredBB)
799 if (SeenPreds.insert(PredOfPredBB).second)
800 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
801 SeenPreds.clear();
802 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
803 if (SeenPreds.insert(PredOfPredBB).second)
804 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
805 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
806 }
807
808 // Zap anything that took the address of DestBB. Not doing this will give the
809 // address an invalid value.
810 if (DestBB->hasAddressTaken()) {
811 BlockAddress *BA = BlockAddress::get(DestBB);
812 Constant *Replacement =
813 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
814 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
815 BA->getType()));
816 BA->destroyConstant();
817 }
818
819 // Anything that branched to PredBB now branches to DestBB.
820 PredBB->replaceAllUsesWith(DestBB);
821
822 // Splice all the instructions from PredBB to DestBB.
823 PredBB->getTerminator()->eraseFromParent();
824 DestBB->splice(DestBB->begin(), PredBB);
825 new UnreachableInst(PredBB->getContext(), PredBB);
826
827 // If the PredBB is the entry block of the function, move DestBB up to
828 // become the entry block after we erase PredBB.
829 if (ReplaceEntryBB)
830 DestBB->moveAfter(PredBB);
831
832 if (DTU) {
833 assert(PredBB->size() == 1 &&
834 isa<UnreachableInst>(PredBB->getTerminator()) &&
835 "The successor list of PredBB isn't empty before "
836 "applying corresponding DTU updates.");
837 DTU->applyUpdatesPermissive(Updates);
838 DTU->deleteBB(PredBB);
839 // Recalculation of DomTree is needed when updating a forward DomTree and
840 // the Entry BB is replaced.
841 if (ReplaceEntryBB && DTU->hasDomTree()) {
842 // The entry block was removed and there is no external interface for
843 // the dominator tree to be notified of this change. In this corner-case
844 // we recalculate the entire tree.
845 DTU->recalculate(*(DestBB->getParent()));
846 }
847 }
848
849 else {
850 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
851 }
852}
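// Illustrative sketch (not part of the original Local.cpp): the guard a
// caller typically performs before handing a block to
// MergeBasicBlockIntoOnlyPred. The helper name is hypothetical.
static void mergeIntoPredIfPossible(BasicBlock *BB, DomTreeUpdater *DTU) {
  BasicBlock *Pred = BB->getSinglePredecessor();
  // Only legal when BB has exactly one incoming edge and that predecessor
  // branches unconditionally (and solely) into BB.
  if (Pred && Pred != BB && Pred->getTerminator()->getNumSuccessors() == 1)
    MergeBasicBlockIntoOnlyPred(BB, DTU);
}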
853
854/// Return true if we can choose one of these values to use in place of the
855/// other. Note that we will always choose the non-undef value to keep.
856static bool CanMergeValues(Value *First, Value *Second) {
857 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
858}
859
860/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
861/// branch to Succ, into Succ.
862///
863/// Assumption: Succ is the single successor for BB.
864static bool
865CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
866 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
867 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
868
869 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
870 << Succ->getName() << "\n");
871 // Shortcut, if there is only a single predecessor it must be BB and merging
872 // is always safe
873 if (Succ->getSinglePredecessor())
874 return true;
875
876 // Look at all the phi nodes in Succ, to see if they present a conflict when
877 // merging these blocks
878 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
879 PHINode *PN = cast<PHINode>(I);
880
881 // If the incoming value from BB is again a PHINode in
882 // BB which has the same incoming value for *PI as PN does, we can
883 // merge the phi nodes and then the blocks can still be merged
884 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
885 if (BBPN && BBPN->getParent() == BB) {
886 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
887 BasicBlock *IBB = PN->getIncomingBlock(PI);
888 if (BBPreds.count(IBB) &&
889 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
890 PN->getIncomingValue(PI))) {
891 LLVM_DEBUG(dbgs()
892 << "Can't fold, phi node " << PN->getName() << " in "
893 << Succ->getName() << " is conflicting with "
894 << BBPN->getName() << " with regard to common predecessor "
895 << IBB->getName() << "\n");
896 return false;
897 }
898 }
899 } else {
900 Value* Val = PN->getIncomingValueForBlock(BB);
901 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
902 // See if the incoming value for the common predecessor is equal to the
903 // one for BB, in which case this phi node will not prevent the merging
904 // of the block.
905 BasicBlock *IBB = PN->getIncomingBlock(PI);
906 if (BBPreds.count(IBB) &&
907 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
908 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
909 << " in " << Succ->getName()
910 << " is conflicting with regard to common "
911 << "predecessor " << IBB->getName() << "\n");
912 return false;
913 }
914 }
915 }
916 }
917
918 return true;
919}
920
921using PredBlockVector = SmallVector<BasicBlock *, 16>;
922using IncomingValueMap = SmallDenseMap<BasicBlock *, Value *, 16>;
923
924/// Determines the value to use as the phi node input for a block.
925///
926/// Select between \p OldVal any value that we know flows from \p BB
927/// to a particular phi on the basis of which one (if either) is not
928/// undef. Update IncomingValues based on the selected value.
929///
930/// \param OldVal The value we are considering selecting.
931/// \param BB The block that the value flows in from.
932/// \param IncomingValues A map from block-to-value for other phi inputs
933/// that we have examined.
934///
935/// \returns the selected value.
936static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
937 IncomingValueMap &IncomingValues) {
938 if (!isa<UndefValue>(OldVal)) {
939 assert((!IncomingValues.count(BB) ||
940 IncomingValues.find(BB)->second == OldVal) &&
941 "Expected OldVal to match incoming value from BB!");
942
943 IncomingValues.insert(std::make_pair(BB, OldVal));
944 return OldVal;
945 }
946
947 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
948 if (It != IncomingValues.end()) return It->second;
949
950 return OldVal;
951}
952
953/// Create a map from block to value for the operands of a
954/// given phi.
955///
956/// Create a map from block to value for each non-undef value flowing
957/// into \p PN.
958///
959/// \param PN The phi we are collecting the map for.
960/// \param IncomingValues [out] The map from block to value for this phi.
961static void gatherIncomingValuesToPhi(PHINode *PN,
962 IncomingValueMap &IncomingValues) {
963 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
964 BasicBlock *BB = PN->getIncomingBlock(i);
965 Value *V = PN->getIncomingValue(i);
966
967 if (!isa<UndefValue>(V))
968 IncomingValues.insert(std::make_pair(BB, V));
969 }
970}
971
972/// Replace the incoming undef values to a phi with the values
973/// from a block-to-value map.
974///
975/// \param PN The phi we are replacing the undefs in.
976/// \param IncomingValues A map from block to value.
977static void replaceUndefValuesInPhi(PHINode *PN,
978 const IncomingValueMap &IncomingValues) {
979 SmallVector<unsigned> TrueUndefOps;
980 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
981 Value *V = PN->getIncomingValue(i);
982
983 if (!isa<UndefValue>(V)) continue;
984
985 BasicBlock *BB = PN->getIncomingBlock(i);
986 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
987
988 // Keep track of undef/poison incoming values. Those must match, so we fix
989 // them up below if needed.
990 // Note: this is conservatively correct, but we could try harder and group
991 // the undef values per incoming basic block.
992 if (It == IncomingValues.end()) {
993 TrueUndefOps.push_back(i);
994 continue;
995 }
996
997 // There is a defined value for this incoming block, so map this undef
998 // incoming value to the defined value.
999 PN->setIncomingValue(i, It->second);
1000 }
1001
1002 // If there are both undef and poison values incoming, then convert those
1003 // values to undef. It is invalid to have different values for the same
1004 // incoming block.
1005 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1006 return isa<PoisonValue>(PN->getIncomingValue(i));
1007 });
1008 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1009 for (unsigned i : TrueUndefOps)
1010 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
1011 }
1012}
1013
1014// Only return true when BB and Succ share a single common predecessor.
1015// Only handles cases when BB can't be merged while its predecessors can be
1016// redirected.
1017static bool
1018CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1019 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1020 const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
1021 BasicBlock *&CommonPred) {
1022
1023 // There must be phis in BB, otherwise BB will be merged into Succ directly
1024 if (BB->phis().empty() || Succ->phis().empty())
1025 return false;
1026
1027 // BB must have predecessors not shared that can be redirected to Succ
1028 if (!BB->hasNPredecessorsOrMore(2))
1029 return false;
1030
1031 // Get single common predecessors of both BB and Succ
1032 for (BasicBlock *SuccPred : SuccPreds) {
1033 if (BBPreds.count(SuccPred)) {
1034 if (CommonPred)
1035 return false;
1036 CommonPred = SuccPred;
1037 }
1038 }
1039
1040 return true;
1041}
1042
1043/// Replace a value flowing from a block to a phi with
1044/// potentially multiple instances of that value flowing from the
1045/// block's predecessors to the phi.
1046///
1047/// \param BB The block with the value flowing into the phi.
1048/// \param BBPreds The predecessors of BB.
1049/// \param PN The phi that we are updating.
1050/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1051static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
1052 const PredBlockVector &BBPreds,
1053 PHINode *PN,
1054 BasicBlock *CommonPred) {
1055 Value *OldVal = PN->removeIncomingValue(BB, false);
1056 assert(OldVal && "No entry in PHI for Pred BB!");
1057
1058 IncomingValueMap IncomingValues;
1059
1060 // We are merging two blocks - BB, and the block containing PN - and
1061 // as a result we need to redirect edges from the predecessors of BB
1062 // to go to the block containing PN, and update PN
1063 // accordingly. Since we allow merging blocks in the case where the
1064 // predecessor and successor blocks both share some predecessors,
1065 // and where some of those common predecessors might have undef
1066 // values flowing into PN, we want to rewrite those values to be
1067 // consistent with the non-undef values.
1068
1069 gatherIncomingValuesToPhi(PN, IncomingValues);
1070
1071 // If this incoming value is one of the PHI nodes in BB, the new entries
1072 // in the PHI node are the entries from the old PHI.
1073 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1074 PHINode *OldValPN = cast<PHINode>(OldVal);
1075 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1076 // Note that, since we are merging phi nodes and BB and Succ might
1077 // have common predecessors, we could end up with a phi node with
1078 // identical incoming branches. This will be cleaned up later (and
1079 // will trigger asserts if we try to clean it up now, without also
1080 // simplifying the corresponding conditional branch).
1081 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1082
1083 if (PredBB == CommonPred)
1084 continue;
1085
1086 Value *PredVal = OldValPN->getIncomingValue(i);
1087 Value *Selected =
1088 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1089
1090 // And add a new incoming value for this predecessor for the
1091 // newly retargeted branch.
1092 PN->addIncoming(Selected, PredBB);
1093 }
1094 if (CommonPred)
1095 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1096
1097 } else {
1098 for (BasicBlock *PredBB : BBPreds) {
1099 // Update existing incoming values in PN for this
1100 // predecessor of BB.
1101 if (PredBB == CommonPred)
1102 continue;
1103
1104 Value *Selected =
1105 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1106
1107 // And add a new incoming value for this predecessor for the
1108 // newly retargeted branch.
1109 PN->addIncoming(Selected, PredBB);
1110 }
1111 if (CommonPred)
1112 PN->addIncoming(OldVal, BB);
1113 }
1114
1115 replaceUndefValuesInPhi(PN, IncomingValues);
1116}
1117
1118bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1119 DomTreeUpdater *DTU) {
1120 assert(BB != &BB->getParent()->getEntryBlock() &&
1121 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1122
1123 // We can't simplify infinite loops.
1124 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1125 if (BB == Succ)
1126 return false;
1127
1128 SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
1129 SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));
1130
1131 // The single common predecessor of BB and Succ when BB cannot be killed
1132 BasicBlock *CommonPred = nullptr;
1133
1134 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1135
1136 // Even if we cannot fold BB into Succ, we may be able to redirect the
1137 // predecessors of BB to Succ.
1138 bool BBPhisMergeable =
1139 BBKillable ||
1140 CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);
1141
1142 if (!BBKillable && !BBPhisMergeable)
1143 return false;
1144
1145 // Check to see if merging these blocks/phis would cause conflicts for any of
1146 // the phi nodes in BB or Succ. If not, we can safely merge.
1147
1148 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1149 // has uses which will not disappear when the PHI nodes are merged. It is
1150 // possible to handle such cases, but difficult: it requires checking whether
1151 // BB dominates Succ, which is non-trivial to calculate in the case where
1152 // Succ has multiple predecessors. Also, it requires checking whether
1153 // constructing the necessary self-referential PHI node doesn't introduce any
1154 // conflicts; this isn't too difficult, but the previous code for doing this
1155 // was incorrect.
1156 //
1157 // Note that if this check finds a live use, BB dominates Succ, so BB is
1158 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1159 // folding the branch isn't profitable in that case anyway.
1160 if (!Succ->getSinglePredecessor()) {
1161 BasicBlock::iterator BBI = BB->begin();
1162 while (isa<PHINode>(*BBI)) {
1163 for (Use &U : BBI->uses()) {
1164 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1165 if (PN->getIncomingBlock(U) != BB)
1166 return false;
1167 } else {
1168 return false;
1169 }
1170 }
1171 ++BBI;
1172 }
1173 }
1174
1175 if (BBPhisMergeable && CommonPred)
1176 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1177 << " and " << Succ->getName() << " : "
1178 << CommonPred->getName() << "\n");
1179
1180 // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner loop
1181 // metadata.
1182 //
1183 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1184 // current status (that loop metadata is implemented as metadata attached to
1185 // the branch instruction in the loop latch block). To quote from review
1186 // comments, "the current representation of loop metadata (using a loop latch
1187 // terminator attachment) is known to be fundamentally broken. Loop latches
1188 // are not uniquely associated with loops (both in that a latch can be part of
1189 // multiple loops and a loop may have multiple latches). Loop headers are. The
1190 // solution to this problem is also known: Add support for basic block
1191 // metadata, and attach loop metadata to the loop header."
1192 //
1193 // Why bail out:
1194 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1195 // the latch for inner-loop (see reason below), so bail out to preserve
1196 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1197 // to this inner-loop.
1198 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1199 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1200 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1201 // one self-looping basic block, which is contradictory with the assumption.
1202 //
1203 // To illustrate how inner-loop metadata is dropped:
1204 //
1205 // CFG Before
1206 //
1207 // BB is while.cond.exit, attached with loop metadata md2.
1208 // BB->Pred is for.body, attached with loop metadata md1.
1209 //
1210 // entry
1211 // |
1212 // v
1213 // ---> while.cond -------------> while.end
1214 // | |
1215 // | v
1216 // | while.body
1217 // | |
1218 // | v
1219 // | for.body <---- (md1)
1220 // | | |______|
1221 // | v
1222 // | while.cond.exit (md2)
1223 // | |
1224 // |_______|
1225 //
1226 // CFG After
1227 //
1228 // while.cond1 is the merge of while.cond.exit and while.cond above.
1229 // for.body is attached with md2, and md1 is dropped.
1230 // If LoopSimplify runs later (as a part of loop pass), it could create
1231 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1232 // back), but it won't see 'md1' nor restore it for the inner-loop.
1233 //
1234 // entry
1235 // |
1236 // v
1237 // ---> while.cond1 -------------> while.end
1238 // | |
1239 // | v
1240 // | while.body
1241 // | |
1242 // | v
1243 // | for.body <---- (md2)
1244 // |_______| |______|
1245 if (Instruction *TI = BB->getTerminator())
1246 if (TI->hasMetadata(LLVMContext::MD_loop))
1247 for (BasicBlock *Pred : predecessors(BB))
1248 if (Instruction *PredTI = Pred->getTerminator())
1249 if (PredTI->hasMetadata(LLVMContext::MD_loop))
1250 return false;
1251
1252 if (BBKillable)
1253 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1254 else if (BBPhisMergeable)
1255 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1256
1257 std::vector<DominatorTree::UpdateType> Updates;
1258
1259 if (DTU) {
1260 // To avoid processing the same predecessor more than once.
1261 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1262 // All predecessors of BB (except the common predecessor) will be moved to
1263 // Succ.
1264 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1265
1266 for (auto *PredOfBB : predecessors(BB)) {
1267 // Do not modify those common predecessors of BB and Succ
1268 if (!SuccPreds.contains(PredOfBB))
1269 if (SeenPreds.insert(PredOfBB).second)
1270 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1271 }
1272
1273 SeenPreds.clear();
1274
1275 for (auto *PredOfBB : predecessors(BB))
1276 // When BB cannot be killed, do not remove the edge between BB and
1277 // CommonPred.
1278 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1279 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1280
1281 if (BBKillable)
1282 Updates.push_back({DominatorTree::Delete, BB, Succ});
1283 }
1284
1285 if (isa<PHINode>(Succ->begin())) {
1286 // If there is more than one pred of succ, and there are PHI nodes in
1287 // the successor, then we need to add incoming edges for the PHI nodes
1288 //
1289 const PredBlockVector BBPreds(predecessors(BB));
1290
1291 // Loop over all of the PHI nodes in the successor of BB.
1292 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1293 PHINode *PN = cast<PHINode>(I);
1294 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1295 }
1296 }
1297
1298 if (Succ->getSinglePredecessor()) {
1299 // BB is the only predecessor of Succ, so Succ will end up with exactly
1300 // the same predecessors BB had.
1301 // Copy over any phi, debug or lifetime instruction.
1302 BB->getTerminator()->eraseFromParent();
1303 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1304 } else {
1305 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1306 // We explicitly check for such uses for merging phis.
1307 assert(PN->use_empty() && "There shouldn't be any uses here!");
1308 PN->eraseFromParent();
1309 }
1310 }
1311
1312 // If the unconditional branch we replaced contains llvm.loop metadata, we
1313 // add the metadata to the branch instructions in the predecessors.
1314 if (Instruction *TI = BB->getTerminator())
1315 if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
1316 for (BasicBlock *Pred : predecessors(BB))
1317 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1318
1319 if (BBKillable) {
1320 // Everything that jumped to BB now goes to Succ.
1321 BB->replaceAllUsesWith(Succ);
1322
1323 if (!Succ->hasName())
1324 Succ->takeName(BB);
1325
1326 // Clear the successor list of BB to match updates applying to DTU later.
1327 if (BB->getTerminator())
1328 BB->back().eraseFromParent();
1329
1330 new UnreachableInst(BB->getContext(), BB);
1331 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1332 "applying corresponding DTU updates.");
1333 } else if (BBPhisMergeable) {
1334 // Everything except CommonPred that jumped to BB now goes to Succ.
1335 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1336 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1337 return UseInst->getParent() != CommonPred &&
1338 BBPreds.contains(UseInst->getParent());
1339 return false;
1340 });
1341 }
1342
1343 if (DTU)
1344 DTU->applyUpdates(Updates);
1345
1346 if (BBKillable)
1347 DeleteDeadBlock(BB, DTU);
1348
1349 return true;
1350}
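// Illustrative sketch (not part of the original Local.cpp): how a cleanup
// might spot candidates for TryToSimplifyUncondBranchFromEmptyBlock. The
// wrapper name is hypothetical.
static bool removeEmptyForwardingBlocks(Function &F, DomTreeUpdater *DTU) {
  bool Changed = false;
  for (BasicBlock &BB : make_early_inc_range(F)) {
    if (&BB == &F.getEntryBlock())
      continue;
    // Candidate: the block is nothing but PHIs plus an unconditional branch.
    auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
    if (BI && BI->isUnconditional() && &*BB.getFirstNonPHIOrDbg() == BI)
      Changed |= TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU);
  }
  return Changed;
}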
1351
1352static bool
1353EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1354 SmallPtrSetImpl<PHINode *> &ToRemove) {
1355 // This implementation doesn't currently consider undef operands
1356 // specially. Theoretically, two phis which are identical except for
1357 // one having an undef where the other doesn't could be collapsed.
1358
1359 bool Changed = false;
1360
1361 // Examine each PHI.
1362 // Note that increment of I must *NOT* be in the iteration_expression, since
1363 // we don't want to immediately advance when we restart from the beginning.
1364 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1365 ++I;
1366 // Is there an identical PHI node in this basic block?
1367 // Note that we only look in the upper square's triangle,
1368 // we already checked that the lower triangle PHI's aren't identical.
1369 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1370 if (ToRemove.contains(DuplicatePN))
1371 continue;
1372 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1373 continue;
1374 // A duplicate. Replace this PHI with the base PHI.
1375 ++NumPHICSEs;
1376 DuplicatePN->replaceAllUsesWith(PN);
1377 ToRemove.insert(DuplicatePN);
1378 Changed = true;
1379
1380 // The RAUW can change PHIs that we already visited.
1381 I = BB->begin();
1382 break; // Start over from the beginning.
1383 }
1384 }
1385 return Changed;
1386}
1387
1388static bool
1389EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1390 SmallPtrSetImpl<PHINode *> &ToRemove) {
1391 // This implementation doesn't currently consider undef operands
1392 // specially. Theoretically, two phis which are identical except for
1393 // one having an undef where the other doesn't could be collapsed.
1394
1395 struct PHIDenseMapInfo {
1396 static PHINode *getEmptyKey() {
1397 return DenseMapInfo<PHINode *>::getEmptyKey();
1398 }
1399
1400 static PHINode *getTombstoneKey() {
1401 return DenseMapInfo<PHINode *>::getTombstoneKey();
1402 }
1403
1404 static bool isSentinel(PHINode *PN) {
1405 return PN == getEmptyKey() || PN == getTombstoneKey();
1406 }
1407
1408 // WARNING: this logic must be kept in sync with
1409 // Instruction::isIdenticalToWhenDefined()!
1410 static unsigned getHashValueImpl(PHINode *PN) {
1411 // Compute a hash value on the operands. Instcombine will likely have
1412 // sorted them, which helps expose duplicates, but we have to check all
1413 // the operands to be safe in case instcombine hasn't run.
1414 return static_cast<unsigned>(hash_combine(
1415 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1416 hash_combine_range(PN->block_begin(), PN->block_end())));
1417 }
1418
1419 static unsigned getHashValue(PHINode *PN) {
1420#ifndef NDEBUG
1421 // If -phicse-debug-hash was specified, return a constant -- this
1422 // will force all hashing to collide, so we'll exhaustively search
1423 // the table for a match, and the assertion in isEqual will fire if
1424 // there's a bug causing equal keys to hash differently.
1425 if (PHICSEDebugHash)
1426 return 0;
1427#endif
1428 return getHashValueImpl(PN);
1429 }
1430
1431 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1432 if (isSentinel(LHS) || isSentinel(RHS))
1433 return LHS == RHS;
1434 return LHS->isIdenticalTo(RHS);
1435 }
1436
1437 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1438 // These comparisons are nontrivial, so assert that equality implies
1439 // hash equality (DenseMap demands this as an invariant).
1440 bool Result = isEqualImpl(LHS, RHS);
1441 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1442 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1443 return Result;
1444 }
1445 };
1446
1447 // Set of unique PHINodes.
1448 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1449 PHISet.reserve(4 * PHICSENumPHISmallSize);
1450
1451 // Examine each PHI.
1452 bool Changed = false;
1453 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1454 if (ToRemove.contains(PN))
1455 continue;
1456 auto Inserted = PHISet.insert(PN);
1457 if (!Inserted.second) {
1458 // A duplicate. Replace this PHI with its duplicate.
1459 ++NumPHICSEs;
1460 PN->replaceAllUsesWith(*Inserted.first);
1461 ToRemove.insert(PN);
1462 Changed = true;
1463
1464 // The RAUW can change PHIs that we already visited. Start over from the
1465 // beginning.
1466 PHISet.clear();
1467 I = BB->begin();
1468 }
1469 }
1470
1471 return Changed;
1472}
1473
1474bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1475 SmallPtrSetImpl<PHINode *> &ToRemove) {
1476 if (
1477#ifndef NDEBUG
1478 !PHICSEDebugHash &&
1479#endif
1480 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1481 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1482 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1483}
1484
1485bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1486 SmallPtrSet<PHINode *, 8> ToRemove;
1487 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1488 for (PHINode *PN : ToRemove)
1489 PN->eraseFromParent();
1490 return Changed;
1491}
1492
1493static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1494 const DataLayout &DL) {
1495 V = V->stripPointerCasts();
1496
1497 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1498 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1499 // than the current alignment, as the known bits calculation should have
1500 // already taken it into account. However, this is not always the case,
1501 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1502 // doesn't.
1503 Align CurrentAlign = AI->getAlign();
1504 if (PrefAlign <= CurrentAlign)
1505 return CurrentAlign;
1506
1507 // If the preferred alignment is greater than the natural stack alignment
1508 // then don't round up. This avoids dynamic stack realignment.
1509 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1510 return CurrentAlign;
1511 AI->setAlignment(PrefAlign);
1512 return PrefAlign;
1513 }
1514
1515 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1516 // TODO: as above, this shouldn't be necessary.
1517 Align CurrentAlign = GO->getPointerAlignment(DL);
1518 if (PrefAlign <= CurrentAlign)
1519 return CurrentAlign;
1520
1521 // If there is a large requested alignment and we can, bump up the alignment
1522 // of the global. If the memory we set aside for the global may not be the
1523 // memory used by the final program then it is impossible for us to reliably
1524 // enforce the preferred alignment.
1525 if (!GO->canIncreaseAlignment())
1526 return CurrentAlign;
1527
1528 if (GO->isThreadLocal()) {
1529 unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1530 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1531 PrefAlign = Align(MaxTLSAlign);
1532 }
1533
1534 GO->setAlignment(PrefAlign);
1535 return PrefAlign;
1536 }
1537
1538 return Align(1);
1539}
1540
1541Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1542 const DataLayout &DL,
1543 const Instruction *CxtI,
1544 AssumptionCache *AC,
1545 const DominatorTree *DT) {
1546 assert(V->getType()->isPointerTy() &&
1547 "getOrEnforceKnownAlignment expects a pointer!");
1548
1549 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1550 unsigned TrailZ = Known.countMinTrailingZeros();
1551
1552 // Avoid trouble with ridiculously large TrailZ values, such as
1553 // those computed from a null pointer.
1554 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1555 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1556
1557 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1558
1559 if (PrefAlign && *PrefAlign > Alignment)
1560 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1561
1562 // We don't need to make any adjustment.
1563 return Alignment;
1564}
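// Illustrative sketch (not part of the original Local.cpp): a typical call
// site that would like a pointer operand to be 16-byte aligned and settles
// for whatever can be proven or enforced. Names are hypothetical.
static Align preferredPointerAlignment(Value *Ptr, Instruction *CxtI,
                                       AssumptionCache *AC,
                                       DominatorTree *DT) {
  const DataLayout &DL = CxtI->getDataLayout();
  return getOrEnforceKnownAlignment(Ptr, Align(16), DL, CxtI, AC, DT);
}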
1565
1566///===---------------------------------------------------------------------===//
1567/// Dbg Intrinsic utilities
1568///
1569
1570/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1571static bool PhiHasDebugValue(DILocalVariable *DIVar,
1572 DIExpression *DIExpr,
1573 PHINode *APN) {
1574 // Since we can't guarantee that the original dbg.declare intrinsic
1575 // is removed by LowerDbgDeclare(), we need to make sure that we are
1576 // not inserting the same dbg.value intrinsic over and over.
1577 SmallVector<DbgValueInst *, 1> DbgValues;
1578 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1579 findDbgValues(DbgValues, APN, &DbgVariableRecords);
1580 for (auto *DVI : DbgValues) {
1581 assert(is_contained(DVI->getValues(), APN));
1582 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1583 return true;
1584 }
1585 for (auto *DVR : DbgVariableRecords) {
1586 assert(is_contained(DVR->location_ops(), APN));
1587 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1588 return true;
1589 }
1590 return false;
1591}
1592
1593/// Check if the alloc size of \p ValTy is large enough to cover the variable
1594/// (or fragment of the variable) described by \p DII.
1595///
1596/// This is primarily intended as a helper for the different
1597/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1598/// describes an alloca'd variable, so we need to use the alloc size of the
1599/// value when doing the comparison. E.g. an i1 value will be identified as
1600/// covering an n-bit fragment, if the store size of i1 is at least n bits.
1601static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1602 const DataLayout &DL = DII->getDataLayout();
1603 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1604 if (std::optional<uint64_t> FragmentSize =
1605 DII->getExpression()->getActiveBits(DII->getVariable()))
1606 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1607
1608 // We can't always calculate the size of the DI variable (e.g. if it is a
1609 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1610 // instead.
1611 if (DII->isAddressOfVariable()) {
1612 // DII should have exactly 1 location when it is an address.
1613 assert(DII->getNumVariableLocationOps() == 1 &&
1614 "address of variable must have exactly 1 location operand.");
1615 if (auto *AI =
1616 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1617 if (std::optional<TypeSize> FragmentSize =
1618 AI->getAllocationSizeInBits(DL)) {
1619 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1620 }
1621 }
1622 }
1623 // Could not determine size of variable. Conservatively return false.
1624 return false;
1625}
1626// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
1627// the replacement for dbg.values.
1628static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1629 const DataLayout &DL = DVR->getModule()->getDataLayout();
1630 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1631 if (std::optional<uint64_t> FragmentSize =
1632 DVR->getExpression()->getActiveBits(DVR->getVariable()))
1633 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1634
1635 // We can't always calculate the size of the DI variable (e.g. if it is a
1636 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1637 // instead.
1638 if (DVR->isAddressOfVariable()) {
1639 // DVR should have exactly 1 location when it is an address.
1640 assert(DVR->getNumVariableLocationOps() == 1 &&
1641 "address of variable must have exactly 1 location operand.");
1642 if (auto *AI =
1643 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
1644 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1645 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1646 }
1647 }
1648 }
1649 // Could not determine size of variable. Conservatively return false.
1650 return false;
1651}
1652
1653static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1654 DILocalVariable *DIVar,
1655 DIExpression *DIExpr,
1656 const DebugLoc &NewLoc,
1657 BasicBlock::iterator Instr) {
1658 if (!UseNewDbgInfoFormat) {
1659 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1660 (Instruction *)nullptr);
1661 DbgVal.get<Instruction *>()->insertBefore(Instr);
1662 } else {
1663 // RemoveDIs: if we're using the new debug-info format, allocate a
1664 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1665 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1666 DbgVariableRecord *DV =
1667 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1668 Instr->getParent()->insertDbgRecordBefore(DV, Instr);
1669 }
1670}
1671
1672static void insertDbgValueOrDbgVariableRecordAfter(
1673 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1674 const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
1675 if (!UseNewDbgInfoFormat) {
1676 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1677 (Instruction *)nullptr);
1678 DbgVal.get<Instruction *>()->insertAfter(&*Instr);
1679 } else {
1680 // RemoveDIs: if we're using the new debug-info format, allocate a
1681 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1682    ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1683 DbgVariableRecord *DV =
1684 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1685 Instr->getParent()->insertDbgRecordAfter(DV, &*Instr);
1686 }
1687}
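// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Minimal usage of the helpers above: emit a debug value for `NewV` directly
// before `InsertPt`, in whichever debug-info format the module currently uses.
// All parameter names here are assumptions for the example.
static void emitDebugValueBefore(DIBuilder &DIB, Value *NewV,
                                 DILocalVariable *Var, DIExpression *Expr,
                                 const DebugLoc &Loc, Instruction *InsertPt) {
  insertDbgValueOrDbgVariableRecord(DIB, NewV, Var, Expr, Loc,
                                    InsertPt->getIterator());
}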
1688
1689/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1690/// that has an associated llvm.dbg.declare intrinsic.
1691void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1692 StoreInst *SI, DIBuilder &Builder) {
1693 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1694 auto *DIVar = DII->getVariable();
1695 assert(DIVar && "Missing variable");
1696 auto *DIExpr = DII->getExpression();
1697 Value *DV = SI->getValueOperand();
1698
1699 DebugLoc NewLoc = getDebugValueLoc(DII);
1700
1701 // If the alloca describes the variable itself, i.e. the expression in the
1702 // dbg.declare doesn't start with a dereference, we can perform the
1703 // conversion if the value covers the entire fragment of DII.
1704 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1705 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1706 // We conservatively ignore other dereferences, because the following two are
1707 // not equivalent:
1708 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1709 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1710 // The former is adding 2 to the address of the variable, whereas the latter
1711 // is adding 2 to the value of the variable. As such, we insist on just a
1712 // deref expression.
1713 bool CanConvert =
1714 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1715                            valueCoversEntireFragment(DV->getType(), DII));
1716 if (CanConvert) {
1717 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1718 SI->getIterator());
1719 return;
1720 }
1721
1722 // FIXME: If storing to a part of the variable described by the dbg.declare,
1723 // then we want to insert a dbg.value for the corresponding fragment.
1724 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1725 << '\n');
1726 // For now, when there is a store to parts of the variable (but we do not
1727  // know which part) we insert a dbg.value intrinsic to indicate that we
1728 // know nothing about the variable's content.
1729 DV = UndefValue::get(DV->getType());
1730 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1731 SI->getIterator());
1732}
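// --- Illustrative example (editorial addition, not part of Local.cpp) ---
// Roughly, given
//   %x = alloca i32
//   call void @llvm.dbg.declare(metadata ptr %x, metadata !var, metadata !DIExpression())
//   store i32 %v, ptr %x
// the conversion above records the stored value just before the store:
//   call void @llvm.dbg.value(metadata i32 %v, metadata !var, metadata !DIExpression())
//   store i32 %v, ptr %x
// (or the equivalent #dbg_value record in the new debug-info format). The
// caller, e.g. LowerDbgDeclare below, erases the dbg.declare afterwards.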
1733
1734/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1735/// that has an associated llvm.dbg.declare intrinsic.
1736void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1737 LoadInst *LI, DIBuilder &Builder) {
1738 auto *DIVar = DII->getVariable();
1739 auto *DIExpr = DII->getExpression();
1740 assert(DIVar && "Missing variable");
1741
1742 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1743 // FIXME: If only referring to a part of the variable described by the
1744 // dbg.declare, then we want to insert a dbg.value for the corresponding
1745 // fragment.
1746 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1747 << *DII << '\n');
1748 return;
1749 }
1750
1751 DebugLoc NewLoc = getDebugValueLoc(DII);
1752
1753 // We are now tracking the loaded value instead of the address. In the
1754 // future if multi-location support is added to the IR, it might be
1755 // preferable to keep tracking both the loaded value and the original
1756 // address in case the alloca can not be elided.
1757 insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc,
1758 LI->getIterator());
1759}
1760
1761void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1762 StoreInst *SI, DIBuilder &Builder) {
1763 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1764 auto *DIVar = DVR->getVariable();
1765 assert(DIVar && "Missing variable");
1766 auto *DIExpr = DVR->getExpression();
1767 Value *DV = SI->getValueOperand();
1768
1769 DebugLoc NewLoc = getDebugValueLoc(DVR);
1770
1771 // If the alloca describes the variable itself, i.e. the expression in the
1772 // dbg.declare doesn't start with a dereference, we can perform the
1773 // conversion if the value covers the entire fragment of DII.
1774 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1775 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1776 // We conservatively ignore other dereferences, because the following two are
1777 // not equivalent:
1778 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1779 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1780 // The former is adding 2 to the address of the variable, whereas the latter
1781 // is adding 2 to the value of the variable. As such, we insist on just a
1782 // deref expression.
1783 bool CanConvert =
1784 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1785                            valueCoversEntireFragment(DV->getType(), DVR));
1786 if (CanConvert) {
1787 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1788 SI->getIterator());
1789 return;
1790 }
1791
1792 // FIXME: If storing to a part of the variable described by the dbg.declare,
1793 // then we want to insert a dbg.value for the corresponding fragment.
1794 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1795 << '\n');
1797
1798 // For now, when there is a store to parts of the variable (but we do not
1799  // know which part) we insert a dbg.value record to indicate that we
1800 // know nothing about the variable's content.
1801 DV = UndefValue::get(DV->getType());
1802  ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1803 DbgVariableRecord *NewDVR =
1804 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1805 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1806}
1807
1808/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1809/// llvm.dbg.declare intrinsic.
1810void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1811 PHINode *APN, DIBuilder &Builder) {
1812 auto *DIVar = DII->getVariable();
1813 auto *DIExpr = DII->getExpression();
1814 assert(DIVar && "Missing variable");
1815
1816 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1817 return;
1818
1819 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1820 // FIXME: If only referring to a part of the variable described by the
1821 // dbg.declare, then we want to insert a dbg.value for the corresponding
1822 // fragment.
1823 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1824 << *DII << '\n');
1825 return;
1826 }
1827
1828 BasicBlock *BB = APN->getParent();
1829 auto InsertionPt = BB->getFirstInsertionPt();
1830
1831 DebugLoc NewLoc = getDebugValueLoc(DII);
1832
1833 // The block may be a catchswitch block, which does not have a valid
1834 // insertion point.
1835 // FIXME: Insert dbg.value markers in the successors when appropriate.
1836 if (InsertionPt != BB->end()) {
1837 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1838 InsertionPt);
1839 }
1840}
1841
1842void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1843 DIBuilder &Builder) {
1844 auto *DIVar = DVR->getVariable();
1845 auto *DIExpr = DVR->getExpression();
1846 assert(DIVar && "Missing variable");
1847
1848 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1849 // FIXME: If only referring to a part of the variable described by the
1850 // dbg.declare, then we want to insert a DbgVariableRecord for the
1851 // corresponding fragment.
1852 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1853 << *DVR << '\n');
1854 return;
1855 }
1856
1857 DebugLoc NewLoc = getDebugValueLoc(DVR);
1858
1859 // We are now tracking the loaded value instead of the address. In the
1860 // future if multi-location support is added to the IR, it might be
1861 // preferable to keep tracking both the loaded value and the original
1862 // address in case the alloca can not be elided.
1864
1865 // Create a DbgVariableRecord directly and insert.
1866  ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1867 DbgVariableRecord *DV =
1868 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1869 LI->getParent()->insertDbgRecordAfter(DV, LI);
1870}
1871
1872/// Determine whether this alloca is either a VLA or an array.
1873static bool isArray(AllocaInst *AI) {
1874 return AI->isArrayAllocation() ||
1875 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1876}
1877
1878/// Determine whether this alloca is a structure.
1879static bool isStructure(AllocaInst *AI) {
1880 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1881}
1882void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1883 DIBuilder &Builder) {
1884 auto *DIVar = DVR->getVariable();
1885 auto *DIExpr = DVR->getExpression();
1886 assert(DIVar && "Missing variable");
1887
1888 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1889 return;
1890
1891 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1892 // FIXME: If only referring to a part of the variable described by the
1893 // dbg.declare, then we want to insert a DbgVariableRecord for the
1894 // corresponding fragment.
1895 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1896 << *DVR << '\n');
1897 return;
1898 }
1899
1900 BasicBlock *BB = APN->getParent();
1901 auto InsertionPt = BB->getFirstInsertionPt();
1902
1903 DebugLoc NewLoc = getDebugValueLoc(DVR);
1904
1905 // The block may be a catchswitch block, which does not have a valid
1906 // insertion point.
1907 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1908 if (InsertionPt != BB->end()) {
1909 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1910 InsertionPt);
1911 }
1912}
1913
1914/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1915/// of llvm.dbg.value intrinsics.
1916bool llvm::LowerDbgDeclare(Function &F) {
1917 bool Changed = false;
1918 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1919  SmallVector<DbgDeclareInst *, 4> Dbgs;
1920  SmallVector<DbgVariableRecord *> DVRs;
1921 for (auto &FI : F) {
1922 for (Instruction &BI : FI) {
1923 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1924 Dbgs.push_back(DDI);
1925 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1926 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1927 DVRs.push_back(&DVR);
1928 }
1929 }
1930 }
1931
1932 if (Dbgs.empty() && DVRs.empty())
1933 return Changed;
1934
1935 auto LowerOne = [&](auto *DDI) {
1936 AllocaInst *AI =
1937 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1938 // If this is an alloca for a scalar variable, insert a dbg.value
1939 // at each load and store to the alloca and erase the dbg.declare.
1940 // The dbg.values allow tracking a variable even if it is not
1941 // stored on the stack, while the dbg.declare can only describe
1942 // the stack slot (and at a lexical-scope granularity). Later
1943 // passes will attempt to elide the stack slot.
1944 if (!AI || isArray(AI) || isStructure(AI))
1945 return;
1946
1947 // A volatile load/store means that the alloca can't be elided anyway.
1948 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1949 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1950 return LI->isVolatile();
1951 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1952 return SI->isVolatile();
1953 return false;
1954 }))
1955 return;
1956
1957    SmallVector<const Value *, 8> WorkList;
1958 WorkList.push_back(AI);
1959 while (!WorkList.empty()) {
1960 const Value *V = WorkList.pop_back_val();
1961 for (const auto &AIUse : V->uses()) {
1962 User *U = AIUse.getUser();
1963 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1964 if (AIUse.getOperandNo() == 1)
1965 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1966 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1967 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1968 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1969 // This is a call by-value or some other instruction that takes a
1970 // pointer to the variable. Insert a *value* intrinsic that describes
1971 // the variable by dereferencing the alloca.
1972 if (!CI->isLifetimeStartOrEnd()) {
1973 DebugLoc NewLoc = getDebugValueLoc(DDI);
1974 auto *DerefExpr =
1975 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1976 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1977 DerefExpr, NewLoc,
1978 CI->getIterator());
1979 }
1980 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1981 if (BI->getType()->isPointerTy())
1982 WorkList.push_back(BI);
1983 }
1984 }
1985 }
1986 DDI->eraseFromParent();
1987 Changed = true;
1988 };
1989
1990 for_each(Dbgs, LowerOne);
1991 for_each(DVRs, LowerOne);
1992
1993 if (Changed)
1994 for (BasicBlock &BB : F)
1995      RemoveRedundantDbgInstrs(&BB);
1996
1997 return Changed;
1998}
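// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Lowering every dbg.declare in a module with the utility above; the helper
// name is hypothetical.
static bool lowerAllDbgDeclares(Module &M) {
  bool Changed = false;
  for (Function &F : M)
    if (!F.isDeclaration())
      Changed |= LowerDbgDeclare(F);
  return Changed;
}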
1999
2000// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
2001// debug-info out of the block's DbgVariableRecords rather than dbg.value
2002// intrinsics.
2003static void
2004insertDbgVariableRecordsForPHIs(BasicBlock *BB,
2005 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2006 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
2007 if (InsertedPHIs.size() == 0)
2008 return;
2009
2010 // Map existing PHI nodes to their DbgVariableRecords.
2012 for (auto &I : *BB) {
2013 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
2014 for (Value *V : DVR.location_ops())
2015 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2016 DbgValueMap.insert({Loc, &DVR});
2017 }
2018 }
2019 if (DbgValueMap.size() == 0)
2020 return;
2021
2022 // Map a pair of the destination BB and old DbgVariableRecord to the new
2023 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
2024 // more than one of the inserted PHIs in the same destination BB, we can
2025 // update the same DbgVariableRecord with all the new PHIs instead of creating
2026 // one copy for each.
2028 NewDbgValueMap;
2029 // Then iterate through the new PHIs and look to see if they use one of the
2030 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
2031 // propagate the info through the new PHI. If we use more than one new PHI in
2032 // a single destination BB with the same old dbg.value, merge the updates so
2033 // that we get a single new DbgVariableRecord with all the new PHIs.
2034 for (auto PHI : InsertedPHIs) {
2035 BasicBlock *Parent = PHI->getParent();
2036 // Avoid inserting a debug-info record into an EH block.
2037 if (Parent->getFirstNonPHI()->isEHPad())
2038 continue;
2039 for (auto VI : PHI->operand_values()) {
2040 auto V = DbgValueMap.find(VI);
2041 if (V != DbgValueMap.end()) {
2042 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
2043 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2044 if (NewDI == NewDbgValueMap.end()) {
2045 DbgVariableRecord *NewDbgII = DbgII->clone();
2046 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2047 }
2048 DbgVariableRecord *NewDbgII = NewDI->second;
2049        // If PHI contains VI as an operand more than once, we may have
2050        // already replaced it in NewDbgII; confirm that it is still present.
2051 if (is_contained(NewDbgII->location_ops(), VI))
2052 NewDbgII->replaceVariableLocationOp(VI, PHI);
2053 }
2054 }
2055 }
2056 // Insert the new DbgVariableRecords into their destination blocks.
2057 for (auto DI : NewDbgValueMap) {
2058 BasicBlock *Parent = DI.first.first;
2059 DbgVariableRecord *NewDbgII = DI.second;
2060 auto InsertionPt = Parent->getFirstInsertionPt();
2061 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2062
2063 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
2064 }
2065}
2066
2067/// Propagate dbg.value intrinsics through the newly inserted PHIs.
2068void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2069 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2070 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2071 if (InsertedPHIs.size() == 0)
2072 return;
2073
2074 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);
2075
2076 // Map existing PHI nodes to their dbg.values.
2077 ValueToValueMapTy DbgValueMap;
2078 for (auto &I : *BB) {
2079 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
2080 for (Value *V : DbgII->location_ops())
2081 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2082 DbgValueMap.insert({Loc, DbgII});
2083 }
2084 }
2085 if (DbgValueMap.size() == 0)
2086 return;
2087
2088 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2089 // so that if a dbg.value is being rewritten to use more than one of the
2090 // inserted PHIs in the same destination BB, we can update the same dbg.value
2091 // with all the new PHIs instead of creating one copy for each.
2094 NewDbgValueMap;
2095 // Then iterate through the new PHIs and look to see if they use one of the
2096 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2097 // propagate the info through the new PHI. If we use more than one new PHI in
2098 // a single destination BB with the same old dbg.value, merge the updates so
2099 // that we get a single new dbg.value with all the new PHIs.
2100 for (auto *PHI : InsertedPHIs) {
2101 BasicBlock *Parent = PHI->getParent();
2102 // Avoid inserting an intrinsic into an EH block.
2103 if (Parent->getFirstNonPHI()->isEHPad())
2104 continue;
2105 for (auto *VI : PHI->operand_values()) {
2106 auto V = DbgValueMap.find(VI);
2107 if (V != DbgValueMap.end()) {
2108 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
2109 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2110 if (NewDI == NewDbgValueMap.end()) {
2111 auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
2112 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2113 }
2114 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2115        // If PHI contains VI as an operand more than once, we may have
2116        // already replaced it in NewDbgII; confirm that it is still present.
2117 if (is_contained(NewDbgII->location_ops(), VI))
2118 NewDbgII->replaceVariableLocationOp(VI, PHI);
2119 }
2120 }
2121 }
2122  // Insert the new dbg.values into their destination blocks.
2123 for (auto DI : NewDbgValueMap) {
2124 BasicBlock *Parent = DI.first.first;
2125 auto *NewDbgII = DI.second;
2126 auto InsertionPt = Parent->getFirstInsertionPt();
2127 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2128 NewDbgII->insertBefore(&*InsertionPt);
2129 }
2130}
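// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Typical caller pattern: after SSAUpdater (or a similar utility) has created
// new PHIs in `BB`, forward debug values from the old PHIs onto the new ones.
static void propagateDebugToNewPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &NewPHIs) {
  if (!NewPHIs.empty())
    insertDebugValuesForPHIs(BB, NewPHIs);
}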
2131
2132bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2133 DIBuilder &Builder, uint8_t DIExprFlags,
2134 int Offset) {
2135 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
2136  TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address);
2137
2138 auto ReplaceOne = [&](auto *DII) {
2139 assert(DII->getVariable() && "Missing variable");
2140 auto *DIExpr = DII->getExpression();
2141 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
2142 DII->setExpression(DIExpr);
2143 DII->replaceVariableLocationOp(Address, NewAddress);
2144 };
2145
2146 for_each(DbgDeclares, ReplaceOne);
2147 for_each(DVRDeclares, ReplaceOne);
2148
2149 return !DbgDeclares.empty() || !DVRDeclares.empty();
2150}
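// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Example use of replaceDbgDeclare: after moving a variable from `OldSlot` to
// 16 bytes past `NewBase` (a made-up scenario), retarget its dbg.declares and
// fold the byte offset into their expressions.
static void retargetDeclares(Value *OldSlot, Value *NewBase, DIBuilder &DIB) {
  replaceDbgDeclare(OldSlot, NewBase, DIB, DIExpression::ApplyOffset,
                    /*Offset=*/16);
}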
2151
2152static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2153 DILocalVariable *DIVar,
2154 DIExpression *DIExpr, Value *NewAddress,
2155 DbgValueInst *DVI,
2156 DbgVariableRecord *DVR,
2157 DIBuilder &Builder, int Offset) {
2158 assert(DIVar && "Missing variable");
2159
2160 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2161 // should do with the alloca pointer is dereference it. Otherwise we don't
2162 // know how to handle it and give up.
2163 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2164 DIExpr->getElement(0) != dwarf::DW_OP_deref)
2165 return;
2166
2167 // Insert the offset before the first deref.
2168 if (Offset)
2169 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
2170
2171 if (DVI) {
2172 DVI->setExpression(DIExpr);
2173 DVI->replaceVariableLocationOp(0u, NewAddress);
2174 } else {
2175 assert(DVR);
2176 DVR->setExpression(DIExpr);
2177 DVR->replaceVariableLocationOp(0u, NewAddress);
2178 }
2179}
2180
2181void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2182 DIBuilder &Builder, int Offset) {
2183  SmallVector<DbgValueInst *, 1> DbgUsers;
2184  SmallVector<DbgVariableRecord *, 1> DPUsers;
2185 findDbgValues(DbgUsers, AI, &DPUsers);
2186
2187 // Attempt to replace dbg.values that use this alloca.
2188 for (auto *DVI : DbgUsers)
2189 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
2190 DVI->getExpression(), NewAllocaAddress, DVI,
2191 nullptr, Builder, Offset);
2192
2193 // Replace any DbgVariableRecords that use this alloca.
2194 for (DbgVariableRecord *DVR : DPUsers)
2195 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2196 DVR->getExpression(), NewAllocaAddress, nullptr,
2197 DVR, Builder, Offset);
2198}
2199
2200/// Where possible to salvage debug information for \p I do so.
2201/// If not possible mark undef.
2202void llvm::salvageDebugInfo(Instruction &I) {
2203  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2204  SmallVector<DbgVariableRecord *, 1> DPUsers;
2205 findDbgUsers(DbgUsers, &I, &DPUsers);
2206 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
2207}
2208
2209template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2210 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2211 // Only instructions can be salvaged at the moment.
2212 if (!I)
2213 return;
2214
2215 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2216 "address-expression shouldn't have fragment info");
2217
2218 // The address component of a dbg.assign cannot be variadic.
2219 uint64_t CurrentLocOps = 0;
2220 SmallVector<Value *, 4> AdditionalValues;
2221  SmallVector<uint64_t, 16> Ops;
2222 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2223
2224 // Check if the salvage failed.
2225 if (!NewV)
2226 return;
2227
2228  DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2229 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2230 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2231 "address-expression shouldn't have fragment info");
2232
2233 SalvagedExpr = SalvagedExpr->foldConstantMath();
2234
2235 // Salvage succeeds if no additional values are required.
2236 if (AdditionalValues.empty()) {
2237 Assign->setAddress(NewV);
2238 Assign->setAddressExpression(SalvagedExpr);
2239 } else {
2240 Assign->setKillAddress();
2241 }
2242}
2243
2244void llvm::salvageDebugInfoForDbgValues(
2245    Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2246    ArrayRef<DbgVariableRecord *> DPUsers) {
2247  // These are arbitrarily chosen limits on the maximum number of values and the
2248 // maximum size of a debug expression we can salvage up to, used for
2249 // performance reasons.
2250 const unsigned MaxDebugArgs = 16;
2251 const unsigned MaxExpressionSize = 128;
2252 bool Salvaged = false;
2253
2254 for (auto *DII : DbgUsers) {
2255 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
2256 if (DAI->getAddress() == &I) {
2257        salvageDbgAssignAddress(DAI);
2258 Salvaged = true;
2259 }
2260 if (DAI->getValue() != &I)
2261 continue;
2262 }
2263
2264 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2265 // pointing out the value as a DWARF memory location description.
2266 bool StackValue = isa<DbgValueInst>(DII);
2267 auto DIILocation = DII->location_ops();
2268 assert(
2269 is_contained(DIILocation, &I) &&
2270 "DbgVariableIntrinsic must use salvaged instruction as its location");
2271 SmallVector<Value *, 4> AdditionalValues;
2272 // `I` may appear more than once in DII's location ops, and each use of `I`
2273 // must be updated in the DIExpression and potentially have additional
2274 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2275 // DIILocation.
2276 Value *Op0 = nullptr;
2277 DIExpression *SalvagedExpr = DII->getExpression();
2278 auto LocItr = find(DIILocation, &I);
2279 while (SalvagedExpr && LocItr != DIILocation.end()) {
2280      SmallVector<uint64_t, 16> Ops;
2281 unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
2282 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2283 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2284 if (!Op0)
2285 break;
2286 SalvagedExpr =
2287 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2288 LocItr = std::find(++LocItr, DIILocation.end(), &I);
2289 }
2290 // salvageDebugInfoImpl should fail on examining the first element of
2291 // DbgUsers, or none of them.
2292 if (!Op0)
2293 break;
2294
2295 SalvagedExpr = SalvagedExpr->foldConstantMath();
2296 DII->replaceVariableLocationOp(&I, Op0);
2297 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2298 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2299 DII->setExpression(SalvagedExpr);
2300 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
2301 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2302 MaxDebugArgs) {
2303 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2304 } else {
2305 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2306 // supported in those instructions. Also do not salvage if the resulting
2307 // DIArgList would contain an unreasonably large number of values.
2308 DII->setKillLocation();
2309 }
2310 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2311 Salvaged = true;
2312 }
2313 // Duplicate of above block for DbgVariableRecords.
2314 for (auto *DVR : DPUsers) {
2315 if (DVR->isDbgAssign()) {
2316 if (DVR->getAddress() == &I) {
2317        salvageDbgAssignAddress(DVR);
2318 Salvaged = true;
2319 }
2320 if (DVR->getValue() != &I)
2321 continue;
2322 }
2323
2324 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2325 // are implicitly pointing out the value as a DWARF memory location
2326 // description.
2327 bool StackValue =
2328 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2329 auto DVRLocation = DVR->location_ops();
2330 assert(
2331 is_contained(DVRLocation, &I) &&
2332 "DbgVariableIntrinsic must use salvaged instruction as its location");
2333 SmallVector<Value *, 4> AdditionalValues;
2334 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2335 // must be updated in the DIExpression and potentially have additional
2336 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2337 // DVRLocation.
2338 Value *Op0 = nullptr;
2339 DIExpression *SalvagedExpr = DVR->getExpression();
2340 auto LocItr = find(DVRLocation, &I);
2341 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2342      SmallVector<uint64_t, 16> Ops;
2343 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2344 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2345 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2346 if (!Op0)
2347 break;
2348 SalvagedExpr =
2349 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2350 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2351 }
2352 // salvageDebugInfoImpl should fail on examining the first element of
2353 // DbgUsers, or none of them.
2354 if (!Op0)
2355 break;
2356
2357 SalvagedExpr = SalvagedExpr->foldConstantMath();
2358 DVR->replaceVariableLocationOp(&I, Op0);
2359 bool IsValidSalvageExpr =
2360 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2361 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2362 DVR->setExpression(SalvagedExpr);
2363 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2364 IsValidSalvageExpr &&
2365 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2366 MaxDebugArgs) {
2367 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2368 } else {
2369 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2370 // currently only valid for stack value expressions.
2371 // Also do not salvage if the resulting DIArgList would contain an
2372 // unreasonably large number of values.
2373 DVR->setKillLocation();
2374 }
2375 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2376 Salvaged = true;
2377 }
2378
2379 if (Salvaged)
2380 return;
2381
2382 for (auto *DII : DbgUsers)
2383 DII->setKillLocation();
2384
2385 for (auto *DVR : DPUsers)
2386 DVR->setKillLocation();
2387}
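// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// The common way transforms use the salvaging machinery above: salvage debug
// users immediately before erasing a dead instruction, so variable locations
// degrade to expressions over the operands instead of being dropped.
static void eraseInstructionWithDebugSalvage(Instruction *I) {
  assert(I->use_empty() && "expected a dead instruction");
  salvageDebugInfo(*I);
  I->eraseFromParent();
}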
2388
2389Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2390 uint64_t CurrentLocOps,
2391                           SmallVectorImpl<uint64_t> &Opcodes,
2392 SmallVectorImpl<Value *> &AdditionalValues) {
2393 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2394 // Rewrite a GEP into a DIExpression.
2395 MapVector<Value *, APInt> VariableOffsets;
2396 APInt ConstantOffset(BitWidth, 0);
2397 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2398 return nullptr;
2399 if (!VariableOffsets.empty() && !CurrentLocOps) {
2400 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2401 CurrentLocOps = 1;
2402 }
2403 for (const auto &Offset : VariableOffsets) {
2404 AdditionalValues.push_back(Offset.first);
2405 assert(Offset.second.isStrictlyPositive() &&
2406 "Expected strictly positive multiplier for offset.");
2407 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2408 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2409 dwarf::DW_OP_plus});
2410 }
2411 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2412 return GEP->getOperand(0);
2413}
2414
2415static uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2416 switch (Opcode) {
2417 case Instruction::Add:
2418 return dwarf::DW_OP_plus;
2419 case Instruction::Sub:
2420 return dwarf::DW_OP_minus;
2421 case Instruction::Mul:
2422 return dwarf::DW_OP_mul;
2423 case Instruction::SDiv:
2424 return dwarf::DW_OP_div;
2425 case Instruction::SRem:
2426 return dwarf::DW_OP_mod;
2427 case Instruction::Or:
2428 return dwarf::DW_OP_or;
2429 case Instruction::And:
2430 return dwarf::DW_OP_and;
2431 case Instruction::Xor:
2432 return dwarf::DW_OP_xor;
2433 case Instruction::Shl:
2434 return dwarf::DW_OP_shl;
2435 case Instruction::LShr:
2436 return dwarf::DW_OP_shr;
2437 case Instruction::AShr:
2438 return dwarf::DW_OP_shra;
2439 default:
2440 // TODO: Salvage from each kind of binop we know about.
2441 return 0;
2442 }
2443}
2444
2445static void handleSSAValueOperands(uint64_t CurrentLocOps,
2446                                   SmallVectorImpl<uint64_t> &Opcodes,
2447 SmallVectorImpl<Value *> &AdditionalValues,
2448 Instruction *I) {
2449 if (!CurrentLocOps) {
2450 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2451 CurrentLocOps = 1;
2452 }
2453 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2454 AdditionalValues.push_back(I->getOperand(1));
2455}
2456
2459 SmallVectorImpl<Value *> &AdditionalValues) {
2460 // Handle binary operations with constant integer operands as a special case.
2461 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2462 // Values wider than 64 bits cannot be represented within a DIExpression.
2463 if (ConstInt && ConstInt->getBitWidth() > 64)
2464 return nullptr;
2465
2466 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2467 // Push any Constant Int operand onto the expression stack.
2468 if (ConstInt) {
2469 uint64_t Val = ConstInt->getSExtValue();
2470 // Add or Sub Instructions with a constant operand can potentially be
2471 // simplified.
2472 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2473 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2474      DIExpression::appendOffset(Opcodes, Offset);
2475 return BI->getOperand(0);
2476 }
2477 Opcodes.append({dwarf::DW_OP_constu, Val});
2478 } else {
2479 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2480 }
2481
2482 // Add salvaged binary operator to expression stack, if it has a valid
2483 // representation in a DIExpression.
2484 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2485 if (!DwarfBinOp)
2486 return nullptr;
2487 Opcodes.push_back(DwarfBinOp);
2488 return BI->getOperand(0);
2489}
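// --- Illustrative example (editorial addition, not part of Local.cpp) ---
// For `%a = add i64 %x, 42`, a dbg.value of %a is salvaged to a dbg.value of
// %x with `DW_OP_plus_uconst 42` appended to its expression (via
// DIExpression::appendOffset). With a non-constant RHS, the RHS instead
// becomes an extra location operand referenced through DW_OP_LLVM_arg and
// combined with DW_OP_plus.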
2490
2491static uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2492 // The signedness of the operation is implicit in the typed stack, signed and
2493 // unsigned instructions map to the same DWARF opcode.
2494 switch (Pred) {
2495 case CmpInst::ICMP_EQ:
2496 return dwarf::DW_OP_eq;
2497 case CmpInst::ICMP_NE:
2498 return dwarf::DW_OP_ne;
2499 case CmpInst::ICMP_UGT:
2500 case CmpInst::ICMP_SGT:
2501 return dwarf::DW_OP_gt;
2502 case CmpInst::ICMP_UGE:
2503 case CmpInst::ICMP_SGE:
2504 return dwarf::DW_OP_ge;
2505 case CmpInst::ICMP_ULT:
2506 case CmpInst::ICMP_SLT:
2507 return dwarf::DW_OP_lt;
2508 case CmpInst::ICMP_ULE:
2509 case CmpInst::ICMP_SLE:
2510 return dwarf::DW_OP_le;
2511 default:
2512 return 0;
2513 }
2514}
2515
2516Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2517                              SmallVectorImpl<uint64_t> &Opcodes,
2518 SmallVectorImpl<Value *> &AdditionalValues) {
2519 // Handle icmp operations with constant integer operands as a special case.
2520 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2521 // Values wider than 64 bits cannot be represented within a DIExpression.
2522 if (ConstInt && ConstInt->getBitWidth() > 64)
2523 return nullptr;
2524 // Push any Constant Int operand onto the expression stack.
2525 if (ConstInt) {
2526 if (Icmp->isSigned())
2527 Opcodes.push_back(dwarf::DW_OP_consts);
2528 else
2529 Opcodes.push_back(dwarf::DW_OP_constu);
2530 uint64_t Val = ConstInt->getSExtValue();
2531 Opcodes.push_back(Val);
2532 } else {
2533 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2534 }
2535
2536 // Add salvaged binary operator to expression stack, if it has a valid
2537 // representation in a DIExpression.
2538 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2539 if (!DwarfIcmpOp)
2540 return nullptr;
2541 Opcodes.push_back(DwarfIcmpOp);
2542 return Icmp->getOperand(0);
2543}
2544
2545Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2546                                  SmallVectorImpl<uint64_t> &Ops,
2547 SmallVectorImpl<Value *> &AdditionalValues) {
2548 auto &M = *I.getModule();
2549 auto &DL = M.getDataLayout();
2550
2551 if (auto *CI = dyn_cast<CastInst>(&I)) {
2552 Value *FromValue = CI->getOperand(0);
2553 // No-op casts are irrelevant for debug info.
2554 if (CI->isNoopCast(DL)) {
2555 return FromValue;
2556 }
2557
2558 Type *Type = CI->getType();
2559 if (Type->isPointerTy())
2560 Type = DL.getIntPtrType(Type);
2561 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2562 if (Type->isVectorTy() ||
2563 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2564 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2565 return nullptr;
2566
2567 llvm::Type *FromType = FromValue->getType();
2568 if (FromType->isPointerTy())
2569 FromType = DL.getIntPtrType(FromType);
2570
2571 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2572 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2573
2574 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2575 isa<SExtInst>(&I));
2576 Ops.append(ExtOps.begin(), ExtOps.end());
2577 return FromValue;
2578 }
2579
2580 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2581 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2582 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2583 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2584 if (auto *IC = dyn_cast<ICmpInst>(&I))
2585 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2586
2587 // *Not* to do: we should not attempt to salvage load instructions,
2588 // because the validity and lifetime of a dbg.value containing
2589 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2590 return nullptr;
2591}
2592
2593/// A replacement for a dbg.value expression.
2594using DbgValReplacement = std::optional<DIExpression *>;
2595
2596/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2597/// possibly moving/undefing users to prevent use-before-def. Returns true if
2598/// changes are made.
2599static bool rewriteDebugUsers(
2600 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2601    function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2602 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2603 // Find debug users of From.
2604  SmallVector<DbgVariableIntrinsic *, 1> Users;
2605  SmallVector<DbgVariableRecord *, 1> DPUsers;
2606 findDbgUsers(Users, &From, &DPUsers);
2607 if (Users.empty() && DPUsers.empty())
2608 return false;
2609
2610 // Prevent use-before-def of To.
2611 bool Changed = false;
2612
2613  SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2614 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2615 if (isa<Instruction>(&To)) {
2616 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2617
2618 for (auto *DII : Users) {
2619 // It's common to see a debug user between From and DomPoint. Move it
2620 // after DomPoint to preserve the variable update without any reordering.
2621 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2622 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2623 DII->moveAfter(&DomPoint);
2624 Changed = true;
2625
2626 // Users which otherwise aren't dominated by the replacement value must
2627 // be salvaged or deleted.
2628 } else if (!DT.dominates(&DomPoint, DII)) {
2629 UndefOrSalvage.insert(DII);
2630 }
2631 }
2632
2633 // DbgVariableRecord implementation of the above.
2634 for (auto *DVR : DPUsers) {
2635 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2636 Instruction *NextNonDebug = MarkedInstr;
2637 // The next instruction might still be a dbg.declare, skip over it.
2638 if (isa<DbgVariableIntrinsic>(NextNonDebug))
2639 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2640
2641 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2642 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2643 DVR->removeFromParent();
2644 // Ensure there's a marker.
2645 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2646 Changed = true;
2647 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2648 UndefOrSalvageDVR.insert(DVR);
2649 }
2650 }
2651 }
2652
2653 // Update debug users without use-before-def risk.
2654 for (auto *DII : Users) {
2655 if (UndefOrSalvage.count(DII))
2656 continue;
2657
2658 DbgValReplacement DVRepl = RewriteExpr(*DII);
2659 if (!DVRepl)
2660 continue;
2661
2662 DII->replaceVariableLocationOp(&From, &To);
2663 DII->setExpression(*DVRepl);
2664 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2665 Changed = true;
2666 }
2667 for (auto *DVR : DPUsers) {
2668 if (UndefOrSalvageDVR.count(DVR))
2669 continue;
2670
2671 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2672 if (!DVRepl)
2673 continue;
2674
2675 DVR->replaceVariableLocationOp(&From, &To);
2676 DVR->setExpression(*DVRepl);
2677 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2678 Changed = true;
2679 }
2680
2681 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
2682 // Try to salvage the remaining debug users.
2683    salvageDebugInfo(From);
2684 Changed = true;
2685 }
2686
2687 return Changed;
2688}
2689
2690/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2691/// losslessly preserve the bits and semantics of the value. This predicate is
2692/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2693///
2694/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2695/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2696/// and also does not allow lossless pointer <-> integer conversions.
2697static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2698 Type *ToTy) {
2699 // Trivially compatible types.
2700 if (FromTy == ToTy)
2701 return true;
2702
2703 // Handle compatible pointer <-> integer conversions.
2704 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2705 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2706 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2707 !DL.isNonIntegralPointerType(ToTy);
2708 return SameSize && LosslessConversion;
2709 }
2710
2711 // TODO: This is not exhaustive.
2712 return false;
2713}
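// --- Illustrative example (editorial addition, not part of Local.cpp) ---
// With a 64-bit, integral default address space, i64 <-> ptr passes this
// check (same bit width, lossless conversion), whereas <2 x i64> <-> <4 x i32>
// does not, even though a plain bitcast between them is legal IR.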
2714
2715bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2716 Instruction &DomPoint, DominatorTree &DT) {
2717 // Exit early if From has no debug users.
2718 if (!From.isUsedByMetadata())
2719 return false;
2720
2721 assert(&From != &To && "Can't replace something with itself");
2722
2723 Type *FromTy = From.getType();
2724 Type *ToTy = To.getType();
2725
2726 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2727 return DII.getExpression();
2728 };
2729 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2730 return DVR.getExpression();
2731 };
2732
2733 // Handle no-op conversions.
2734 Module &M = *From.getModule();
2735 const DataLayout &DL = M.getDataLayout();
2736 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2737 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2738
2739 // Handle integer-to-integer widening and narrowing.
2740 // FIXME: Use DW_OP_convert when it's available everywhere.
2741 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2742 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2743 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2744 assert(FromBits != ToBits && "Unexpected no-op conversion");
2745
2746 // When the width of the result grows, assume that a debugger will only
2747 // access the low `FromBits` bits when inspecting the source variable.
2748 if (FromBits < ToBits)
2749 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2750
2751 // The width of the result has shrunk. Use sign/zero extension to describe
2752 // the source variable's high bits.
2753 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2754 DILocalVariable *Var = DII.getVariable();
2755
2756 // Without knowing signedness, sign/zero extension isn't possible.
2757 auto Signedness = Var->getSignedness();
2758 if (!Signedness)
2759 return std::nullopt;
2760
2761 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2762 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2763 Signed);
2764 };
2765 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
2766 // than on dbg.value intrinsics.
2767 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2768 DILocalVariable *Var = DVR.getVariable();
2769
2770 // Without knowing signedness, sign/zero extension isn't possible.
2771 auto Signedness = Var->getSignedness();
2772 if (!Signedness)
2773 return std::nullopt;
2774
2775 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2776 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2777 Signed);
2778 };
2779 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
2780 SignOrZeroExtDVR);
2781 }
2782
2783 // TODO: Floating-point conversions, vectors.
2784 return false;
2785}
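// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Example use of replaceAllDbgUsesWith: when `OldI` is about to be removed and
// `NewV` takes its place (possibly at a different integer width), rewrite the
// debug users, using `InsertPt` as the dominating point. The helper name is
// hypothetical.
static void migrateDebugUses(Instruction &OldI, Value &NewV,
                             Instruction &InsertPt, DominatorTree &DT) {
  // Rewrites dbg.value / DbgVariableRecord uses of OldI to NewV, appending
  // sign/zero-extension expressions for integer narrowing when possible.
  replaceAllDbgUsesWith(OldI, NewV, InsertPt, DT);
}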
2786
2787bool llvm::handleUnreachableTerminator(
2788 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2789 bool Changed = false;
2790 // RemoveDIs: erase debug-info on this instruction manually.
2791 I->dropDbgRecords();
2792 for (Use &U : I->operands()) {
2793 Value *Op = U.get();
2794 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2795 U.set(PoisonValue::get(Op->getType()));
2796 PoisonedValues.push_back(Op);
2797 Changed = true;
2798 }
2799 }
2800
2801 return Changed;
2802}
2803
2804std::pair<unsigned, unsigned>
2805llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2806 unsigned NumDeadInst = 0;
2807 unsigned NumDeadDbgInst = 0;
2808 // Delete the instructions backwards, as it has a reduced likelihood of
2809 // having to update as many def-use and use-def chains.
2810 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2813
2814 while (EndInst != &BB->front()) {
2815 // Delete the next to last instruction.
2816 Instruction *Inst = &*--EndInst->getIterator();
2817 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2818      Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2819 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2820 // EHPads can't have DbgVariableRecords attached to them, but it might be
2821 // possible for things with token type.
2822 Inst->dropDbgRecords();
2823 EndInst = Inst;
2824 continue;
2825 }
2826 if (isa<DbgInfoIntrinsic>(Inst))
2827 ++NumDeadDbgInst;
2828 else
2829 ++NumDeadInst;
2830 // RemoveDIs: erasing debug-info must be done manually.
2831 Inst->dropDbgRecords();
2832 Inst->eraseFromParent();
2833 }
2834 return {NumDeadInst, NumDeadDbgInst};
2835}
2836
2837unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2838 DomTreeUpdater *DTU,
2839 MemorySSAUpdater *MSSAU) {
2840 BasicBlock *BB = I->getParent();
2841
2842 if (MSSAU)
2843 MSSAU->changeToUnreachable(I);
2844
2845 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2846
2847 // Loop over all of the successors, removing BB's entry from any PHI
2848 // nodes.
2849 for (BasicBlock *Successor : successors(BB)) {
2850 Successor->removePredecessor(BB, PreserveLCSSA);
2851 if (DTU)
2852 UniqueSuccessors.insert(Successor);
2853 }
2854 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2855 UI->setDebugLoc(I->getDebugLoc());
2856
2857 // All instructions after this are dead.
2858 unsigned NumInstrsRemoved = 0;
2859 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2860 while (BBI != BBE) {
2861 if (!BBI->use_empty())
2862 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2863 BBI++->eraseFromParent();
2864 ++NumInstrsRemoved;
2865 }
2866 if (DTU) {
2867    SmallVector<DominatorTree::UpdateType, 8> Updates;
2868 Updates.reserve(UniqueSuccessors.size());
2869 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2870 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2871 DTU->applyUpdates(Updates);
2872 }
2874 return NumInstrsRemoved;
2875}
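// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Example use of changeToUnreachable: a caller that has proven `CI` can never
// execute replaces it (and everything after it in the block) with
// `unreachable`, keeping the dominator tree consistent via DTU.
static void makeProvenDeadCallUnreachable(CallInst *CI, DomTreeUpdater &DTU) {
  changeToUnreachable(CI, /*PreserveLCSSA=*/false, &DTU);
}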
2876
2877CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2878 SmallVector<Value *, 8> Args(II->args());
2879  SmallVector<OperandBundleDef, 1> OpBundles;
2880 II->getOperandBundlesAsDefs(OpBundles);
2881 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2882 II->getCalledOperand(), Args, OpBundles);
2883 NewCall->setCallingConv(II->getCallingConv());
2884 NewCall->setAttributes(II->getAttributes());
2885 NewCall->setDebugLoc(II->getDebugLoc());
2886 NewCall->copyMetadata(*II);
2887
2888 // If the invoke had profile metadata, try converting them for CallInst.
2889 uint64_t TotalWeight;
2890 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2891 // Set the total weight if it fits into i32, otherwise reset.
2892 MDBuilder MDB(NewCall->getContext());
2893 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2894 ? nullptr
2895 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2896 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2897 }
2898
2899 return NewCall;
2900}
2901
2902// changeToCall - Convert the specified invoke into a normal call.
2903CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2904  CallInst *NewCall = createCallMatchingInvoke(II);
2905 NewCall->takeName(II);
2906 NewCall->insertBefore(II);
2907 II->replaceAllUsesWith(NewCall);
2908
2909 // Follow the call by a branch to the normal destination.
2910 BasicBlock *NormalDestBB = II->getNormalDest();
2911 BranchInst::Create(NormalDestBB, II->getIterator());
2912
2913 // Update PHI nodes in the unwind destination
2914 BasicBlock *BB = II->getParent();
2915 BasicBlock *UnwindDestBB = II->getUnwindDest();
2916 UnwindDestBB->removePredecessor(BB);
2917 II->eraseFromParent();
2918 if (DTU)
2919 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2920 return NewCall;
2921}
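// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Simplified example of the pattern used in markAliveBlocks below: an invoke
// whose callee is known not to unwind can be degraded to a plain call (the
// real code also checks canSimplifyInvokeNoUnwind and handles dead results).
static void simplifyNoUnwindInvoke(InvokeInst *II, DomTreeUpdater *DTU) {
  if (II->doesNotThrow())
    changeToCall(II, DTU);
}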
2922
2923BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2924 BasicBlock *UnwindEdge,
2925 DomTreeUpdater *DTU) {
2926 BasicBlock *BB = CI->getParent();
2927
2928 // Convert this function call into an invoke instruction. First, split the
2929 // basic block.
2930 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2931 CI->getName() + ".noexc");
2932
2933 // Delete the unconditional branch inserted by SplitBlock
2934 BB->back().eraseFromParent();
2935
2936 // Create the new invoke instruction.
2937 SmallVector<Value *, 8> InvokeArgs(CI->args());
2938  SmallVector<OperandBundleDef, 1> OpBundles;
2939
2940 CI->getOperandBundlesAsDefs(OpBundles);
2941
2942 // Note: we're round tripping operand bundles through memory here, and that
2943 // can potentially be avoided with a cleverer API design that we do not have
2944 // as of this time.
2945
2946 InvokeInst *II =
2947      InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2948 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2949 II->setDebugLoc(CI->getDebugLoc());
2950 II->setCallingConv(CI->getCallingConv());
2951 II->setAttributes(CI->getAttributes());
2952 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2953
2954 if (DTU)
2955 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2956
2957 // Make sure that anything using the call now uses the invoke! This also
2958 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2959  CI->replaceAllUsesWith(II);
2960
2961 // Delete the original call
2962 Split->front().eraseFromParent();
2963 return Split;
2964}
2965
2966static bool markAliveBlocks(Function &F,
2967                            SmallPtrSetImpl<BasicBlock *> &Reachable,
2968 DomTreeUpdater *DTU = nullptr) {
2969  SmallVector<BasicBlock *, 128> Worklist;
2970 BasicBlock *BB = &F.front();
2971 Worklist.push_back(BB);
2972 Reachable.insert(BB);
2973 bool Changed = false;
2974 do {
2975 BB = Worklist.pop_back_val();
2976
2977 // Do a quick scan of the basic block, turning any obviously unreachable
2978 // instructions into LLVM unreachable insts. The instruction combining pass
2979 // canonicalizes unreachable insts into stores to null or undef.
2980 for (Instruction &I : *BB) {
2981 if (auto *CI = dyn_cast<CallInst>(&I)) {
2982 Value *Callee = CI->getCalledOperand();
2983 // Handle intrinsic calls.
2984 if (Function *F = dyn_cast<Function>(Callee)) {
2985 auto IntrinsicID = F->getIntrinsicID();
2986 // Assumptions that are known to be false are equivalent to
2987 // unreachable. Also, if the condition is undefined, then we make the
2988 // choice most beneficial to the optimizer, and choose that to also be
2989 // unreachable.
2990 if (IntrinsicID == Intrinsic::assume) {
2991 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2992 // Don't insert a call to llvm.trap right before the unreachable.
2993 changeToUnreachable(CI, false, DTU);
2994 Changed = true;
2995 break;
2996 }
2997 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2998 // A call to the guard intrinsic bails out of the current
2999 // compilation unit if the predicate passed to it is false. If the
3000 // predicate is a constant false, then we know the guard will bail
3001 // out of the current compile unconditionally, so all code following
3002 // it is dead.
3003 //
3004 // Note: unlike in llvm.assume, it is not "obviously profitable" for
3005 // guards to treat `undef` as `false` since a guard on `undef` can
3006 // still be useful for widening.
3007 if (match(CI->getArgOperand(0), m_Zero()))
3008 if (!isa<UnreachableInst>(CI->getNextNode())) {
3009 changeToUnreachable(CI->getNextNode(), false, DTU);
3010 Changed = true;
3011 break;
3012 }
3013 }
3014 } else if ((isa<ConstantPointerNull>(Callee) &&
3015 !NullPointerIsDefined(CI->getFunction(),
3016 cast<PointerType>(Callee->getType())
3017 ->getAddressSpace())) ||
3018 isa<UndefValue>(Callee)) {
3019 changeToUnreachable(CI, false, DTU);
3020 Changed = true;
3021 break;
3022 }
3023 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
3024 // If we found a call to a no-return function, insert an unreachable
3025 // instruction after it. Make sure there isn't *already* one there
3026 // though.
3027 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
3028 // Don't insert a call to llvm.trap right before the unreachable.
3029 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
3030 Changed = true;
3031 }
3032 break;
3033 }
3034 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
3035 // Store to undef and store to null are undefined and used to signal
3036 // that they should be changed to unreachable by passes that can't
3037 // modify the CFG.
3038
3039 // Don't touch volatile stores.
3040 if (SI->isVolatile()) continue;
3041
3042 Value *Ptr = SI->getOperand(1);
3043
3044 if (isa<UndefValue>(Ptr) ||
3045 (isa<ConstantPointerNull>(Ptr) &&
3046 !NullPointerIsDefined(SI->getFunction(),
3047 SI->getPointerAddressSpace()))) {
3048 changeToUnreachable(SI, false, DTU);
3049 Changed = true;
3050 break;
3051 }
3052 }
3053 }
3054
3055 Instruction *Terminator = BB->getTerminator();
3056 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3057 // Turn invokes that call 'nounwind' functions into ordinary calls.
3058 Value *Callee = II->getCalledOperand();
3059 if ((isa<ConstantPointerNull>(Callee) &&
3060 !NullPointerIsDefined(BB->getParent())) ||
3061 isa<UndefValue>(Callee)) {
3062 changeToUnreachable(II, false, DTU);
3063 Changed = true;
3064 } else {
3065 if (II->doesNotReturn() &&
3066 !isa<UnreachableInst>(II->getNormalDest()->front())) {
3067 // If we found an invoke of a no-return function,
3068 // create a new empty basic block with an `unreachable` terminator,
3069 // and set it as the normal destination for the invoke,
3070 // unless that is already the case.
3071 // Note that the original normal destination could have other uses.
3072 BasicBlock *OrigNormalDest = II->getNormalDest();
3073 OrigNormalDest->removePredecessor(II->getParent());
3074 LLVMContext &Ctx = II->getContext();
3075 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3076 Ctx, OrigNormalDest->getName() + ".unreachable",
3077 II->getFunction(), OrigNormalDest);
3078 new UnreachableInst(Ctx, UnreachableNormalDest);
3079 II->setNormalDest(UnreachableNormalDest);
3080 if (DTU)
3081 DTU->applyUpdates(
3082 {{DominatorTree::Delete, BB, OrigNormalDest},
3083 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3084 Changed = true;
3085 }
3086 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3087 if (II->use_empty() && !II->mayHaveSideEffects()) {
3088 // jump to the normal destination branch.
3089 BasicBlock *NormalDestBB = II->getNormalDest();
3090 BasicBlock *UnwindDestBB = II->getUnwindDest();
3091 BranchInst::Create(NormalDestBB, II->getIterator());
3092 UnwindDestBB->removePredecessor(II->getParent());
3093 II->eraseFromParent();
3094 if (DTU)
3095 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3096 } else
3097 changeToCall(II, DTU);
3098 Changed = true;
3099 }
3100 }
3101 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
3102 // Remove catchpads which cannot be reached.
3103 struct CatchPadDenseMapInfo {
3104 static CatchPadInst *getEmptyKey() {
3105          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
3106 }
3107
3108 static CatchPadInst *getTombstoneKey() {
3109          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
3110 }
3111
3112 static unsigned getHashValue(CatchPadInst *CatchPad) {
3113 return static_cast<unsigned>(hash_combine_range(
3114 CatchPad->value_op_begin(), CatchPad->value_op_end()));
3115 }
3116
3117 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
3118 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
3119 RHS == getEmptyKey() || RHS == getTombstoneKey())
3120 return LHS == RHS;
3121 return LHS->isIdenticalTo(RHS);
3122 }
3123 };
3124
3125 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
3126 // Set of unique CatchPads.
3127      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3128 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3129 HandlerSet;
3130      detail::DenseSetEmpty Empty;
3131 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3132 E = CatchSwitch->handler_end();
3133 I != E; ++I) {
3134 BasicBlock *HandlerBB = *I;
3135 if (DTU)
3136 ++NumPerSuccessorCases[HandlerBB];
3137 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
3138 if (!HandlerSet.insert({CatchPad, Empty}).second) {
3139 if (DTU)
3140 --NumPerSuccessorCases[HandlerBB];
3141 CatchSwitch->removeHandler(I);
3142 --I;
3143 --E;
3144 Changed = true;
3145 }
3146 }
3147 if (DTU) {
3148 std::vector<DominatorTree::UpdateType> Updates;
3149 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3150 if (I.second == 0)
3151 Updates.push_back({DominatorTree::Delete, BB, I.first});
3152 DTU->applyUpdates(Updates);
3153 }
3154 }
3155
3156 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
3157 for (BasicBlock *Successor : successors(BB))
3158 if (Reachable.insert(Successor).second)
3159 Worklist.push_back(Successor);
3160 } while (!Worklist.empty());
3161 return Changed;
3162}
3163
3164Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3165 Instruction *TI = BB->getTerminator();
3166
3167 if (auto *II = dyn_cast<InvokeInst>(TI))
3168 return changeToCall(II, DTU);
3169
3170 Instruction *NewTI;
3171 BasicBlock *UnwindDest;
3172
3173 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3174 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
3175 UnwindDest = CRI->getUnwindDest();
3176 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
3177 auto *NewCatchSwitch = CatchSwitchInst::Create(
3178 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
3179 CatchSwitch->getName(), CatchSwitch->getIterator());
3180 for (BasicBlock *PadBB : CatchSwitch->handlers())
3181 NewCatchSwitch->addHandler(PadBB);
3182
3183 NewTI = NewCatchSwitch;
3184 UnwindDest = CatchSwitch->getUnwindDest();
3185 } else {
3186 llvm_unreachable("Could not find unwind successor");
3187 }
3188
3189 NewTI->takeName(TI);
3190 NewTI->setDebugLoc(TI->getDebugLoc());
3191 UnwindDest->removePredecessor(BB);
3192 TI->replaceAllUsesWith(NewTI);
3193 TI->eraseFromParent();
3194 if (DTU)
3195 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3196 return NewTI;
3197}
3198
3199/// removeUnreachableBlocks - Remove blocks that are not reachable, even
3200/// if they are in a dead cycle. Return true if a change was made, false
3201/// otherwise.
3202bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3203 MemorySSAUpdater *MSSAU) {
3204  SmallPtrSet<BasicBlock *, 16> Reachable;
3205 bool Changed = markAliveBlocks(F, Reachable, DTU);
3206
3207 // If there are unreachable blocks in the CFG...
3208 if (Reachable.size() == F.size())
3209 return Changed;
3210
3211 assert(Reachable.size() < F.size());
3212
3213 // Are there any blocks left to actually delete?
3214 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3215 for (BasicBlock &BB : F) {
3216 // Skip reachable basic blocks
3217 if (Reachable.count(&BB))
3218 continue;
3219 // Skip already-deleted blocks
3220 if (DTU && DTU->isBBPendingDeletion(&BB))
3221 continue;
3222 BlocksToRemove.insert(&BB);
3223 }
3224
3225 if (BlocksToRemove.empty())
3226 return Changed;
3227
3228 Changed = true;
3229 NumRemoved += BlocksToRemove.size();
3230
3231 if (MSSAU)
3232 MSSAU->removeBlocks(BlocksToRemove);
3233
3234 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3235
3236 return Changed;
3237}
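// --- Illustrative sketch (editorial addition, not part of Local.cpp) ---
// Example use of removeUnreachableBlocks as a post-fold cleanup while keeping
// the dominator tree up to date through a lazily-flushed DomTreeUpdater.
static bool cleanUpUnreachableBlocks(Function &F, DominatorTree &DT) {
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  return removeUnreachableBlocks(F, &DTU);
}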
3238
3239void llvm::combineMetadata(Instruction *K, const Instruction *J,
3240 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3241  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3242 K->dropUnknownNonDebugMetadata(KnownIDs);
3243 K->getAllMetadataOtherThanDebugLoc(Metadata);
3244 for (const auto &MD : Metadata) {
3245 unsigned Kind = MD.first;
3246 MDNode *JMD = J->getMetadata(Kind);
3247 MDNode *KMD = MD.second;
3248
3249 switch (Kind) {
3250 default:
3251 K->setMetadata(Kind, nullptr); // Remove unknown metadata
3252 break;
3253 case LLVMContext::MD_dbg:
3254 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3255 case LLVMContext::MD_DIAssignID:
3256 K->mergeDIAssignID(J);
3257 break;
3258 case LLVMContext::MD_tbaa:
3259 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
3260 break;
3261 case LLVMContext::MD_alias_scope:
3262 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3263 break;
3264 case LLVMContext::MD_noalias:
3265 case LLVMContext::MD_mem_parallel_loop_access:
3266 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
3267 break;
3268 case LLVMContext::MD_access_group:
3269 K->setMetadata(LLVMContext::MD_access_group,
3270 intersectAccessGroups(K, J));
3271 break;
3272 case LLVMContext::MD_range:
3273 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3274 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
3275 break;
3276 case LLVMContext::MD_fpmath:
3277 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3278 break;
3279 case LLVMContext::MD_invariant_load:
3280 // If K moves, only set the !invariant.load if it is present in both
3281 // instructions.
3282 if (DoesKMove)
3283 K->setMetadata(Kind, JMD);
3284 break;
3285 case LLVMContext::MD_nonnull:
3286 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3287 K->setMetadata(Kind, JMD);
3288 break;
3289 case LLVMContext::MD_invariant_group:
3290 // Preserve !invariant.group in K.
3291 break;
3292 case LLVMContext::MD_mmra:
3293 // Combine MMRAs
3294 break;
3295 case LLVMContext::MD_align:
3296 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3297 K->setMetadata(
3298 Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3299 break;
3300 case LLVMContext::MD_dereferenceable:
3301 case LLVMContext::MD_dereferenceable_or_null:
3302 if (DoesKMove)
3303 K->setMetadata(Kind,
3304 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3305 break;
3306 case LLVMContext::MD_preserve_access_index:
3307 // Preserve !preserve.access.index in K.
3308 break;
3309 case LLVMContext::MD_noundef:
3310 // If K does move, keep noundef if it is present in both instructions.
3311 if (DoesKMove)
3312 K->setMetadata(Kind, JMD);
3313 break;
3314 case LLVMContext::MD_nontemporal:
3315 // Preserve !nontemporal if it is present on both instructions.
3316 K->setMetadata(Kind, JMD);
3317 break;
3318 case LLVMContext::MD_prof:
3319 if (DoesKMove)
3320 K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3321 break;
3322 }
3323 }
3324 // Set !invariant.group from J if J has it. If both instructions have it
3325 // then we will just pick it from J - even when they are different.
3326 // Also make sure that K is a load or store - e.g. combining a bitcast with
3327 // a load could produce a bitcast with invariant.group metadata, which is invalid.
3328 // FIXME: we should try to preserve both invariant.group md if they are
3329 // different, but right now instruction can only have one invariant.group.
3330 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3331 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3332 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3333
3334 // Merge MMRAs.
3335 // This is handled separately because we also want to handle cases where K
3336 // doesn't have tags but J does.
3337 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3338 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3339 if (JMMRA || KMMRA) {
3340 K->setMetadata(LLVMContext::MD_mmra,
3341 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3342 }
3343}
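// A small worked example (hypothetical metadata), assuming K replaces J and
// DoesKMove is false: if K carries !range [0, 10) and J carries !range
// [5, 20), K ends up with the most generic range [0, 20) (provided K is not
// marked !noundef); !noalias scopes are intersected, and kinds outside
// KnownIDs are simply dropped from K.
//
//   combineMetadata(K, J, KnownIDs, /*DoesKMove=*/false);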
3344
3345void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3346 bool KDominatesJ) {
3347 unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3348 LLVMContext::MD_alias_scope,
3349 LLVMContext::MD_noalias,
3350 LLVMContext::MD_range,
3351 LLVMContext::MD_fpmath,
3352 LLVMContext::MD_invariant_load,
3353 LLVMContext::MD_nonnull,
3354 LLVMContext::MD_invariant_group,
3355 LLVMContext::MD_align,
3356 LLVMContext::MD_dereferenceable,
3357 LLVMContext::MD_dereferenceable_or_null,
3358 LLVMContext::MD_access_group,
3359 LLVMContext::MD_preserve_access_index,
3360 LLVMContext::MD_prof,
3361 LLVMContext::MD_nontemporal,
3362 LLVMContext::MD_noundef,
3363 LLVMContext::MD_mmra};
3364 combineMetadata(K, J, KnownIDs, KDominatesJ);
3365}
3366
3367void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3368 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3369 Source.getAllMetadata(MD);
3370 MDBuilder MDB(Dest.getContext());
3371 Type *NewType = Dest.getType();
3372 const DataLayout &DL = Source.getDataLayout();
3373 for (const auto &MDPair : MD) {
3374 unsigned ID = MDPair.first;
3375 MDNode *N = MDPair.second;
3376 // Note, essentially every kind of metadata should be preserved here! This
3377 // routine is supposed to clone a load instruction changing *only its type*.
3378 // The only metadata it makes sense to drop is metadata which is invalidated
3379 // when the pointer type changes. This should essentially never be the case
3380 // in LLVM, but we explicitly switch over only known metadata to be
3381 // conservatively correct. If you are adding metadata to LLVM which pertains
3382 // to loads, you almost certainly want to add it here.
3383 switch (ID) {
3384 case LLVMContext::MD_dbg:
3385 case LLVMContext::MD_tbaa:
3386 case LLVMContext::MD_prof:
3387 case LLVMContext::MD_fpmath:
3388 case LLVMContext::MD_tbaa_struct:
3389 case LLVMContext::MD_invariant_load:
3390 case LLVMContext::MD_alias_scope:
3391 case LLVMContext::MD_noalias:
3392 case LLVMContext::MD_nontemporal:
3393 case LLVMContext::MD_mem_parallel_loop_access:
3394 case LLVMContext::MD_access_group:
3395 case LLVMContext::MD_noundef:
3396 // All of these directly apply.
3397 Dest.setMetadata(ID, N);
3398 break;
3399
3400 case LLVMContext::MD_nonnull:
3401 copyNonnullMetadata(Source, N, Dest);
3402 break;
3403
3404 case LLVMContext::MD_align:
3405 case LLVMContext::MD_dereferenceable:
3406 case LLVMContext::MD_dereferenceable_or_null:
3407 // These only directly apply if the new type is also a pointer.
3408 if (NewType->isPointerTy())
3409 Dest.setMetadata(ID, N);
3410 break;
3411
3412 case LLVMContext::MD_range:
3413 copyRangeMetadata(DL, Source, N, Dest);
3414 break;
3415 }
3416 }
3417}
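// A minimal usage sketch (hypothetical names), assuming an IRBuilder 'Builder'
// and that NewLoad reloads the same pointer as OldLoad at a different type:
//
//   LoadInst *NewLoad = Builder.CreateLoad(NewTy, OldLoad->getPointerOperand());
//   copyMetadataForLoad(*NewLoad, *OldLoad);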
3418
3419void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3420 auto *ReplInst = dyn_cast<Instruction>(Repl);
3421 if (!ReplInst)
3422 return;
3423
3424 // Patch the replacement so that it is not more restrictive than the value
3425 // being replaced.
3426 WithOverflowInst *UnusedWO;
3427 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3428 // overflowing binary operator, nuw/nsw flags may no longer hold.
3429 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3430 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3431 ReplInst->dropPoisonGeneratingFlags();
3432 // Note that if 'I' is a load being replaced by some operation,
3433 // for example, by an arithmetic operation, then andIRFlags()
3434 // would just erase all math flags from the original arithmetic
3435 // operation, which is clearly not wanted and not needed.
3436 else if (!isa<LoadInst>(I))
3437 ReplInst->andIRFlags(I);
3438
3439 // FIXME: If both the original and replacement value are part of the
3440 // same control-flow region (meaning that the execution of one
3441 // guarantees the execution of the other), then we can combine the
3442 // noalias scopes here and do better than the general conservative
3443 // answer used in combineMetadata().
3444
3445 // In general, GVN unifies expressions over different control-flow
3446 // regions, and so we need a conservative combination of the noalias
3447 // scopes.
3448 combineMetadataForCSE(ReplInst, I, false);
3449}
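// A typical pairing in a GVN/CSE-style transform (sketch, hypothetical names):
//
//   patchReplacementInstruction(RedundantInst, EarlierValue);
//   RedundantInst->replaceAllUsesWith(EarlierValue);
//   RedundantInst->eraseFromParent();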
3450
3451template <typename RootType, typename ShouldReplaceFn>
3452static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3453 const RootType &Root,
3454 const ShouldReplaceFn &ShouldReplace) {
3455 assert(From->getType() == To->getType());
3456
3457 unsigned Count = 0;
3458 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3459 if (!ShouldReplace(Root, U))
3460 continue;
3461 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3462 From->printAsOperand(dbgs());
3463 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3464 U.set(To);
3465 ++Count;
3466 }
3467 return Count;
3468}
3469
3470unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3471 assert(From->getType() == To->getType());
3472 auto *BB = From->getParent();
3473 unsigned Count = 0;
3474
3475 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3476 auto *I = cast<Instruction>(U.getUser());
3477 if (I->getParent() == BB)
3478 continue;
3479 U.set(To);
3480 ++Count;
3481 }
3482 return Count;
3483}
3484
3485unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3486 DominatorTree &DT,
3487 const BasicBlockEdge &Root) {
3488 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3489 return DT.dominates(Root, U);
3490 };
3491 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
3492}
3493
3494unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3495 DominatorTree &DT,
3496 const BasicBlock *BB) {
3497 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3498 return DT.dominates(BB, U);
3499 };
3500 return ::replaceDominatedUsesWith(From, To, BB, Dominates);
3501}
3502
3503unsigned llvm::replaceDominatedUsesWithIf(
3504 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3505 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3506 auto DominatesAndShouldReplace =
3507 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) {
3508 return DT.dominates(Root, U) && ShouldReplace(U, To);
3509 };
3510 return ::replaceDominatedUsesWith(From, To, Root, DominatesAndShouldReplace);
3511}
3512
3513unsigned llvm::replaceDominatedUsesWithIf(
3514 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3515 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3516 auto DominatesAndShouldReplace = [&DT, &ShouldReplace,
3517 To](const BasicBlock *BB, const Use &U) {
3518 return DT.dominates(BB, U) && ShouldReplace(U, To);
3519 };
3520 return ::replaceDominatedUsesWith(From, To, BB, DominatesAndShouldReplace);
3521}
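// A minimal usage sketch (hypothetical names): rewrite uses of a branch
// condition to 'true' only along the taken edge, where dominance makes it
// valid:
//
//   BasicBlockEdge TakenEdge(BI->getParent(), BI->getSuccessor(0));
//   unsigned NumReplaced = replaceDominatedUsesWith(
//       Cond, ConstantInt::getTrue(Cond->getContext()), DT, TakenEdge);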
3522
3523bool llvm::callsGCLeafFunction(const CallBase *Call,
3524 const TargetLibraryInfo &TLI) {
3525 // Check if the function is specifically marked as a gc leaf function.
3526 if (Call->hasFnAttr("gc-leaf-function"))
3527 return true;
3528 if (const Function *F = Call->getCalledFunction()) {
3529 if (F->hasFnAttribute("gc-leaf-function"))
3530 return true;
3531
3532 if (auto IID = F->getIntrinsicID()) {
3533 // Most LLVM intrinsics do not take safepoints.
3534 return IID != Intrinsic::experimental_gc_statepoint &&
3535 IID != Intrinsic::experimental_deoptimize &&
3536 IID != Intrinsic::memcpy_element_unordered_atomic &&
3537 IID != Intrinsic::memmove_element_unordered_atomic;
3538 }
3539 }
3540
3541 // Lib calls can be materialized by some passes, and won't be
3542 // marked as 'gc-leaf-function.' All available Libcalls are
3543 // GC-leaf.
3544 LibFunc LF;
3545 if (TLI.getLibFunc(*Call, LF)) {
3546 return TLI.has(LF);
3547 }
3548
3549 return false;
3550}
3551
3552void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3553 LoadInst &NewLI) {
3554 auto *NewTy = NewLI.getType();
3555
3556 // This only directly applies if the new type is also a pointer.
3557 if (NewTy->isPointerTy()) {
3558 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3559 return;
3560 }
3561
3562 // The only other translation we can do is to integral loads with !range
3563 // metadata.
3564 if (!NewTy->isIntegerTy())
3565 return;
3566
3567 MDBuilder MDB(NewLI.getContext());
3568 const Value *Ptr = OldLI.getPointerOperand();
3569 auto *ITy = cast<IntegerType>(NewTy);
3570 auto *NullInt = ConstantExpr::getPtrToInt(
3571 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3572 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3573 NewLI.setMetadata(LLVMContext::MD_range,
3574 MDB.createRange(NonNullInt, NullInt));
3575}
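// Worked illustration (hypothetical types): when an i8* load carrying !nonnull
// is rewritten as an i64 load, "pointer is non-null" is re-expressed as !range
// metadata that excludes zero; the wrapped interval [1, 0) covers every i64
// value except 0.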
3576
3577void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3578 MDNode *N, LoadInst &NewLI) {
3579 auto *NewTy = NewLI.getType();
3580 // Simply copy the metadata if the type did not change.
3581 if (NewTy == OldLI.getType()) {
3582 NewLI.setMetadata(LLVMContext::MD_range, N);
3583 return;
3584 }
3585
3586 // Give up unless it is converted to a pointer where there is a single very
3587 // valuable mapping we can do reliably.
3588 // FIXME: It would be nice to propagate this in more ways, but the type
3589 // conversions make it hard.
3590 if (!NewTy->isPointerTy())
3591 return;
3592
3593 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3594 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3595 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3596 MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
3597 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3598 }
3599}
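// Worked illustration (hypothetical types): the converse of the !nonnull case
// above - an integer load whose !range excludes 0, reloaded as a pointer of
// the same width, keeps only the fact that still translates: it gains
// !nonnull.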
3600
3601void llvm::dropDebugUsers(Instruction &I) {
3602 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3603 SmallVector<DbgVariableRecord *, 1> DPUsers;
3604 findDbgUsers(DbgUsers, &I, &DPUsers);
3605 for (auto *DII : DbgUsers)
3606 DII->eraseFromParent();
3607 for (auto *DVR : DPUsers)
3608 DVR->eraseFromParent();
3609}
3610
3611void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3612 BasicBlock *BB) {
3613 // Since we are moving the instructions out of their basic block, we do not
3614 // retain their original debug locations (DILocations) and debug intrinsic
3615 // instructions.
3616 //
3617 // Doing so would degrade the debugging experience and adversely affect the
3618 // accuracy of profiling information.
3619 //
3620 // Currently, when hoisting the instructions, we take the following actions:
3621 // - Remove their debug intrinsic instructions.
3622 // - Set their debug locations to the values from the insertion point.
3623 //
3624 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3625 // need to be deleted, is because there will not be any instructions with a
3626 // DILocation in either branch left after performing the transformation. We
3627 // can only insert a dbg.value after the two branches are joined again.
3628 //
3629 // See PR38762, PR39243 for more details.
3630 //
3631 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3632 // encode predicated DIExpressions that yield different results on different
3633 // code paths.
3634
3635 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3636 Instruction *I = &*II;
3637 I->dropUBImplyingAttrsAndMetadata();
3638 if (I->isUsedByMetadata())
3639 dropDebugUsers(*I);
3640 // RemoveDIs: drop debug-info too as the following code does.
3641 I->dropDbgRecords();
3642 if (I->isDebugOrPseudoInst()) {
3643 // Remove DbgInfo and pseudo probe Intrinsics.
3644 II = I->eraseFromParent();
3645 continue;
3646 }
3647 I->setDebugLoc(InsertPt->getDebugLoc());
3648 ++II;
3649 }
3650 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3651 BB->getTerminator()->getIterator());
3652}
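// A minimal usage sketch (hypothetical names), hoisting a then-block into its
// dominating block ahead of the conditional branch:
//
//   hoistAllInstructionsInto(DomBlock, DomBlock->getTerminator(), ThenBB);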
3653
3654DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3655 Type &Ty) {
3656 // Create integer constant expression.
3657 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3658 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3659 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3660 return InitIntOpt ? DIB.createConstantValueExpression(
3661 static_cast<uint64_t>(*InitIntOpt))
3662 : nullptr;
3663 };
3664
3665 if (isa<ConstantInt>(C))
3666 return createIntegerExpression(C);
3667
3668 auto *FP = dyn_cast<ConstantFP>(&C);
3669 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3670 const APFloat &APF = FP->getValueAPF();
3671 APInt const &API = APF.bitcastToAPInt();
3672 if (auto Temp = API.getZExtValue())
3673 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3674 return DIB.createConstantValueExpression(*API.getRawData());
3675 }
3676
3677 if (!Ty.isPointerTy())
3678 return nullptr;
3679
3680 if (isa<ConstantPointerNull>(C))
3681 return DIB.createConstantValueExpression(0);
3682
3683 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3684 if (CE->getOpcode() == Instruction::IntToPtr) {
3685 const Value *V = CE->getOperand(0);
3686 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3687 return createIntegerExpression(*CI);
3688 }
3689 return nullptr;
3690}
3691
3692void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
3693 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3694 for (auto *Op : Set) {
3695 auto I = Mapping.find(Op);
3696 if (I != Mapping.end())
3697 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3698 }
3699 };
3700 auto RemapAssignAddress = [&Mapping](auto *DA) {
3701 auto I = Mapping.find(DA->getAddress());
3702 if (I != Mapping.end())
3703 DA->setAddress(I->second);
3704 };
3705 if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Inst))
3706 RemapDebugOperands(DVI, DVI->location_ops());
3707 if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Inst))
3708 RemapAssignAddress(DAI);
3709 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3710 RemapDebugOperands(&DVR, DVR.location_ops());
3711 if (DVR.isDbgAssign())
3712 RemapAssignAddress(&DVR);
3713 }
3714}
3715
3716namespace {
3717
3718/// A potential constituent of a bitreverse or bswap expression. See
3719/// collectBitParts for a fuller explanation.
3720struct BitPart {
3721 BitPart(Value *P, unsigned BW) : Provider(P) {
3722 Provenance.resize(BW);
3723 }
3724
3725 /// The Value that this is a bitreverse/bswap of.
3726 Value *Provider;
3727
3728 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3729 /// in Provider becomes bit B in the result of this expression.
3730 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3731
3732 enum { Unset = -1 };
3733};
3734
3735} // end anonymous namespace
3736
3737/// Analyze the specified subexpression and see if it is capable of providing
3738/// pieces of a bswap or bitreverse. The subexpression provides a potential
3739/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3740/// the output of the expression came from a corresponding bit in some other
3741/// value. This function is recursive, and the end result is a mapping of
3742/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3743/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3744///
3745/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3746/// that the expression deposits the low byte of %X into the high byte of the
3747/// result and that all other bits are zero. This expression is accepted and a
3748/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3749/// [0-7].
3750///
3751/// For vector types, all analysis is performed at the per-element level. No
3752/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3753/// constant masks must be splatted across all elements.
3754///
3755/// To avoid revisiting values, the BitPart results are memoized into the
3756/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3757/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3758/// store BitParts objects, not pointers. As we need the concept of a nullptr
3759/// BitParts (Value has been analyzed and the analysis failed), we use an
3760/// optional type instead to provide the same functionality.
3761///
3762/// Because we pass around references into \c BPS, we must use a container that
3763/// does not invalidate internal references (std::map instead of DenseMap).
3764static const std::optional<BitPart> &
3765collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3766 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3767 bool &FoundRoot) {
3768 auto I = BPS.find(V);
3769 if (I != BPS.end())
3770 return I->second;
3771
3772 auto &Result = BPS[V] = std::nullopt;
3773 auto BitWidth = V->getType()->getScalarSizeInBits();
3774
3775 // Can't do integer/elements > 128 bits.
3776 if (BitWidth > 128)
3777 return Result;
3778
3779 // Prevent stack overflow by limiting the recursion depth
3780 if (Depth == BitPartRecursionMaxDepth) {
3781 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3782 return Result;
3783 }
3784
3785 if (auto *I = dyn_cast<Instruction>(V)) {
3786 Value *X, *Y;
3787 const APInt *C;
3788
3789 // If this is an or instruction, it may be an inner node of the bswap.
3790 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3791 // Check we have both sources and they are from the same provider.
3792 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3793 Depth + 1, FoundRoot);
3794 if (!A || !A->Provider)
3795 return Result;
3796
3797 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3798 Depth + 1, FoundRoot);
3799 if (!B || A->Provider != B->Provider)
3800 return Result;
3801
3802 // Try and merge the two together.
3803 Result = BitPart(A->Provider, BitWidth);
3804 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3805 if (A->Provenance[BitIdx] != BitPart::Unset &&
3806 B->Provenance[BitIdx] != BitPart::Unset &&
3807 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3808 return Result = std::nullopt;
3809
3810 if (A->Provenance[BitIdx] == BitPart::Unset)
3811 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3812 else
3813 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3814 }
3815
3816 return Result;
3817 }
3818
3819 // If this is a logical shift by a constant, recurse then shift the result.
3820 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3821 const APInt &BitShift = *C;
3822
3823 // Ensure the shift amount is defined.
3824 if (BitShift.uge(BitWidth))
3825 return Result;
3826
3827 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3828 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3829 return Result;
3830
3831 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3832 Depth + 1, FoundRoot);
3833 if (!Res)
3834 return Result;
3835 Result = Res;
3836
3837 // Perform the "shift" on BitProvenance.
3838 auto &P = Result->Provenance;
3839 if (I->getOpcode() == Instruction::Shl) {
3840 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3841 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3842 } else {
3843 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3844 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3845 }
3846
3847 return Result;
3848 }
3849
3850 // If this is a logical 'and' with a mask that clears bits, recurse then
3851 // unset the appropriate bits.
3852 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3853 const APInt &AndMask = *C;
3854
3855 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3856 // early exit.
3857 unsigned NumMaskedBits = AndMask.popcount();
3858 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3859 return Result;
3860
3861 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3862 Depth + 1, FoundRoot);
3863 if (!Res)
3864 return Result;
3865 Result = Res;
3866
3867 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3868 // If the AndMask is zero for this bit, clear the bit.
3869 if (AndMask[BitIdx] == 0)
3870 Result->Provenance[BitIdx] = BitPart::Unset;
3871 return Result;
3872 }
3873
3874 // If this is a zext instruction zero extend the result.
3875 if (match(V, m_ZExt(m_Value(X)))) {
3876 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3877 Depth + 1, FoundRoot);
3878 if (!Res)
3879 return Result;
3880
3881 Result = BitPart(Res->Provider, BitWidth);
3882 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3883 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3884 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3885 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3886 Result->Provenance[BitIdx] = BitPart::Unset;
3887 return Result;
3888 }
3889
3890 // If this is a truncate instruction, extract the lower bits.
3891 if (match(V, m_Trunc(m_Value(X)))) {
3892 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3893 Depth + 1, FoundRoot);
3894 if (!Res)
3895 return Result;
3896
3897 Result = BitPart(Res->Provider, BitWidth);
3898 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3899 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3900 return Result;
3901 }
3902
3903 // BITREVERSE - most likely due to us previously matching a partial
3904 // bitreverse.
3905 if (match(V, m_BitReverse(m_Value(X)))) {
3906 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3907 Depth + 1, FoundRoot);
3908 if (!Res)
3909 return Result;
3910
3911 Result = BitPart(Res->Provider, BitWidth);
3912 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3913 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3914 return Result;
3915 }
3916
3917 // BSWAP - most likely due to us previously matching a partial bswap.
3918 if (match(V, m_BSwap(m_Value(X)))) {
3919 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3920 Depth + 1, FoundRoot);
3921 if (!Res)
3922 return Result;
3923
3924 unsigned ByteWidth = BitWidth / 8;
3925 Result = BitPart(Res->Provider, BitWidth);
3926 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3927 unsigned ByteBitOfs = ByteIdx * 8;
3928 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3929 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3930 Res->Provenance[ByteBitOfs + BitIdx];
3931 }
3932 return Result;
3933 }
3934
3935 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3936 // amount (modulo).
3937 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3938 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3939 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3940 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3941 // We can treat fshr as a fshl by flipping the modulo amount.
3942 unsigned ModAmt = C->urem(BitWidth);
3943 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3944 ModAmt = BitWidth - ModAmt;
3945
3946 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3947 if (!MatchBitReversals && (ModAmt % 8) != 0)
3948 return Result;
3949
3950 // Check we have both sources and they are from the same provider.
3951 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3952 Depth + 1, FoundRoot);
3953 if (!LHS || !LHS->Provider)
3954 return Result;
3955
3956 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3957 Depth + 1, FoundRoot);
3958 if (!RHS || LHS->Provider != RHS->Provider)
3959 return Result;
3960
3961 unsigned StartBitRHS = BitWidth - ModAmt;
3962 Result = BitPart(LHS->Provider, BitWidth);
3963 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3964 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3965 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3966 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3967 return Result;
3968 }
3969 }
3970
3971 // If we've already found a root input value then we're never going to merge
3972 // these back together.
3973 if (FoundRoot)
3974 return Result;
3975
3976 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3977 // be the root input value to the bswap/bitreverse.
3978 FoundRoot = true;
3979 Result = BitPart(V, BitWidth);
3980 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3981 Result->Provenance[BitIdx] = BitIdx;
3982 return Result;
3983}
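// Worked example (sketch, i32 %X): for "(or (shl %X, 24), (lshr %X, 24))" the
// two recursive calls yield BitParts of %X with Provenance[24..31] = 0..7 and
// Provenance[0..7] = 24..31 respectively; the 'or' case merges them into one
// BitPart whose remaining bits stay Unset.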
3984
3985static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3986 unsigned BitWidth) {
3987 if (From % 8 != To % 8)
3988 return false;
3989 // Convert from bit indices to byte indices and check for a byte reversal.
3990 From >>= 3;
3991 To >>= 3;
3992 BitWidth >>= 3;
3993 return From == BitWidth - To - 1;
3994}
3995
3996static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3997 unsigned BitWidth) {
3998 return From == BitWidth - To - 1;
3999}
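// Worked check (sketch) for a 32-bit bswap: input bit 5 must become output
// bit 29. Both are bit 5 within their byte (5 % 8 == 29 % 8), the byte
// indices are 0 and 3, and 0 == 4 - 3 - 1, so
// bitTransformIsCorrectForBSwap(5, 29, 32) returns true.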
4000
4001bool llvm::recognizeBSwapOrBitReverseIdiom(
4002 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
4003 SmallVectorImpl<Instruction *> &InsertedInsts) {
4004 if (!match(I, m_Or(m_Value(), m_Value())) &&
4005 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
4006 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
4007 !match(I, m_BSwap(m_Value())))
4008 return false;
4009 if (!MatchBSwaps && !MatchBitReversals)
4010 return false;
4011 Type *ITy = I->getType();
4012 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
4013 return false; // Can't do integer/elements > 128 bits.
4014
4015 // Try to find all the pieces corresponding to the bswap.
4016 bool FoundRoot = false;
4017 std::map<Value *, std::optional<BitPart>> BPS;
4018 const auto &Res =
4019 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
4020 if (!Res)
4021 return false;
4022 ArrayRef<int8_t> BitProvenance = Res->Provenance;
4023 assert(all_of(BitProvenance,
4024 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
4025 "Illegal bit provenance index");
4026
4027 // If the upper bits are zero, then attempt to perform as a truncated op.
4028 Type *DemandedTy = ITy;
4029 if (BitProvenance.back() == BitPart::Unset) {
4030 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
4031 BitProvenance = BitProvenance.drop_back();
4032 if (BitProvenance.empty())
4033 return false; // TODO - handle null value?
4034 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
4035 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
4036 DemandedTy = VectorType::get(DemandedTy, IVecTy);
4037 }
4038
4039 // Check BitProvenance hasn't found a source larger than the result type.
4040 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
4041 if (DemandedBW > ITy->getScalarSizeInBits())
4042 return false;
4043
4044 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
4045 // only byteswap values with an even number of bytes.
4046 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
4047 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
4048 bool OKForBitReverse = MatchBitReversals;
4049 for (unsigned BitIdx = 0;
4050 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
4051 if (BitProvenance[BitIdx] == BitPart::Unset) {
4052 DemandedMask.clearBit(BitIdx);
4053 continue;
4054 }
4055 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
4056 DemandedBW);
4057 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
4058 BitIdx, DemandedBW);
4059 }
4060
4061 Intrinsic::ID Intrin;
4062 if (OKForBSwap)
4063 Intrin = Intrinsic::bswap;
4064 else if (OKForBitReverse)
4065 Intrin = Intrinsic::bitreverse;
4066 else
4067 return false;
4068
4069 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
4070 Value *Provider = Res->Provider;
4071
4072 // We may need to truncate the provider.
4073 if (DemandedTy != Provider->getType()) {
4074 auto *Trunc =
4075 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
4076 InsertedInsts.push_back(Trunc);
4077 Provider = Trunc;
4078 }
4079
4080 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
4081 InsertedInsts.push_back(Result);
4082
4083 if (!DemandedMask.isAllOnes()) {
4084 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
4085 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
4086 InsertedInsts.push_back(Result);
4087 }
4088
4089 // We may need to zeroextend back to the result type.
4090 if (ITy != Result->getType()) {
4091 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
4092 InsertedInsts.push_back(ExtInst);
4093 }
4094
4095 return true;
4096}
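// Example of an idiom this recognizes (sketch, i16 bswap):
//
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
//
// becomes a single call to @llvm.bswap.i16(i16 %x), with a trunc/mask/zext
// inserted only when the demanded width differs from the original type.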
4097
4098// CodeGen has special handling for some string functions that may replace
4099// them with target-specific intrinsics. Since that'd skip our interceptors
4100// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4101// we mark affected calls as NoBuiltin, which will disable optimization
4102// in CodeGen.
4103void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4104 CallInst *CI, const TargetLibraryInfo *TLI) {
4105 Function *F = CI->getCalledFunction();
4106 LibFunc Func;
4107 if (F && !F->hasLocalLinkage() && F->hasName() &&
4108 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
4109 !F->doesNotAccessMemory())
4110 CI->addFnAttr(Attribute::NoBuiltin);
4111}
4112
4113bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
4114 // We can't have a PHI with a metadata type.
4115 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
4116 return false;
4117
4118 // Early exit.
4119 if (!isa<Constant>(I->getOperand(OpIdx)))
4120 return true;
4121
4122 switch (I->getOpcode()) {
4123 default:
4124 return true;
4125 case Instruction::Call:
4126 case Instruction::Invoke: {
4127 const auto &CB = cast<CallBase>(*I);
4128
4129 // Can't handle inline asm. Skip it.
4130 if (CB.isInlineAsm())
4131 return false;
4132
4133 // Constant bundle operands may need to retain their constant-ness for
4134 // correctness.
4135 if (CB.isBundleOperand(OpIdx))
4136 return false;
4137
4138 if (OpIdx < CB.arg_size()) {
4139 // Some variadic intrinsics require constants in the variadic arguments,
4140 // which currently aren't markable as immarg.
4141 if (isa<IntrinsicInst>(CB) &&
4142 OpIdx >= CB.getFunctionType()->getNumParams()) {
4143 // This is known to be OK for stackmap.
4144 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4145 }
4146
4147 // gcroot is a special case, since it requires a constant argument which
4148 // isn't also required to be a simple ConstantInt.
4149 if (CB.getIntrinsicID() == Intrinsic::gcroot)
4150 return false;
4151
4152 // Some intrinsic operands are required to be immediates.
4153 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
4154 }
4155
4156 // It is never allowed to replace the called operand of an intrinsic, but it
4157 // may be possible for an ordinary call.
4158 return !isa<IntrinsicInst>(CB);
4159 }
4160 case Instruction::ShuffleVector:
4161 // Shufflevector masks are constant.
4162 return OpIdx != 2;
4163 case Instruction::Switch:
4164 case Instruction::ExtractValue:
4165 // All operands apart from the first are constant.
4166 return OpIdx == 0;
4167 case Instruction::InsertValue:
4168 // All operands apart from the first and the second are constant.
4169 return OpIdx < 2;
4170 case Instruction::Alloca:
4171 // Static allocas (constant size in the entry block) are handled by
4172 // prologue/epilogue insertion so they're free anyway. We definitely don't
4173 // want to make them non-constant.
4174 return !cast<AllocaInst>(I)->isStaticAlloca();
4175 case Instruction::GetElementPtr:
4176 if (OpIdx == 0)
4177 return true;
4178 gep_type_iterator It = gep_type_begin(I);
4179 for (auto E = std::next(It, OpIdx); It != E; ++It)
4180 if (It.isStruct())
4181 return false;
4182 return true;
4183 }
4184}
4185
4186Value *llvm::invertCondition(Value *Condition) {
4187 // First: Check if it's a constant
4188 if (Constant *C = dyn_cast<Constant>(Condition))
4189 return ConstantExpr::getNot(C);
4190
4191 // Second: If the condition is already inverted, return the original value
4192 Value *NotCondition;
4193 if (match(Condition, m_Not(m_Value(NotCondition))))
4194 return NotCondition;
4195
4196 BasicBlock *Parent = nullptr;
4197 Instruction *Inst = dyn_cast<Instruction>(Condition);
4198 if (Inst)
4199 Parent = Inst->getParent();
4200 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4201 Parent = &Arg->getParent()->getEntryBlock();
4202 assert(Parent && "Unsupported condition to invert");
4203
4204 // Third: Check all the users for an invert
4205 for (User *U : Condition->users())
4206 if (Instruction *I = dyn_cast<Instruction>(U))
4207 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4208 return I;
4209
4210 // Last option: Create a new instruction
4211 auto *Inverted =
4212 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4213 if (Inst && !isa<PHINode>(Inst))
4214 Inverted->insertAfter(Inst);
4215 else
4216 Inverted->insertBefore(&*Parent->getFirstInsertionPt());
4217 return Inverted;
4218}
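// A minimal usage sketch (hypothetical names): invert a branch condition and
// swap the successors so the overall CFG semantics are preserved:
//
//   BI->setCondition(invertCondition(BI->getCondition()));
//   BI->swapSuccessors();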
4219
4220bool llvm::inferAttributesFromOthers(Function &F) {
4221 // Note: We explicitly check for attributes rather than using cover functions
4222 // because some of the cover functions include the logic being implemented.
4223
4224 bool Changed = false;
4225 // readnone + not convergent implies nosync
4226 if (!F.hasFnAttribute(Attribute::NoSync) &&
4227 F.doesNotAccessMemory() && !F.isConvergent()) {
4228 F.setNoSync();
4229 Changed = true;
4230 }
4231
4232 // readonly implies nofree
4233 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4234 F.setDoesNotFreeMemory();
4235 Changed = true;
4236 }
4237
4238 // willreturn implies mustprogress
4239 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4240 F.setMustProgress();
4241 Changed = true;
4242 }
4243
4244 // TODO: There are a bunch of cases of restrictive memory effects we
4245 // can infer by inspecting arguments of argmemonly-ish functions.
4246
4247 return Changed;
4248}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
bool End
Definition: ELF_riscv.cpp:480
static unsigned getHashValueImpl(SimpleValue Val)
Definition: EarlyCSE.cpp:235
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition: EarlyCSE.cpp:357
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Rewrite Partial Register Uses
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition: Local.cpp:2966
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition: Local.cpp:1601
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition: Local.cpp:2697
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition: Local.cpp:1879
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition: Local.cpp:2415
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition: Local.cpp:1571
static void gatherIncomingValuesToPhi(PHINode *PN, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition: Local.cpp:961
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition: Local.cpp:2445
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition: Local.cpp:2594
static void insertDbgValueOrDbgVariableRecordAfter(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1672
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2457
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition: Local.cpp:977
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2389
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2516
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition: Local.cpp:856
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition: Local.cpp:673
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition: Local.cpp:1873
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition: Local.cpp:632
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
static unsigned replaceDominatedUsesWith(Value *From, Value *To, const RootType &Root, const ShouldReplaceFn &ShouldReplace)
Definition: Local.cpp:3452
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition: Local.cpp:2491
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3985
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition: Local.cpp:3765
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1353
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition: Local.cpp:865
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1389
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition: Local.cpp:2599
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1653
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition: Local.cpp:936
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgValueInst *DVI, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition: Local.cpp:2152
static void insertDbgVariableRecordsForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Definition: Local.cpp:2004
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, const SmallPtrSetImpl< BasicBlock * > &SuccPreds, BasicBlock *&CommonPred)
Definition: Local.cpp:1018
static const unsigned BitPartRecursionMaxDepth
Definition: Local.cpp:117
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition: Local.cpp:1051
cl::opt< bool > UseNewDbgInfoFormat
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3996
static void salvageDbgAssignAddress(T *Assign)
Definition: Local.cpp:2209
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition: APFloat.h:1260
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1387
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1500
unsigned popcount() const
Count the number of bits set.
Definition: APInt.h:1629
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:351
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition: APInt.h:549
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition: APInt.h:1534
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1201
an instruction to allocate memory on the stack
Definition: Instructions.h:61
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:174
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Value handle that asserts if the Value is deleted.
Definition: ValueHandle.h:264
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:451
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:438
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:507
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:414
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:648
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:372
void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:365
const Instruction & front() const
Definition: BasicBlock.h:461
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:202
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:569
void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
Definition: BasicBlock.cpp:285
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:457
void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
Definition: BasicBlock.cpp:717
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:294
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:277
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:384
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:167
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
size_t size() const
Definition: BasicBlock.h:459
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
Definition: BasicBlock.cpp:483
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition: BasicBlock.h:621
const Instruction & back() const
Definition: BasicBlock.h:463
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:514
BinaryOps getOpcode() const
Definition: InstrTypes.h:442
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition: Constants.h:890
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1871
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1527
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1574
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1546
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1401
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:781
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:784
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:785
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
bool isSigned() const
Definition: InstrTypes.h:1007
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1084
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2269
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2593
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2255
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2599
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1800
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
This is an important base class in LLVM.
Definition: Constant.h:42
void destroyConstant()
Called if some element of this constant is no longer valid.
Definition: Constants.cpp:472
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition: DIBuilder.h:779
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
std::optional< uint64_t > getActiveBits(DIVariable *Var)
Return the number of bits that have an active value, i.e.
uint64_t getElement(unsigned I) const
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable?
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
bool isAddressOfVariable() const
Does this describe the address of a local variable?
DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
unsigned size() const
Definition: DenseMap.h:99
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
const BasicBlock & getEntryBlock() const
Definition: Function.h:800
void applyUpdatesPermissive(ArrayRef< typename DomTreeT::UpdateType > Updates)
Submit updates to all available trees.
void applyUpdates(ArrayRef< typename DomTreeT::UpdateType > Updates)
Submit updates to all available trees.
bool hasDomTree() const
Returns true if it holds a DomTreeT.
bool isBBPendingDeletion(BasicBlockT *DelBB) const
Returns true if DelBB is awaiting deletion.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2246
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1125
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1119
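As a brief illustration of how the IRBuilder calls listed here combine, a minimal sketch follows; V, InsertPt, TrueBB, FalseBB and MergeBB are placeholder names, not symbols from Local.cpp.
  IRBuilder<> B(InsertPt);                       // insert before InsertPt
  Value *IsZero = B.CreateICmpEQ(
      V, ConstantInt::get(V->getType(), 0), "is.zero");
  B.CreateCondBr(IsZero, TrueBB, FalseBB);       // conditional branch
  // For an unconditional edge: B.CreateBr(MergeBB);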
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2671
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
Definition: Instruction.h:104
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
Definition: Metadata.cpp:1744
void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:824
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
bool isIdenticalToWhenDefined(const Instruction *I) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:381
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1635
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition: MDBuilder.cpp:97
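A hedged usage sketch for the two MDBuilder helpers above; Ctx, Br and LI are assumed to be an LLVMContext, a conditional branch and a load already in hand.
  MDBuilder MDB(Ctx);
  // Hint that the true edge is taken ~90% of the time.
  Br->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(90, 10));
  // Promise that the loaded 32-bit value lies in [0, 256).
  LI->setMetadata(LLVMContext::MD_range,
                  MDB.createRange(APInt(32, 0), APInt(32, 256)));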
Metadata node.
Definition: Metadata.h:1067
static MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1135
static MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
static MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
Definition: Metadata.cpp:1216
static MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1167
static MDNode * getMostGenericRange(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1278
static MDNode * intersect(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1122
static MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1350
static MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
iterator end()
Definition: MapVector.h:71
iterator find(const KeyT &Key)
Definition: MapVector.h:167
bool empty() const
Definition: MapVector.h:79
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: MapVector.h:141
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const_block_iterator block_begin() const
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
T get() const
Returns the value of the specified pointer type.
Definition: PointerUnion.h:155
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1852
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition: SetVector.h:87
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
value_type pop_back_val()
Definition: SetVector.h:285
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:323
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:418
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
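The insert/empty/pop_back_val members above are the building blocks of the usual worklist idiom; a minimal sketch, with Root standing in for whatever instruction seeds the walk.
  SmallSetVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  Worklist.insert(Root);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;                               // already processed
    for (User *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        Worklist.insert(UI);                  // set semantics drop duplicates
  }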
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
size_type size() const
Definition: SmallSet.h:161
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
bool empty() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:249
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1833
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
value_op_iterator value_op_end()
Definition: User.h:263
Value * getOperand(unsigned i) const
Definition: User.h:169
value_op_iterator value_op_begin()
Definition: User.h:260
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:495
iterator find(const KeyT &Val)
Definition: ValueMap.h:155
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: ValueMap.h:172
size_type size() const
Definition: ValueMap.h:140
iterator end()
Definition: ValueMap.h:135
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition: Value.h:806
user_iterator_impl< User > user_iterator
Definition: Value.h:390
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition: DenseSet.h:90
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1513
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
Definition: PatternMatch.h:822
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
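A small sketch of how the matchers above compose; V, X and Mask are illustrative locals, and the pattern shown is not one that Local.cpp itself uses.
  using namespace llvm::PatternMatch;
  Value *X;
  const APInt *Mask;
  // Matches an 'and' whose right-hand operand is a constant-int mask.
  if (match(V, m_And(m_Value(X), m_APInt(Mask)))) {
    // X is bound to the left operand, Mask to the constant right operand.
  }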
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:147
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:41
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
pred_iterator pred_end(BasicBlock *BB)
Definition: CFG.h:114
@ Offset
Definition: DWP.cpp:480
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1742
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:540
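A hedged example of the usual cleanup sequence around this helper; OldI, Repl and TLI are placeholders (TLI may be null).
  // Rewrite all uses, then delete OldI if it became trivially dead; operands
  // that become dead as a result are deleted recursively as well.
  OldI->replaceAllUsesWith(Repl);
  RecursivelyDeleteTriviallyDeadInstructions(OldI, TLI);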
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2923
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:130
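A minimal driver sketch for ConstantFoldTerminator over a whole function; F, TLI and DTU are assumed to be in scope, and every argument after the block is optional.
  bool Changed = false;
  for (BasicBlock &BB : make_early_inc_range(F))
    Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true,
                                      TLI, DTU);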
unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
Definition: Local.cpp:3503
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2805
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition: Local.cpp:3470
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2244
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:145
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1671
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2903
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition: Local.cpp:3367
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or fewer items.
Definition: STLExtras.h:2505
void remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst)
Remap the operands of the debug records attached to Inst, and the operands of Inst itself if it's a d...
Definition: Local.cpp:3692
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition: Local.cpp:731
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable informat...
void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:138
void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition: Local.cpp:2068
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2787
bool canSimplifyInvokeNoUnwind(const Function *F)
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
pred_iterator pred_begin(BasicBlock *BB)
Definition: CFG.h:110
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition: Local.cpp:1118
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition: Local.cpp:4001
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1541
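A sketch of querying getOrEnforceKnownAlignment before assuming a wider alignment; Ptr, MemI, AC and DT are placeholders, and the analysis pointers may be null.
  const DataLayout &DL = MemI->getDataLayout();
  Align Known = getOrEnforceKnownAlignment(Ptr, MaybeAlign(16), DL,
                                           /*CxtI=*/MemI, AC, DT);
  if (Known.value() >= 16) {
    // Ptr is known (or has been forced) to be at least 16-byte aligned here.
  }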
bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition: Local.cpp:407
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1916
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2102
DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition: Local.cpp:3654
CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition: Local.cpp:2877
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition: Local.cpp:3164
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition: Local.cpp:419
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition: Local.cpp:3419
DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
Definition: DebugInfo.cpp:158
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm.dbg.declare intrinsic.
Definition: Local.cpp:1691
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition: Local.cpp:3485
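A hedged sketch of replaceDominatedUsesWith: after a conditional branch proves X == C on its true edge, rewrite the uses dominated by that edge; BI, X, C and DT are placeholders.
  BasicBlockEdge TrueEdge(BI->getParent(), BI->getSuccessor(0));
  unsigned NumReplaced = replaceDominatedUsesWith(X, C, DT, TrueEdge);
  (void)NumReplaced;  // number of uses that were rewritten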
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3239
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2837
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2715
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2545
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3345
void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition: Local.cpp:3601
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!...
Definition: Local.cpp:771
bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition: Local.cpp:617
void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition: Local.cpp:3611
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition: Local.cpp:3577
void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition: Local.cpp:3552
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition: Local.cpp:4113
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple llvm.dbg.value instructions when the alloca it describes is replaced with a new val...
Definition: Local.cpp:2181
Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to...
Definition: Local.cpp:1493
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition: Local.cpp:555
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1921
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
gep_type_iterator gep_type_begin(const User *GEP)
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
Definition: DebugInfo.cpp:66
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition: Local.cpp:651
bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition: Local.cpp:4220
Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition: Local.cpp:4186
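invertCondition pairs naturally with swapping branch successors; a minimal sketch, with Br standing in for an existing conditional BranchInst.
  Value *NotCond = invertCondition(Br->getCondition());
  Br->setCondition(NotCond);
  Br->swapSuccessors();   // keep the branch semantics unchanged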
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:593
void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks; these blocks must have no predecessors that are not themselves being deleted.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4103
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition: Local.cpp:3202
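A small cleanup sketch that chains two of the utilities defined in this file; F, TLI and DTU are placeholders and both pointers may be null.
  bool Changed = false;
  for (BasicBlock &BB : F)
    Changed |= SimplifyInstructionsInBlock(&BB, TLI);
  Changed |= removeUnreachableBlocks(F, DTU);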
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition: Local.cpp:1485
bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition: Local.cpp:3523
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:471
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
#define NDEBUG
Definition: regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:52
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:231
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117