LLVM 23.0.0git
Local.cpp
Go to the documentation of this file.
1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <optional>
#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
93STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95
97 "phicse-debug-hash",
98#ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100#else
101 cl::init(false),
102#endif
104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
114 "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
116 cl::desc("Stop removing an empty block if removing it will introduce more "
117 "than this number of phi entries in its successor"));
118
119// Max recursion depth for collectBitParts used when detecting bswap and
120// bitreverse idioms.
121static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
127/// ConstantFoldTerminator - If a terminator instruction is predicated on a
128/// constant value, convert it into an unconditional branch to the constant
129/// destination. This is a nontrivial operation because the successors of this
130/// basic block must have their PHI nodes updated.
131/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
132/// conditions and indirectbr addresses this might make dead if
133/// DeleteDeadConditions is true.
134bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
135 const TargetLibraryInfo *TLI,
136 DomTreeUpdater *DTU) {
137 Instruction *T = BB->getTerminator();
138 IRBuilder<> Builder(T);
139
140 // Branch - See if we are conditional jumping on constant
141 if (auto *BI = dyn_cast<BranchInst>(T)) {
142 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
143
144 BasicBlock *Dest1 = BI->getSuccessor(0);
145 BasicBlock *Dest2 = BI->getSuccessor(1);
146
147 if (Dest2 == Dest1) { // Conditional branch to same location?
148 // This branch matches something like this:
149 // br bool %cond, label %Dest, label %Dest
150 // and changes it into: br label %Dest
151
152 // Let the basic block know that we are letting go of one copy of it.
153 assert(BI->getParent() && "Terminator not inserted in block!");
154 Dest1->removePredecessor(BI->getParent());
155
156 // Replace the conditional branch with an unconditional one.
157 BranchInst *NewBI = Builder.CreateBr(Dest1);
158
159 // Transfer the metadata to the new branch instruction.
160 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
161 LLVMContext::MD_annotation});
162
163 Value *Cond = BI->getCondition();
164 BI->eraseFromParent();
165 if (DeleteDeadConditions)
167 return true;
168 }
169
170 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
171 // Are we branching on constant?
172 // YES. Change to unconditional branch...
173 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
174 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
175
176 // Let the basic block know that we are letting go of it. Based on this,
177 // it will adjust it's PHI nodes.
178 OldDest->removePredecessor(BB);
179
180 // Replace the conditional branch with an unconditional one.
181 BranchInst *NewBI = Builder.CreateBr(Destination);
182
183 // Transfer the metadata to the new branch instruction.
184 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
185 LLVMContext::MD_annotation});
186
187 BI->eraseFromParent();
188 if (DTU)
189 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
190 return true;
191 }
192
193 return false;
194 }
195
196 if (auto *SI = dyn_cast<SwitchInst>(T)) {
197 // If we are switching on a constant, we can convert the switch to an
198 // unconditional branch.
199 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
200 BasicBlock *DefaultDest = SI->getDefaultDest();
201 BasicBlock *TheOnlyDest = DefaultDest;
202
203 // If the default is unreachable, ignore it when searching for TheOnlyDest.
204 if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
205 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
206
207 bool Changed = false;
208
209 // Figure out which case it goes to.
210 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
211 // Found case matching a constant operand?
212 if (It->getCaseValue() == CI) {
213 TheOnlyDest = It->getCaseSuccessor();
214 break;
215 }
216
217 // Check to see if this branch is going to the same place as the default
218 // dest. If so, eliminate it as an explicit compare.
219 if (It->getCaseSuccessor() == DefaultDest) {
221 unsigned NCases = SI->getNumCases();
222 // Fold the case metadata into the default if there will be any branches
223 // left, unless the metadata doesn't match the switch.
224 if (NCases > 1 && MD) {
225 // Collect branch weights into a vector.
227 extractFromBranchWeightMD64(MD, Weights);
228
229 // Merge weight of this case to the default weight.
230 unsigned Idx = It->getCaseIndex();
231
232 // Check for and prevent uint64_t overflow by reducing branch weights.
233 if (Weights[0] > UINT64_MAX - Weights[Idx + 1])
234 fitWeights(Weights);
235
236 Weights[0] += Weights[Idx + 1];
237 // Remove weight for this case.
238 std::swap(Weights[Idx + 1], Weights.back());
239 Weights.pop_back();
241 }
242 // Remove this entry.
243 BasicBlock *ParentBB = SI->getParent();
244 DefaultDest->removePredecessor(ParentBB);
245 It = SI->removeCase(It);
246 End = SI->case_end();
247
248 // Removing this case may have made the condition constant. In that
249 // case, update CI and restart iteration through the cases.
250 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
251 CI = NewCI;
252 It = SI->case_begin();
253 }
254
255 Changed = true;
256 continue;
257 }
258
259 // Otherwise, check to see if the switch only branches to one destination.
260 // We do this by reseting "TheOnlyDest" to null when we find two non-equal
261 // destinations.
262 if (It->getCaseSuccessor() != TheOnlyDest)
263 TheOnlyDest = nullptr;
264
265 // Increment this iterator as we haven't removed the case.
266 ++It;
267 }
268
269 if (CI && !TheOnlyDest) {
270 // Branching on a constant, but not any of the cases, go to the default
271 // successor.
272 TheOnlyDest = SI->getDefaultDest();
273 }
274
275 // If we found a single destination that we can fold the switch into, do so
276 // now.
277 if (TheOnlyDest) {
278 // Insert the new branch.
279 Builder.CreateBr(TheOnlyDest);
280 BasicBlock *BB = SI->getParent();
281
282 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
283
284 // Remove entries from PHI nodes which we no longer branch to...
285 BasicBlock *SuccToKeep = TheOnlyDest;
286 for (BasicBlock *Succ : successors(SI)) {
287 if (DTU && Succ != TheOnlyDest)
288 RemovedSuccessors.insert(Succ);
289 // Found case matching a constant operand?
290 if (Succ == SuccToKeep) {
291 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
292 } else {
293 Succ->removePredecessor(BB);
294 }
295 }
296
297 // Delete the old switch.
298 Value *Cond = SI->getCondition();
299 SI->eraseFromParent();
300 if (DeleteDeadConditions)
302 if (DTU) {
303 std::vector<DominatorTree::UpdateType> Updates;
304 Updates.reserve(RemovedSuccessors.size());
305 for (auto *RemovedSuccessor : RemovedSuccessors)
306 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
307 DTU->applyUpdates(Updates);
308 }
309 return true;
310 }
311
312 if (SI->getNumCases() == 1) {
313 // Otherwise, we can fold this switch into a conditional branch
314 // instruction if it has only one non-default destination.
315 auto FirstCase = *SI->case_begin();
316 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
317 FirstCase.getCaseValue(), "cond");
318
319 // Insert the new branch.
320 BranchInst *NewBr = Builder.CreateCondBr(Cond,
321 FirstCase.getCaseSuccessor(),
322 SI->getDefaultDest());
323 SmallVector<uint32_t> Weights;
324 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
325 uint32_t DefWeight = Weights[0];
326 uint32_t CaseWeight = Weights[1];
327 // The TrueWeight should be the weight for the single case of SI.
328 NewBr->setMetadata(LLVMContext::MD_prof,
329 MDBuilder(BB->getContext())
330 .createBranchWeights(CaseWeight, DefWeight));
331 }
332
333 // Update make.implicit metadata to the newly-created conditional branch.
334 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
335 if (MakeImplicitMD)
336 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
337
338 // Delete the old switch.
339 SI->eraseFromParent();
340 return true;
341 }
342 return Changed;
343 }
344
345 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
346 // indirectbr blockaddress(@F, @BB) -> br label @BB
347 if (auto *BA =
348 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
349 BasicBlock *TheOnlyDest = BA->getBasicBlock();
350 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
351
352 // Insert the new branch.
353 Builder.CreateBr(TheOnlyDest);
354
355 BasicBlock *SuccToKeep = TheOnlyDest;
356 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
357 BasicBlock *DestBB = IBI->getDestination(i);
358 if (DTU && DestBB != TheOnlyDest)
359 RemovedSuccessors.insert(DestBB);
360 if (IBI->getDestination(i) == SuccToKeep) {
361 SuccToKeep = nullptr;
362 } else {
363 DestBB->removePredecessor(BB);
364 }
365 }
366 Value *Address = IBI->getAddress();
367 IBI->eraseFromParent();
368 if (DeleteDeadConditions)
369 // Delete pointer cast instructions.
371
372 // Also zap the blockaddress constant if there are no users remaining,
373 // otherwise the destination is still marked as having its address taken.
374 if (BA->use_empty())
375 BA->destroyConstant();
376
377 // If we didn't find our destination in the IBI successor list, then we
378 // have undefined behavior. Replace the unconditional branch with an
379 // 'unreachable' instruction.
380 if (SuccToKeep) {
382 new UnreachableInst(BB->getContext(), BB);
383 }
384
385 if (DTU) {
386 std::vector<DominatorTree::UpdateType> Updates;
387 Updates.reserve(RemovedSuccessors.size());
388 for (auto *RemovedSuccessor : RemovedSuccessors)
389 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
390 DTU->applyUpdates(Updates);
391 }
392 return true;
393 }
394 }
395
396 return false;
397}
398
399//===----------------------------------------------------------------------===//
400// Local dead code elimination.
401//
402
403/// isInstructionTriviallyDead - Return true if the result produced by the
404/// instruction is not used, and the instruction has no side effects.
405///
407 const TargetLibraryInfo *TLI) {
408 if (!I->use_empty())
409 return false;
411}
412
414 Instruction *I, const TargetLibraryInfo *TLI) {
415 // Instructions that are "markers" and have implied meaning on code around
416 // them (without explicit uses), are not dead on unused paths.
418 if (II->getIntrinsicID() == Intrinsic::stacksave ||
419 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
420 II->isLifetimeStartOrEnd())
421 return false;
423}
424
426 const TargetLibraryInfo *TLI) {
427 if (I->isTerminator())
428 return false;
429
430 // We don't want the landingpad-like instructions removed by anything this
431 // general.
432 if (I->isEHPad())
433 return false;
434
435 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
436 if (DLI->getLabel())
437 return false;
438 return true;
439 }
440
441 if (auto *CB = dyn_cast<CallBase>(I))
442 if (isRemovableAlloc(CB, TLI))
443 return true;
444
445 if (!I->willReturn()) {
447 if (!II)
448 return false;
449
450 switch (II->getIntrinsicID()) {
451 case Intrinsic::experimental_guard: {
452 // Guards on true are operationally no-ops. In the future we can
453 // consider more sophisticated tradeoffs for guards considering potential
454 // for check widening, but for now we keep things simple.
455 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
456 return Cond && Cond->isOne();
457 }
458 // TODO: These intrinsics are not safe to remove, because this may remove
459 // a well-defined trap.
460 case Intrinsic::wasm_trunc_signed:
461 case Intrinsic::wasm_trunc_unsigned:
462 case Intrinsic::ptrauth_auth:
463 case Intrinsic::ptrauth_resign:
464 case Intrinsic::ptrauth_resign_load_relative:
465 return true;
466 default:
467 return false;
468 }
469 }
470
471 if (!I->mayHaveSideEffects())
472 return true;
473
474 // Special case intrinsics that "may have side effects" but can be deleted
475 // when dead.
477 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
478 if (II->getIntrinsicID() == Intrinsic::stacksave ||
479 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
480 return true;
481
482 // Intrinsics declare sideeffects to prevent them from moving, but they are
483 // nops without users.
484 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
485 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
486 return true;
487
488 if (II->isLifetimeStartOrEnd()) {
489 auto *Arg = II->getArgOperand(0);
490 if (isa<PoisonValue>(Arg))
491 return true;
492
493 // If the only uses of the alloca are lifetime intrinsics, then the
494 // intrinsics are dead.
495 return llvm::all_of(Arg->uses(), [](Use &Use) {
496 return isa<LifetimeIntrinsic>(Use.getUser());
497 });
498 }
499
500 // Assumptions are dead if their condition is trivially true.
501 if (II->getIntrinsicID() == Intrinsic::assume &&
503 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
504 return !Cond->isZero();
505
506 return false;
507 }
508
509 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
510 std::optional<fp::ExceptionBehavior> ExBehavior =
511 FPI->getExceptionBehavior();
512 return *ExBehavior != fp::ebStrict;
513 }
514 }
515
516 if (auto *Call = dyn_cast<CallBase>(I)) {
517 if (Value *FreedOp = getFreedOperand(Call, TLI))
518 if (Constant *C = dyn_cast<Constant>(FreedOp))
519 return C->isNullValue() || isa<UndefValue>(C);
520 if (isMathLibCallNoop(Call, TLI))
521 return true;
522 }
523
524 // Non-volatile atomic loads from constants can be removed.
525 if (auto *LI = dyn_cast<LoadInst>(I))
526 if (auto *GV = dyn_cast<GlobalVariable>(
527 LI->getPointerOperand()->stripPointerCasts()))
528 if (!LI->isVolatile() && GV->isConstant())
529 return true;
530
531 return false;
532}
533
534/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
535/// trivially dead instruction, delete it. If that makes any of its operands
536/// trivially dead, delete them too, recursively. Return true if any
537/// instructions were deleted.
539 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
540 std::function<void(Value *)> AboutToDeleteCallback) {
542 if (!I || !isInstructionTriviallyDead(I, TLI))
543 return false;
544
546 DeadInsts.push_back(I);
547 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
548 AboutToDeleteCallback);
549
550 return true;
551}
552
555 MemorySSAUpdater *MSSAU,
556 std::function<void(Value *)> AboutToDeleteCallback) {
557 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
558 for (; S != E; ++S) {
559 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
560 if (!I || !isInstructionTriviallyDead(I)) {
561 DeadInsts[S] = nullptr;
562 ++Alive;
563 }
564 }
565 if (Alive == E)
566 return false;
567 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
568 AboutToDeleteCallback);
569 return true;
570}
571
574 MemorySSAUpdater *MSSAU,
575 std::function<void(Value *)> AboutToDeleteCallback) {
576 // Process the dead instruction list until empty.
577 while (!DeadInsts.empty()) {
578 Value *V = DeadInsts.pop_back_val();
580 if (!I)
581 continue;
583 "Live instruction found in dead worklist!");
584 assert(I->use_empty() && "Instructions with uses are not dead.");
585
586 // Don't lose the debug info while deleting the instructions.
588
589 if (AboutToDeleteCallback)
590 AboutToDeleteCallback(I);
591
592 // Null out all of the instruction's operands to see if any operand becomes
593 // dead as we go.
594 for (Use &OpU : I->operands()) {
595 Value *OpV = OpU.get();
596 OpU.set(nullptr);
597
598 if (!OpV->use_empty())
599 continue;
600
601 // If the operand is an instruction that became dead as we nulled out the
602 // operand, and if it is 'trivially' dead, delete it in a future loop
603 // iteration.
604 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
605 if (isInstructionTriviallyDead(OpI, TLI))
606 DeadInsts.push_back(OpI);
607 }
608 if (MSSAU)
609 MSSAU->removeMemoryAccess(I);
610
611 I->eraseFromParent();
612 }
613}
614
617 findDbgUsers(I, DPUsers);
618 for (auto *DVR : DPUsers)
619 DVR->setKillLocation();
620 return !DPUsers.empty();
621}
622
623/// areAllUsesEqual - Check whether the uses of a value are all the same.
624/// This is similar to Instruction::hasOneUse() except this will also return
625/// true when there are no uses or multiple uses that all refer to the same
626/// value.
628 Value::user_iterator UI = I->user_begin();
629 Value::user_iterator UE = I->user_end();
630 if (UI == UE)
631 return true;
632
633 User *TheUse = *UI;
634 for (++UI; UI != UE; ++UI) {
635 if (*UI != TheUse)
636 return false;
637 }
638 return true;
639}
640
641/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
642/// dead PHI node, due to being a def-use chain of single-use nodes that
643/// either forms a cycle or is terminated by a trivially dead instruction,
644/// delete it. If that makes any of its operands trivially dead, delete them
645/// too, recursively. Return true if a change was made.
647 const TargetLibraryInfo *TLI,
648 llvm::MemorySSAUpdater *MSSAU) {
650 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
651 I = cast<Instruction>(*I->user_begin())) {
652 if (I->use_empty())
654
655 // If we find an instruction more than once, we're on a cycle that
656 // won't prove fruitful.
657 if (!Visited.insert(I).second) {
658 // Break the cycle and delete the instruction and its operands.
659 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
661 return true;
662 }
663 }
664 return false;
665}
666
667static bool
670 const DataLayout &DL,
671 const TargetLibraryInfo *TLI) {
672 if (isInstructionTriviallyDead(I, TLI)) {
674
675 // Null out all of the instruction's operands to see if any operand becomes
676 // dead as we go.
677 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
678 Value *OpV = I->getOperand(i);
679 I->setOperand(i, nullptr);
680
681 if (!OpV->use_empty() || I == OpV)
682 continue;
683
684 // If the operand is an instruction that became dead as we nulled out the
685 // operand, and if it is 'trivially' dead, delete it in a future loop
686 // iteration.
687 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
688 if (isInstructionTriviallyDead(OpI, TLI))
689 WorkList.insert(OpI);
690 }
691
692 I->eraseFromParent();
693
694 return true;
695 }
696
697 if (Value *SimpleV = simplifyInstruction(I, DL)) {
698 // Add the users to the worklist. CAREFUL: an instruction can use itself,
699 // in the case of a phi node.
700 for (User *U : I->users()) {
701 if (U != I) {
702 WorkList.insert(cast<Instruction>(U));
703 }
704 }
705
706 // Replace the instruction with its simplified value.
707 bool Changed = false;
708 if (!I->use_empty()) {
709 I->replaceAllUsesWith(SimpleV);
710 Changed = true;
711 }
712 if (isInstructionTriviallyDead(I, TLI)) {
713 I->eraseFromParent();
714 Changed = true;
715 }
716 return Changed;
717 }
718 return false;
719}
720
721/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
722/// simplify any instructions in it and recursively delete dead instructions.
723///
724/// This returns true if it changed the code, note that it can delete
725/// instructions in other blocks as well in this block.
727 const TargetLibraryInfo *TLI) {
728 bool MadeChange = false;
729 const DataLayout &DL = BB->getDataLayout();
730
731#ifndef NDEBUG
732 // In debug builds, ensure that the terminator of the block is never replaced
733 // or deleted by these simplifications. The idea of simplification is that it
734 // cannot introduce new instructions, and there is no way to replace the
735 // terminator of a block without introducing a new instruction.
736 AssertingVH<Instruction> TerminatorVH(&BB->back());
737#endif
738
740 // Iterate over the original function, only adding insts to the worklist
741 // if they actually need to be revisited. This avoids having to pre-init
742 // the worklist with the entire function's worth of instructions.
743 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
744 BI != E;) {
745 assert(!BI->isTerminator());
746 Instruction *I = &*BI;
747 ++BI;
748
749 // We're visiting this instruction now, so make sure it's not in the
750 // worklist from an earlier visit.
751 if (!WorkList.count(I))
752 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
753 }
754
755 while (!WorkList.empty()) {
756 Instruction *I = WorkList.pop_back_val();
757 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
758 }
759 return MadeChange;
760}
761
762//===----------------------------------------------------------------------===//
763// Control Flow Graph Restructuring.
764//
765
767 DomTreeUpdater *DTU) {
768
769 // If BB has single-entry PHI nodes, fold them.
770 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
771 Value *NewVal = PN->getIncomingValue(0);
772 // Replace self referencing PHI with poison, it must be dead.
773 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
774 PN->replaceAllUsesWith(NewVal);
775 PN->eraseFromParent();
776 }
777
778 BasicBlock *PredBB = DestBB->getSinglePredecessor();
779 assert(PredBB && "Block doesn't have a single predecessor!");
780
781 bool ReplaceEntryBB = PredBB->isEntryBlock();
782
783 // DTU updates: Collect all the edges that enter
784 // PredBB. These dominator edges will be redirected to DestBB.
786
787 if (DTU) {
788 // To avoid processing the same predecessor more than once.
790 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
791 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
792 // This predecessor of PredBB may already have DestBB as a successor.
793 if (PredOfPredBB != PredBB)
794 if (SeenPreds.insert(PredOfPredBB).second)
795 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
796 SeenPreds.clear();
797 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
798 if (SeenPreds.insert(PredOfPredBB).second)
799 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
800 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
801 }
802
803 // Zap anything that took the address of DestBB. Not doing this will give the
804 // address an invalid value.
805 if (DestBB->hasAddressTaken()) {
806 BlockAddress *BA = BlockAddress::get(DestBB);
807 Constant *Replacement =
808 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
810 BA->getType()));
811 BA->destroyConstant();
812 }
813
814 // Anything that branched to PredBB now branches to DestBB.
815 PredBB->replaceAllUsesWith(DestBB);
816
817 // Splice all the instructions from PredBB to DestBB.
818 PredBB->getTerminator()->eraseFromParent();
819 DestBB->splice(DestBB->begin(), PredBB);
820 new UnreachableInst(PredBB->getContext(), PredBB);
821
822 // If the PredBB is the entry block of the function, move DestBB up to
823 // become the entry block after we erase PredBB.
824 if (ReplaceEntryBB)
825 DestBB->moveAfter(PredBB);
826
827 if (DTU) {
828 assert(PredBB->size() == 1 &&
830 "The successor list of PredBB isn't empty before "
831 "applying corresponding DTU updates.");
832 DTU->applyUpdatesPermissive(Updates);
833 DTU->deleteBB(PredBB);
834 // Recalculation of DomTree is needed when updating a forward DomTree and
835 // the Entry BB is replaced.
836 if (ReplaceEntryBB && DTU->hasDomTree()) {
837 // The entry block was removed and there is no external interface for
838 // the dominator tree to be notified of this change. In this corner-case
839 // we recalculate the entire tree.
840 DTU->recalculate(*(DestBB->getParent()));
841 }
842 }
843
844 else {
845 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
846 }
847}
848
849/// Return true if we can choose one of these values to use in place of the
850/// other. Note that we will always choose the non-undef value to keep.
851static bool CanMergeValues(Value *First, Value *Second) {
852 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
853}
854
855/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
856/// branch to Succ, into Succ.
857///
858/// Assumption: Succ is the single successor for BB.
859static bool
861 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
862 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
863
864 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
865 << Succ->getName() << "\n");
866 // Shortcut, if there is only a single predecessor it must be BB and merging
867 // is always safe
868 if (Succ->getSinglePredecessor())
869 return true;
870
871 // Look at all the phi nodes in Succ, to see if they present a conflict when
872 // merging these blocks
873 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
874 PHINode *PN = cast<PHINode>(I);
875
876 // If the incoming value from BB is again a PHINode in
877 // BB which has the same incoming value for *PI as PN does, we can
878 // merge the phi nodes and then the blocks can still be merged
880 if (BBPN && BBPN->getParent() == BB) {
881 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
882 BasicBlock *IBB = PN->getIncomingBlock(PI);
883 if (BBPreds.count(IBB) &&
885 PN->getIncomingValue(PI))) {
887 << "Can't fold, phi node " << PN->getName() << " in "
888 << Succ->getName() << " is conflicting with "
889 << BBPN->getName() << " with regard to common predecessor "
890 << IBB->getName() << "\n");
891 return false;
892 }
893 }
894 } else {
895 Value* Val = PN->getIncomingValueForBlock(BB);
896 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
897 // See if the incoming value for the common predecessor is equal to the
898 // one for BB, in which case this phi node will not prevent the merging
899 // of the block.
900 BasicBlock *IBB = PN->getIncomingBlock(PI);
901 if (BBPreds.count(IBB) &&
902 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
903 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
904 << " in " << Succ->getName()
905 << " is conflicting with regard to common "
906 << "predecessor " << IBB->getName() << "\n");
907 return false;
908 }
909 }
910 }
911 }
912
913 return true;
914}
915
918
919/// Determines the value to use as the phi node input for a block.
920///
921/// Select between \p OldVal any value that we know flows from \p BB
922/// to a particular phi on the basis of which one (if either) is not
923/// undef. Update IncomingValues based on the selected value.
924///
925/// \param OldVal The value we are considering selecting.
926/// \param BB The block that the value flows in from.
927/// \param IncomingValues A map from block-to-value for other phi inputs
928/// that we have examined.
929///
930/// \returns the selected value.
932 IncomingValueMap &IncomingValues) {
933 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
934 if (!isa<UndefValue>(OldVal)) {
935 assert((It != IncomingValues.end() &&
936 (!(It->second) || It->second == OldVal)) &&
937 "Expected OldVal to match incoming value from BB!");
938
939 IncomingValues.insert_or_assign(BB, OldVal);
940 return OldVal;
941 }
942
943 if (It != IncomingValues.end() && It->second)
944 return It->second;
945
946 return OldVal;
947}
948
949/// Create a map from block to value for the operands of a
950/// given phi.
951///
952/// This function initializes the map with UndefValue for all predecessors
953/// in BBPreds, and then updates the map with concrete non-undef values
954/// found in the PHI node.
955///
956/// \param PN The phi we are collecting the map for.
957/// \param BBPreds The list of all predecessor blocks to initialize with Undef.
958/// \param IncomingValues [out] The map from block to value for this phi.
960 const PredBlockVector &BBPreds,
961 IncomingValueMap &IncomingValues) {
962 for (BasicBlock *Pred : BBPreds)
963 IncomingValues[Pred] = nullptr;
964
965 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
966 Value *V = PN->getIncomingValue(i);
967 if (isa<UndefValue>(V))
968 continue;
969
970 BasicBlock *BB = PN->getIncomingBlock(i);
971 auto It = IncomingValues.find(BB);
972 if (It != IncomingValues.end())
973 It->second = V;
974 }
975}
976
977/// Replace the incoming undef values to a phi with the values
978/// from a block-to-value map.
979///
980/// \param PN The phi we are replacing the undefs in.
981/// \param IncomingValues A map from block to value.
983 const IncomingValueMap &IncomingValues) {
// Indices of incoming operands whose block has no concrete value at all
// (map entry is null), i.e. the value is genuinely undef/poison.
984 SmallVector<unsigned> TrueUndefOps;
985 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
986 Value *V = PN->getIncomingValue(i);
987
988 if (!isa<UndefValue>(V)) continue;
989
990 BasicBlock *BB = PN->getIncomingBlock(i);
991 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
992 if (It == IncomingValues.end())
993 continue;
994
995 // Keep track of undef/poison incoming values. Those must match, so we fix
996 // them up below if needed.
997 // Note: this is conservatively correct, but we could try harder and group
998 // the undef values per incoming basic block.
999 if (!It->second) {
1000 TrueUndefOps.push_back(i);
1001 continue;
1002 }
1003
1004 // There is a defined value for this incoming block, so map this undef
1005 // incoming value to the defined value.
1006 PN->setIncomingValue(i, It->second);
1007 }
1008
1009 // If there are both undef and poison values incoming, then convert those
1010 // values to undef. It is invalid to have different values for the same
1011 // incoming block.
1012 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1013 return isa<PoisonValue>(PN->getIncomingValue(i));
1014 });
1015 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
// Mixed undef and poison: rewrite each tracked operand to plain undef so all
// operands for the same block agree.
1016 for (unsigned i : TrueUndefOps)
1018 }
1019}
1020
1021// Only when they share a single common predecessor, return true.
1022// Only handles cases when BB can't be merged while its predecessors can be
1023// redirected.
1024static bool
1026 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1027 BasicBlock *&CommonPred) {
// Note: callers initialize CommonPred to nullptr before calling; it is set
// here as a side effect when exactly one common predecessor is found.
1028
1029 // There must be phis in BB, otherwise BB will be merged into Succ directly
1030 if (BB->phis().empty() || Succ->phis().empty())
1031 return false;
1032
1033 // BB must have predecessors not shared that can be redirected to Succ
1034 if (!BB->hasNPredecessorsOrMore(2))
1035 return false;
1036
// Indirect branches cannot be retargeted, so bail if any predecessor ends
// in one.
1037 if (any_of(BBPreds, [](const BasicBlock *Pred) {
1038 return isa<IndirectBrInst>(Pred->getTerminator());
1039 }))
1040 return false;
1041
1042 // Get the single common predecessor of both BB and Succ. Return false
1043 // when there are more than one common predecessors.
1044 for (BasicBlock *SuccPred : predecessors(Succ)) {
1045 if (BBPreds.count(SuccPred)) {
1046 if (CommonPred)
1047 return false;
1048 CommonPred = SuccPred;
1049 }
1050 }
1051
1052 return true;
1053}
1054
1055/// Check whether removing \p BB will make the phis in its \p Succ have too
1056/// many incoming entries. This function does not check whether \p BB is
1057/// foldable or not.
1059 // If BB only has one predecessor, then removing it will not introduce more
1060 // incoming edges for phis.
1061 if (BB->hasNPredecessors(1))
1062 return false;
1063 unsigned NumPreds = pred_size(BB);
// Count the phis in Succ that would actually grow when BB's single incoming
// entry is fanned out to all of BB's predecessors.
1064 unsigned NumChangedPhi = 0;
1065 for (auto &Phi : Succ->phis()) {
1066 // If the incoming value is a phi and the phi is defined in BB,
1067 // then removing BB will not increase the total phi entries of the ir.
1068 if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB)))
1069 if (IncomingPhi->getParent() == BB)
1070 continue;
1071 // Otherwise, we need to add entries to the phi
1072 NumChangedPhi++;
1073 }
1074 // For every phi that needs to be changed, (NumPreds - 1) new entries will be
1075 // added. If the total increase in phi entries exceeds
1076 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
1077 // introducing too many new phi entries.
1078 return (NumPreds - 1) * NumChangedPhi >
1080}
1081
1082/// Replace a value flowing from a block to a phi with
1083/// potentially multiple instances of that value flowing from the
1084/// block's predecessors to the phi.
1085///
1086/// \param BB The block with the value flowing into the phi.
1087/// \param BBPreds The predecessors of BB.
1088/// \param PN The phi that we are updating.
1089/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1091 const PredBlockVector &BBPreds,
1092 PHINode *PN,
1093 BasicBlock *CommonPred) {
// Drop BB's entry from PN; the value it carried is re-added below once per
// (non-common) predecessor of BB.
1094 Value *OldVal = PN->removeIncomingValue(BB, false);
1095 assert(OldVal && "No entry in PHI for Pred BB!");
1096
1097 // Map BBPreds to defined values or nullptr (representing undefined values).
1098 IncomingValueMap IncomingValues;
1099
1100 // We are merging two blocks - BB, and the block containing PN - and
1101 // as a result we need to redirect edges from the predecessors of BB
1102 // to go to the block containing PN, and update PN
1103 // accordingly. Since we allow merging blocks in the case where the
1104 // predecessor and successor blocks both share some predecessors,
1105 // and where some of those common predecessors might have undef
1106 // values flowing into PN, we want to rewrite those values to be
1107 // consistent with the non-undef values.
1108
1109 gatherIncomingValuesToPhi(PN, BBPreds, IncomingValues);
1110
1111 // If this incoming value is one of the PHI nodes in BB, the new entries
1112 // in the PHI node are the entries from the old PHI.
1113 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1114 PHINode *OldValPN = cast<PHINode>(OldVal);
1115 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1116 // Note that, since we are merging phi nodes and BB and Succ might
1117 // have common predecessors, we could end up with a phi node with
1118 // identical incoming branches. This will be cleaned up later (and
1119 // will trigger asserts if we try to clean it up now, without also
1120 // simplifying the corresponding conditional branch).
1121 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1122
1123 if (PredBB == CommonPred)
1124 continue;
1125
1126 Value *PredVal = OldValPN->getIncomingValue(i);
1127 Value *Selected =
1128 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1129
1130 // And add a new incoming value for this predecessor for the
1131 // newly retargeted branch.
1132 PN->addIncoming(Selected, PredBB);
1133 }
// When BB survives (not killable), the common predecessor still reaches PN
// through BB, so keep an entry for BB carrying the common pred's old value.
1134 if (CommonPred)
1135 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1136
1137 } else {
1138 for (BasicBlock *PredBB : BBPreds) {
1139 // Update existing incoming values in PN for this
1140 // predecessor of BB.
1141 if (PredBB == CommonPred)
1142 continue;
1143
1144 Value *Selected =
1145 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1146
1147 // And add a new incoming value for this predecessor for the
1148 // newly retargeted branch.
1149 PN->addIncoming(Selected, PredBB);
1150 }
1151 if (CommonPred)
1152 PN->addIncoming(OldVal, BB);
1153 }
1154
1155 replaceUndefValuesInPhi(PN, IncomingValues);
1156}
1157
1159 DomTreeUpdater *DTU) {
1160 assert(BB != &BB->getParent()->getEntryBlock() &&
1161 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1162
1163 // We can't simplify infinite loops.
1164 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1165 if (BB == Succ)
1166 return false;
1167
1169
1170 // The single common predecessor of BB and Succ when BB cannot be killed
1171 BasicBlock *CommonPred = nullptr;
1172
1173 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1174
1175 // Even if we can not fold BB into Succ, we may be able to redirect the
1176 // predecessors of BB to Succ.
1177 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
1178 BB, Succ, BBPreds, CommonPred);
1179
1180 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
1181 return false;
1182
1183 // Check to see if merging these blocks/phis would cause conflicts for any of
1184 // the phi nodes in BB or Succ. If not, we can safely merge.
1185
1186 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1187 // has uses which will not disappear when the PHI nodes are merged. It is
1188 // possible to handle such cases, but difficult: it requires checking whether
1189 // BB dominates Succ, which is non-trivial to calculate in the case where
1190 // Succ has multiple predecessors. Also, it requires checking whether
1191 // constructing the necessary self-referential PHI node doesn't introduce any
1192 // conflicts; this isn't too difficult, but the previous code for doing this
1193 // was incorrect.
1194 //
1195 // Note that if this check finds a live use, BB dominates Succ, so BB is
1196 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1197 // folding the branch isn't profitable in that case anyway.
1198 if (!Succ->getSinglePredecessor()) {
1199 BasicBlock::iterator BBI = BB->begin();
1200 while (isa<PHINode>(*BBI)) {
1201 for (Use &U : BBI->uses()) {
1202 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1203 if (PN->getIncomingBlock(U) != BB)
1204 return false;
1205 } else {
1206 return false;
1207 }
1208 }
1209 ++BBI;
1210 }
1211 }
1212
1213 if (BBPhisMergeable && CommonPred)
1214 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1215 << " and " << Succ->getName() << " : "
1216 << CommonPred->getName() << "\n");
1217
1218 // 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner loop
1219 // metadata.
1220 //
1221 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1222 // current status (that loop metadata is implemented as metadata attached to
1223 // the branch instruction in the loop latch block). To quote from review
1224 // comments, "the current representation of loop metadata (using a loop latch
1225 // terminator attachment) is known to be fundamentally broken. Loop latches
1226 // are not uniquely associated with loops (both in that a latch can be part of
1227 // multiple loops and a loop may have multiple latches). Loop headers are. The
1228 // solution to this problem is also known: Add support for basic block
1229 // metadata, and attach loop metadata to the loop header."
1230 //
1231 // Why bail out:
1232 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1233 // the latch for inner-loop (see reason below), so bail out to preserve
1234 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1235 // to this inner-loop.
1236 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1237 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1238 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1239 // one self-looping basic block, which is contradictory with the assumption.
1240 //
1241 // To illustrate how inner-loop metadata is dropped:
1242 //
1243 // CFG Before
1244 //
1245 // BB is while.cond.exit, attached with loop metadata md2.
1246 // BB->Pred is for.body, attached with loop metadata md1.
1247 //
1248 // entry
1249 // |
1250 // v
1251 // ---> while.cond -------------> while.end
1252 // | |
1253 // | v
1254 // | while.body
1255 // | |
1256 // | v
1257 // | for.body <---- (md1)
1258 // | | |______|
1259 // | v
1260 // | while.cond.exit (md2)
1261 // | |
1262 // |_______|
1263 //
1264 // CFG After
1265 //
1266 // while.cond1 is the merge of while.cond.exit and while.cond above.
1267 // for.body is attached with md2, and md1 is dropped.
1268 // If LoopSimplify runs later (as a part of loop pass), it could create
1269 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1270 // back), but it won't see 'md1' nor restore it for the inner-loop.
1271 //
1272 // entry
1273 // |
1274 // v
1275 // ---> while.cond1 -------------> while.end
1276 // | |
1277 // | v
1278 // | while.body
1279 // | |
1280 // | v
1281 // | for.body <---- (md2)
1282 // |_______| |______|
1283 if (Instruction *TI = BB->getTerminator())
1284 if (TI->hasNonDebugLocLoopMetadata())
1285 for (BasicBlock *Pred : predecessors(BB))
1286 if (Instruction *PredTI = Pred->getTerminator())
1287 if (PredTI->hasNonDebugLocLoopMetadata())
1288 return false;
1289
1290 if (BBKillable)
1291 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1292 else if (BBPhisMergeable)
1293 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1294
1296
1297 if (DTU) {
1298 // To avoid processing the same predecessor more than once.
1300 // All predecessors of BB (except the common predecessor) will be moved to
1301 // Succ.
1302 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1304 predecessors(Succ));
// Record the new Pred->Succ edges first, then the removed Pred->BB edges;
// edges into Succ that already exist (common predecessors) are skipped.
1305 for (auto *PredOfBB : predecessors(BB)) {
1306 // Do not modify those common predecessors of BB and Succ
1307 if (!SuccPreds.contains(PredOfBB))
1308 if (SeenPreds.insert(PredOfBB).second)
1309 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1310 }
1311
1312 SeenPreds.clear();
1313
1314 for (auto *PredOfBB : predecessors(BB))
1315 // When BB cannot be killed, do not remove the edge between BB and
1316 // CommonPred.
1317 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1318 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1319
1320 if (BBKillable)
1321 Updates.push_back({DominatorTree::Delete, BB, Succ});
1322 }
1323
1324 if (isa<PHINode>(Succ->begin())) {
1325 // If there is more than one pred of succ, and there are PHI nodes in
1326 // the successor, then we need to add incoming edges for the PHI nodes
1327 //
1328 const PredBlockVector BBPreds(predecessors(BB));
1329
1330 // Loop over all of the PHI nodes in the successor of BB.
1331 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1332 PHINode *PN = cast<PHINode>(I);
1333 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1334 }
1335 }
1336
1337 if (Succ->getSinglePredecessor()) {
1338 // BB is the only predecessor of Succ, so Succ will end up with exactly
1339 // the same predecessors BB had.
1340 // Copy over any phi, debug or lifetime instruction.
1342 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1343 } else {
1344 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1345 // We explicitly check for such uses for merging phis.
1346 assert(PN->use_empty() && "There shouldn't be any uses here!");
1347 PN->eraseFromParent();
1348 }
1349 }
1350
1351 // If the unconditional branch we replaced contains non-debug llvm.loop
1352 // metadata, we add the metadata to the branch instructions in the
1353 // predecessors.
1354 if (Instruction *TI = BB->getTerminator())
1355 if (TI->hasNonDebugLocLoopMetadata()) {
1356 MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop);
1357 for (BasicBlock *Pred : predecessors(BB))
1358 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1359 }
1360
1361 if (BBKillable) {
1362 // Everything that jumped to BB now goes to Succ.
1363 BB->replaceAllUsesWith(Succ);
1364
1365 if (!Succ->hasName())
1366 Succ->takeName(BB);
1367
1368 // Clear the successor list of BB to match updates applying to DTU later.
1369 if (BB->getTerminator())
1370 BB->back().eraseFromParent();
1371
1372 new UnreachableInst(BB->getContext(), BB);
1373 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1374 "applying corresponding DTU updates.");
1375 } else if (BBPhisMergeable) {
1376 // Everything except CommonPred that jumped to BB now goes to Succ.
1377 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1378 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1379 return UseInst->getParent() != CommonPred &&
1380 BBPreds.contains(UseInst->getParent());
1381 return false;
1382 });
1383 }
1384
1385 if (DTU)
1386 DTU->applyUpdates(Updates);
1387
1388 if (BBKillable)
1389 DeleteDeadBlock(BB, DTU);
1390
1391 return true;
1392}
1393
static bool
1397 // This implementation doesn't currently consider undef operands
1398 // specially. Theoretically, two phis which are identical except for
1399 // one having an undef where the other doesn't could be collapsed.
// Quadratic pairwise comparison of the PHIs in BB; duplicates are RAUW'd to
// the first occurrence and queued in ToRemove for later erasure.
1401 bool Changed = false;
1402
1403 // Examine each PHI.
1404 // Note that increment of I must *NOT* be in the iteration_expression, since
1405 // we don't want to immediately advance when we restart from the beginning.
1406 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1407 ++I;
1408 // Is there an identical PHI node in this basic block?
1409 // Note that we only look in the upper square's triangle,
1410 // we already checked that the lower triangle PHI's aren't identical.
1411 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1412 if (ToRemove.contains(DuplicatePN))
1413 continue;
1414 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1415 continue;
1416 // A duplicate. Replace this PHI with the base PHI.
1417 ++NumPHICSEs;
1418 DuplicatePN->replaceAllUsesWith(PN);
1419 ToRemove.insert(DuplicatePN);
1420 Changed = true;
1421
1422 // The RAUW can change PHIs that we already visited.
1423 I = BB->begin();
1424 break; // Start over from the beginning.
1425 }
1426 }
1427 return Changed;
1428}
1429
static bool
1433 // This implementation doesn't currently consider undef operands
1434 // specially. Theoretically, two phis which are identical except for
1435 // one having an undef where the other doesn't could be collapsed.
// Hash-set based variant: O(n) expected, at the cost of hashing each PHI's
// operands and incoming blocks.
1437 struct PHIDenseMapInfo {
1438 static PHINode *getEmptyKey() {
1440 }
1441
1442 static PHINode *getTombstoneKey() {
1444 }
1445
1446 static bool isSentinel(PHINode *PN) {
1447 return PN == getEmptyKey() || PN == getTombstoneKey();
1448 }
1449
1450 // WARNING: this logic must be kept in sync with
1451 // Instruction::isIdenticalToWhenDefined()!
1452 static unsigned getHashValueImpl(PHINode *PN) {
1453 // Compute a hash value on the operands. Instcombine will likely have
1454 // sorted them, which helps expose duplicates, but we have to check all
1455 // the operands to be safe in case instcombine hasn't run.
1456 return static_cast<unsigned>(
1458 hash_combine_range(PN->blocks())));
1459 }
1460
1461 static unsigned getHashValue(PHINode *PN) {
1462#ifndef NDEBUG
1463 // If -phicse-debug-hash was specified, return a constant -- this
1464 // will force all hashing to collide, so we'll exhaustively search
1465 // the table for a match, and the assertion in isEqual will fire if
1466 // there's a bug causing equal keys to hash differently.
1467 if (PHICSEDebugHash)
1468 return 0;
1469#endif
1470 return getHashValueImpl(PN);
1471 }
1472
1473 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1474 if (isSentinel(LHS) || isSentinel(RHS))
1475 return LHS == RHS;
1476 return LHS->isIdenticalTo(RHS);
1477 }
1478
1479 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1480 // These comparisons are nontrivial, so assert that equality implies
1481 // hash equality (DenseMap demands this as an invariant).
1482 bool Result = isEqualImpl(LHS, RHS);
1483 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1485 return Result;
1486 }
1487 };
1488
1489 // Set of unique PHINodes.
1491 PHISet.reserve(4 * PHICSENumPHISmallSize);
1492
1493 // Examine each PHI.
1494 bool Changed = false;
1495 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1496 if (ToRemove.contains(PN))
1497 continue;
1498 auto Inserted = PHISet.insert(PN);
1499 if (!Inserted.second) {
1500 // A duplicate. Replace this PHI with its duplicate.
1501 ++NumPHICSEs;
1502 PN->replaceAllUsesWith(*Inserted.first);
1503 ToRemove.insert(PN);
1504 Changed = true;
1505
1506 // The RAUW can change PHIs that we already visited. Start over from the
1507 // beginning.
1508 PHISet.clear();
1509 I = BB->begin();
1510 }
1511 }
1512
1513 return Changed;
1514}
1515
1526
// Physically erase the CSE'd PHIs only after all replacements are done.
1530 for (PHINode *PN : ToRemove)
1531 PN->eraseFromParent();
1532 return Changed;
1533}
1534
1536 const DataLayout &DL) {
// Try to raise the alignment of the object V ultimately points at (an alloca
// or a global) to PrefAlign. Returns the alignment actually in effect after
// the attempt; returns Align(1) when V is neither an alloca nor a global.
1537 V = V->stripPointerCasts();
1538
1539 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1540 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1541 // than the current alignment, as the known bits calculation should have
1542 // already taken it into account. However, this is not always the case,
1543 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1544 // doesn't.
1545 Align CurrentAlign = AI->getAlign();
1546 if (PrefAlign <= CurrentAlign)
1547 return CurrentAlign;
1548
1549 // If the preferred alignment is greater than the natural stack alignment
1550 // then don't round up. This avoids dynamic stack realignment.
1551 MaybeAlign StackAlign = DL.getStackAlignment();
1552 if (StackAlign && PrefAlign > *StackAlign)
1553 return CurrentAlign;
1554 AI->setAlignment(PrefAlign);
1555 return PrefAlign;
1556 }
1557
1558 if (auto *GV = dyn_cast<GlobalVariable>(V)) {
1559 // TODO: as above, this shouldn't be necessary.
1560 Align CurrentAlign = GV->getPointerAlignment(DL);
1561 if (PrefAlign <= CurrentAlign)
1562 return CurrentAlign;
1563
1564 // If there is a large requested alignment and we can, bump up the alignment
1565 // of the global. If the memory we set aside for the global may not be the
1566 // memory used by the final program then it is impossible for us to reliably
1567 // enforce the preferred alignment.
1568 if (!GV->canIncreaseAlignment())
1569 return CurrentAlign;
1570
// Thread-local globals are capped by the target's maximum TLS alignment.
1571 if (GV->isThreadLocal()) {
1572 unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1573 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1574 PrefAlign = Align(MaxTLSAlign);
1575 }
1576
1577 GV->setAlignment(PrefAlign);
1578 return PrefAlign;
1579 }
1580
1581 return Align(1);
1582}
1583
1585 const DataLayout &DL,
1586 const Instruction *CxtI,
1587 AssumptionCache *AC,
1588 const DominatorTree *DT) {
1589 assert(V->getType()->isPointerTy() &&
1590 "getOrEnforceKnownAlignment expects a pointer!");
1591
// Derive the provable alignment from the pointer's known-zero low bits.
1592 KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
1593 unsigned TrailZ = Known.countMinTrailingZeros();
1594
1595 // Avoid trouble with ridiculously large TrailZ values, such as
1596 // those computed from a null pointer.
1597 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1598 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1599
1600 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1601
// If a preferred alignment was requested and exceeds what is provable, try
// to actually raise the underlying object's alignment.
1602 if (PrefAlign && *PrefAlign > Alignment)
1603 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1604
1605 // We don't need to make any adjustment.
1606 return Alignment;
1607}
1608
1609///===---------------------------------------------------------------------===//
1610/// Dbg Intrinsic utilities
1611///
1612
1613/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1615 DIExpression *DIExpr,
1616 PHINode *APN) {
1617 // Since we can't guarantee that the original dbg.declare intrinsic
1618 // is removed by LowerDbgDeclare(), we need to make sure that we are
1619 // not inserting the same dbg.value intrinsic over and over.
1620 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1621 findDbgValues(APN, DbgVariableRecords);
// A record counts as a match only if both the variable and the expression
// are identical to the ones we intend to insert.
1622 for (DbgVariableRecord *DVR : DbgVariableRecords) {
1623 assert(is_contained(DVR->location_ops(), APN));
1624 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1625 return true;
1626 }
1627 return false;
1628}
1629
1630/// Check if the alloc size of \p ValTy is large enough to cover the variable
1631/// (or fragment of the variable) described by \p DII.
1632///
1633/// This is primarily intended as a helper for the different
1634/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1635/// describes an alloca'd variable, so we need to use the alloc size of the
1636/// value when doing the comparison. E.g. an i1 value will be identified as
1637/// covering an n-bit fragment, if the store size of i1 is at least n bits.
1639 const DataLayout &DL = DVR->getModule()->getDataLayout();
1640 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
// Preferred path: compare against the fragment/variable size computed from
// the debug expression itself.
1641 if (std::optional<uint64_t> FragmentSize =
1642 DVR->getExpression()->getActiveBits(DVR->getVariable()))
1643 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1644
1645 // We can't always calculate the size of the DI variable (e.g. if it is a
1646 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1647 // instead.
1648 if (DVR->isAddressOfVariable()) {
1649 // DVR should have exactly 1 location when it is an address.
1650 assert(DVR->getNumVariableLocationOps() == 1 &&
1651 "address of variable must have exactly 1 location operand.");
1652 if (auto *AI =
1654 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1655 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1656 }
1657 }
1658 }
1659 // Could not determine size of variable. Conservatively return false.
1660 return false;
1661}
1662
1664 DILocalVariable *DIVar,
1665 DIExpression *DIExpr,
1666 const DebugLoc &NewLoc,
1667 BasicBlock::iterator Instr) {
// Build a fresh DbgVariableRecord for (DIVar, DIExpr) and attach it
// immediately before Instr.
1669 DbgVariableRecord *DVRec =
1670 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1671 Instr->getParent()->insertDbgRecordBefore(DVRec, Instr);
1672}
1673
// Strip the leading dereference from a debug expression. For DW_OP_LLVM_arg
// expressions the first three elements form the dereference; otherwise only
// the first element does.
1675 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
1676 return DIExpression::get(DIExpr->getContext(),
1677 DIExpr->getElements().drop_front(NumEltDropped));
1678}
1679
1681 StoreInst *SI, DIBuilder &Builder) {
1682 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1683 auto *DIVar = DVR->getVariable();
1684 assert(DIVar && "Missing variable");
1685 auto *DIExpr = DVR->getExpression();
1686 Value *DV = SI->getValueOperand();
1687
1688 DebugLoc NewLoc = getDebugValueLoc(DVR);
1689
1690 // If the alloca describes the variable itself, i.e. the expression in the
1691 // dbg.declare doesn't start with a dereference, we can perform the
1692 // conversion if the value covers the entire fragment of DII.
1693 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1694 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1695 // We conservatively ignore other dereferences, because the following two are
1696 // not equivalent:
1697 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1698 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1699 // The former is adding 2 to the address of the variable, whereas the latter
1700 // is adding 2 to the value of the variable. As such, we insist on just a
1701 // deref expression.
1702 bool CanConvert =
1703 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1705 if (CanConvert) {
1706 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1707 SI->getIterator());
1708 return;
1709 }
1710
1711 // FIXME: If storing to a part of the variable described by the dbg.declare,
1712 // then we want to insert a dbg.value for the corresponding fragment.
1713 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1714 << '\n');
1715
1716 // For now, when there is a store to parts of the variable (but we do not
1717 // know which part) we insert an dbg.value intrinsic to indicate that we
1718 // know nothing about the variable's content.
1719 DV = PoisonValue::get(DV->getType());
1721 DbgVariableRecord *NewDVR =
1722 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1723 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1724}
1725
1726 DIBuilder &Builder) {
// Emit a dbg.value for the stored value at the store location, dropping the
// leading dereference from the declare's expression since we now describe
// the value rather than its address.
1728 auto *DIVar = DVR->getVariable();
1729 assert(DIVar && "Missing variable");
1730 auto *DIExpr = DVR->getExpression();
1731 DIExpr = dropInitialDeref(DIExpr);
1732 Value *DV = SI->getValueOperand();
1733
1734 DebugLoc NewLoc = getDebugValueLoc(DVR);
1735
1736 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1737 SI->getIterator());
1738}
1739
1741 DIBuilder &Builder) {
1742 auto *DIVar = DVR->getVariable();
1743 auto *DIExpr = DVR->getExpression();
1744 assert(DIVar && "Missing variable");
1745
1746 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1747 // FIXME: If only referring to a part of the variable described by the
1748 // dbg.declare, then we want to insert a DbgVariableRecord for the
1749 // corresponding fragment.
1750 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1751 << *DVR << '\n');
1752 return;
1753 }
1754
1755 DebugLoc NewLoc = getDebugValueLoc(DVR);
1756
1757 // We are now tracking the loaded value instead of the address. In the
1758 // future if multi-location support is added to the IR, it might be
1759 // preferable to keep tracking both the loaded value and the original
1760 // address in case the alloca can not be elided.
1761
1762 // Create a DbgVariableRecord directly and insert.
// Inserted *after* the load so the record describes the loaded value.
1764 DbgVariableRecord *DV =
1765 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1766 LI->getParent()->insertDbgRecordAfter(DV, LI);
1767}
1768
1769/// Determine whether this alloca is either a VLA or an array.
1770static bool isArray(AllocaInst *AI) {
1771 return AI->isArrayAllocation() ||
1772 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1773}
1774
1775/// Determine whether this alloca is a structure.
1776static bool isStructure(AllocaInst *AI) {
1777 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1778}
1780 DIBuilder &Builder) {
1781 auto *DIVar = DVR->getVariable();
1782 auto *DIExpr = DVR->getExpression();
1783 assert(DIVar && "Missing variable");
1784
// Avoid inserting the same record twice if one already describes this PHI.
1785 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1786 return;
1787
1788 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1789 // FIXME: If only referring to a part of the variable described by the
1790 // dbg.declare, then we want to insert a DbgVariableRecord for the
1791 // corresponding fragment.
1792 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1793 << *DVR << '\n');
1794 return;
1795 }
1796
1797 BasicBlock *BB = APN->getParent();
1798 auto InsertionPt = BB->getFirstInsertionPt();
1799
1800 DebugLoc NewLoc = getDebugValueLoc(DVR);
1801
1802 // The block may be a catchswitch block, which does not have a valid
1803 // insertion point.
1804 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1805 if (InsertionPt != BB->end()) {
1806 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1807 InsertionPt);
1808 }
1809}
1810
1811/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1812/// of llvm.dbg.value intrinsics.
1814 bool Changed = false;
1815 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
// First collect all declare-style debug records in the function, then lower
// them; collecting first avoids mutating while iterating.
1818 for (auto &FI : F) {
1819 for (Instruction &BI : FI) {
1820 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1821 Dbgs.push_back(DDI);
1822 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1823 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1824 DVRs.push_back(&DVR);
1825 }
1826 }
1827 }
1828
1829 if (Dbgs.empty() && DVRs.empty())
1830 return Changed;
1831
1832 auto LowerOne = [&](DbgVariableRecord *DDI) {
1833 AllocaInst *AI =
1834 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1835 // If this is an alloca for a scalar variable, insert a dbg.value
1836 // at each load and store to the alloca and erase the dbg.declare.
1837 // The dbg.values allow tracking a variable even if it is not
1838 // stored on the stack, while the dbg.declare can only describe
1839 // the stack slot (and at a lexical-scope granularity). Later
1840 // passes will attempt to elide the stack slot.
1841 if (!AI || isArray(AI) || isStructure(AI))
1842 return;
1843
1844 // A volatile load/store means that the alloca can't be elided anyway.
1845 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1846 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1847 return LI->isVolatile();
1848 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1849 return SI->isVolatile();
1850 return false;
1851 }))
1852 return;
1853
// Walk the alloca and any pointer bitcasts of it, lowering each use.
1855 WorkList.push_back(AI);
1856 while (!WorkList.empty()) {
1857 const Value *V = WorkList.pop_back_val();
1858 for (const auto &AIUse : V->uses()) {
1859 User *U = AIUse.getUser();
1860 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// Only stores *to* the alloca (pointer operand, index 1) are converted;
// storing the alloca's address elsewhere is not.
1861 if (AIUse.getOperandNo() == 1)
1863 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1864 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1865 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1866 // This is a call by-value or some other instruction that takes a
1867 // pointer to the variable. Insert a *value* intrinsic that describes
1868 // the variable by dereferencing the alloca.
1869 if (!CI->isLifetimeStartOrEnd()) {
1870 DebugLoc NewLoc = getDebugValueLoc(DDI);
1871 auto *DerefExpr =
1872 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1873 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1874 DerefExpr, NewLoc,
1875 CI->getIterator());
1876 }
1877 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1878 if (BI->getType()->isPointerTy())
1879 WorkList.push_back(BI);
1880 }
1881 }
1882 }
1883 DDI->eraseFromParent();
1884 Changed = true;
1885 };
1886
1887 for_each(DVRs, LowerOne);
1888
1889 if (Changed)
1890 for (BasicBlock &BB : F)
1892
1893 return Changed;
1894}
1895
1896/// Propagate dbg.value records through the newly inserted PHIs.
1898 SmallVectorImpl<PHINode *> &InsertedPHIs) {
1899 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
1900 if (InsertedPHIs.size() == 0)
1901 return;
1902
1903 // Map existing PHI nodes to their DbgVariableRecords.
1905 for (auto &I : *BB) {
1906 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1907 for (Value *V : DVR.location_ops())
1908 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1909 DbgValueMap.insert({Loc, &DVR});
1910 }
1911 }
1912 if (DbgValueMap.size() == 0)
1913 return;
1914
1915 // Map a pair of the destination BB and old DbgVariableRecord to the new
1916 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
1917 // more than one of the inserted PHIs in the same destination BB, we can
1918 // update the same DbgVariableRecord with all the new PHIs instead of creating
1919 // one copy for each.
1921 NewDbgValueMap;
1922 // Then iterate through the new PHIs and look to see if they use one of the
1923 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
1924 // propagate the info through the new PHI. If we use more than one new PHI in
1925 // a single destination BB with the same old dbg.value, merge the updates so
1926 // that we get a single new DbgVariableRecord with all the new PHIs.
1927 for (auto PHI : InsertedPHIs) {
1928 BasicBlock *Parent = PHI->getParent();
1929 // Avoid inserting a debug-info record into an EH block.
1930 if (Parent->getFirstNonPHIIt()->isEHPad())
1931 continue;
1932 for (auto VI : PHI->operand_values()) {
1933 auto V = DbgValueMap.find(VI);
1934 if (V != DbgValueMap.end()) {
1935 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
1936 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1937 if (NewDI == NewDbgValueMap.end()) {
1938 DbgVariableRecord *NewDbgII = DbgII->clone();
1939 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1940 }
1941 DbgVariableRecord *NewDbgII = NewDI->second;
1942 // If PHI contains VI as an operand more than once, we may
1943 // replaced it in NewDbgII; confirm that it is present.
1944 if (is_contained(NewDbgII->location_ops(), VI))
1945 NewDbgII->replaceVariableLocationOp(VI, PHI);
1946 }
1947 }
1948 }
1949 // Insert the new DbgVariableRecords into their destination blocks.
1950 for (auto DI : NewDbgValueMap) {
1951 BasicBlock *Parent = DI.first.first;
1952 DbgVariableRecord *NewDbgII = DI.second;
1953 auto InsertionPt = Parent->getFirstInsertionPt();
1954 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1955
1956 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
1957 }
1958}
1959
1961 DIBuilder &Builder, uint8_t DIExprFlags,
1962 int Offset) {
1964
1965 auto ReplaceOne = [&](DbgVariableRecord *DII) {
1966 assert(DII->getVariable() && "Missing variable");
1967 auto *DIExpr = DII->getExpression();
1968 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1969 DII->setExpression(DIExpr);
1970 DII->replaceVariableLocationOp(Address, NewAddress);
1971 };
1972
1973 for_each(DVRDeclares, ReplaceOne);
1974
1975 return !DVRDeclares.empty();
1976}
1977
1979 DILocalVariable *DIVar,
1980 DIExpression *DIExpr, Value *NewAddress,
1981 DbgVariableRecord *DVR,
1982 DIBuilder &Builder, int Offset) {
1983 assert(DIVar && "Missing variable");
1984
1985 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
1986 // should do with the alloca pointer is dereference it. Otherwise we don't
1987 // know how to handle it and give up.
1988 if (!DIExpr || DIExpr->getNumElements() < 1 ||
1989 DIExpr->getElement(0) != dwarf::DW_OP_deref)
1990 return;
1991
1992 // Insert the offset before the first deref.
1993 if (Offset)
1994 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1995
1996 DVR->setExpression(DIExpr);
1997 DVR->replaceVariableLocationOp(0u, NewAddress);
1998}
1999
2001 DIBuilder &Builder, int Offset) {
2003 findDbgValues(AI, DPUsers);
2004
2005 // Replace any DbgVariableRecords that use this alloca.
2006 for (DbgVariableRecord *DVR : DPUsers)
2007 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2008 DVR->getExpression(), NewAllocaAddress, DVR,
2009 Builder, Offset);
2010}
2011
2012/// Where possible to salvage debug information for \p I do so.
2013/// If not possible mark undef.
2019
2020template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2021 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2022 // Only instructions can be salvaged at the moment.
2023 if (!I)
2024 return;
2025
2026 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2027 "address-expression shouldn't have fragment info");
2028
2029 // The address component of a dbg.assign cannot be variadic.
2030 uint64_t CurrentLocOps = 0;
2031 SmallVector<Value *, 4> AdditionalValues;
2033 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2034
2035 // Check if the salvage failed.
2036 if (!NewV)
2037 return;
2038
2040 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2041 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2042 "address-expression shouldn't have fragment info");
2043
2044 SalvagedExpr = SalvagedExpr->foldConstantMath();
2045
2046 // Salvage succeeds if no additional values are required.
2047 if (AdditionalValues.empty()) {
2048 Assign->setAddress(NewV);
2049 Assign->setAddressExpression(SalvagedExpr);
2050 } else {
2051 Assign->setKillAddress();
2052 }
2053}
2054
2057 // These are arbitrary chosen limits on the maximum number of values and the
2058 // maximum size of a debug expression we can salvage up to, used for
2059 // performance reasons.
2060 const unsigned MaxDebugArgs = 16;
2061 const unsigned MaxExpressionSize = 128;
2062 bool Salvaged = false;
2063
2064 for (auto *DVR : DPUsers) {
2065 if (DVR->isDbgAssign()) {
2066 if (DVR->getAddress() == &I) {
2068 Salvaged = true;
2069 }
2070 if (DVR->getValue() != &I)
2071 continue;
2072 }
2073
2074 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2075 // are implicitly pointing out the value as a DWARF memory location
2076 // description.
2077 bool StackValue =
2079 auto DVRLocation = DVR->location_ops();
2080 assert(
2081 is_contained(DVRLocation, &I) &&
2082 "DbgVariableIntrinsic must use salvaged instruction as its location");
2083 SmallVector<Value *, 4> AdditionalValues;
2084 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2085 // must be updated in the DIExpression and potentially have additional
2086 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2087 // DVRLocation.
2088 Value *Op0 = nullptr;
2089 DIExpression *SalvagedExpr = DVR->getExpression();
2090 auto LocItr = find(DVRLocation, &I);
2091 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2093 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2094 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2095 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2096 if (!Op0)
2097 break;
2098 SalvagedExpr =
2099 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2100 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2101 }
2102 // salvageDebugInfoImpl should fail on examining the first element of
2103 // DbgUsers, or none of them.
2104 if (!Op0)
2105 break;
2106
2107 SalvagedExpr = SalvagedExpr->foldConstantMath();
2108 DVR->replaceVariableLocationOp(&I, Op0);
2109 bool IsValidSalvageExpr =
2110 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2111 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2112 DVR->setExpression(SalvagedExpr);
2113 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2114 IsValidSalvageExpr &&
2115 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2116 MaxDebugArgs) {
2117 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2118 } else {
2119 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2120 // currently only valid for stack value expressions.
2121 // Also do not salvage if the resulting DIArgList would contain an
2122 // unreasonably large number of values.
2123 DVR->setKillLocation();
2124 }
2125 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2126 Salvaged = true;
2127 }
2128
2129 if (Salvaged)
2130 return;
2131
2132 for (auto *DVR : DPUsers)
2133 DVR->setKillLocation();
2134}
2135
2137 uint64_t CurrentLocOps,
2139 SmallVectorImpl<Value *> &AdditionalValues) {
2140 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2141 // Rewrite a GEP into a DIExpression.
2142 SmallMapVector<Value *, APInt, 4> VariableOffsets;
2143 APInt ConstantOffset(BitWidth, 0);
2144 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2145 return nullptr;
2146 if (!VariableOffsets.empty() && !CurrentLocOps) {
2147 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2148 CurrentLocOps = 1;
2149 }
2150 for (const auto &Offset : VariableOffsets) {
2151 AdditionalValues.push_back(Offset.first);
2152 assert(Offset.second.isStrictlyPositive() &&
2153 "Expected strictly positive multiplier for offset.");
2154 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2155 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2156 dwarf::DW_OP_plus});
2157 }
2158 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2159 return GEP->getOperand(0);
2160}
2161
2163 switch (Opcode) {
2164 case Instruction::Add:
2165 return dwarf::DW_OP_plus;
2166 case Instruction::Sub:
2167 return dwarf::DW_OP_minus;
2168 case Instruction::Mul:
2169 return dwarf::DW_OP_mul;
2170 case Instruction::SDiv:
2171 return dwarf::DW_OP_div;
2172 case Instruction::SRem:
2173 return dwarf::DW_OP_mod;
2174 case Instruction::Or:
2175 return dwarf::DW_OP_or;
2176 case Instruction::And:
2177 return dwarf::DW_OP_and;
2178 case Instruction::Xor:
2179 return dwarf::DW_OP_xor;
2180 case Instruction::Shl:
2181 return dwarf::DW_OP_shl;
2182 case Instruction::LShr:
2183 return dwarf::DW_OP_shr;
2184 case Instruction::AShr:
2185 return dwarf::DW_OP_shra;
2186 default:
2187 // TODO: Salvage from each kind of binop we know about.
2188 return 0;
2189 }
2190}
2191
2192static void handleSSAValueOperands(uint64_t CurrentLocOps,
2194 SmallVectorImpl<Value *> &AdditionalValues,
2195 Instruction *I) {
2196 if (!CurrentLocOps) {
2197 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2198 CurrentLocOps = 1;
2199 }
2200 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2201 AdditionalValues.push_back(I->getOperand(1));
2202}
2203
2206 SmallVectorImpl<Value *> &AdditionalValues) {
2207 // Handle binary operations with constant integer operands as a special case.
2208 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2209 // Values wider than 64 bits cannot be represented within a DIExpression.
2210 if (ConstInt && ConstInt->getBitWidth() > 64)
2211 return nullptr;
2212
2213 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2214 // Push any Constant Int operand onto the expression stack.
2215 if (ConstInt) {
2216 uint64_t Val = ConstInt->getSExtValue();
2217 // Add or Sub Instructions with a constant operand can potentially be
2218 // simplified.
2219 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2220 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2222 return BI->getOperand(0);
2223 }
2224 Opcodes.append({dwarf::DW_OP_constu, Val});
2225 } else {
2226 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2227 }
2228
2229 // Add salvaged binary operator to expression stack, if it has a valid
2230 // representation in a DIExpression.
2231 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2232 if (!DwarfBinOp)
2233 return nullptr;
2234 Opcodes.push_back(DwarfBinOp);
2235 return BI->getOperand(0);
2236}
2237
2239 // The signedness of the operation is implicit in the typed stack, signed and
2240 // unsigned instructions map to the same DWARF opcode.
2241 switch (Pred) {
2242 case CmpInst::ICMP_EQ:
2243 return dwarf::DW_OP_eq;
2244 case CmpInst::ICMP_NE:
2245 return dwarf::DW_OP_ne;
2246 case CmpInst::ICMP_UGT:
2247 case CmpInst::ICMP_SGT:
2248 return dwarf::DW_OP_gt;
2249 case CmpInst::ICMP_UGE:
2250 case CmpInst::ICMP_SGE:
2251 return dwarf::DW_OP_ge;
2252 case CmpInst::ICMP_ULT:
2253 case CmpInst::ICMP_SLT:
2254 return dwarf::DW_OP_lt;
2255 case CmpInst::ICMP_ULE:
2256 case CmpInst::ICMP_SLE:
2257 return dwarf::DW_OP_le;
2258 default:
2259 return 0;
2260 }
2261}
2262
2265 SmallVectorImpl<Value *> &AdditionalValues) {
2266 // Handle icmp operations with constant integer operands as a special case.
2267 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2268 // Values wider than 64 bits cannot be represented within a DIExpression.
2269 if (ConstInt && ConstInt->getBitWidth() > 64)
2270 return nullptr;
2271 // Push any Constant Int operand onto the expression stack.
2272 if (ConstInt) {
2273 if (Icmp->isSigned())
2274 Opcodes.push_back(dwarf::DW_OP_consts);
2275 else
2276 Opcodes.push_back(dwarf::DW_OP_constu);
2277 uint64_t Val = ConstInt->getSExtValue();
2278 Opcodes.push_back(Val);
2279 } else {
2280 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2281 }
2282
2283 // Add salvaged binary operator to expression stack, if it has a valid
2284 // representation in a DIExpression.
2285 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2286 if (!DwarfIcmpOp)
2287 return nullptr;
2288 Opcodes.push_back(DwarfIcmpOp);
2289 return Icmp->getOperand(0);
2290}
2291
2294 SmallVectorImpl<Value *> &AdditionalValues) {
2295 auto &M = *I.getModule();
2296 auto &DL = M.getDataLayout();
2297
2298 if (auto *CI = dyn_cast<CastInst>(&I)) {
2299 Value *FromValue = CI->getOperand(0);
2300 // No-op casts are irrelevant for debug info.
2301 if (CI->isNoopCast(DL)) {
2302 return FromValue;
2303 }
2304
2305 Type *Type = CI->getType();
2306 if (Type->isPointerTy())
2307 Type = DL.getIntPtrType(Type);
2308 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2309 if (Type->isVectorTy() ||
2312 return nullptr;
2313
2314 llvm::Type *FromType = FromValue->getType();
2315 if (FromType->isPointerTy())
2316 FromType = DL.getIntPtrType(FromType);
2317
2318 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2319 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2320
2321 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2322 isa<SExtInst>(&I));
2323 Ops.append(ExtOps.begin(), ExtOps.end());
2324 return FromValue;
2325 }
2326
2327 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2328 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2329 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2330 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2331 if (auto *IC = dyn_cast<ICmpInst>(&I))
2332 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2333
2334 // *Not* to do: we should not attempt to salvage load instructions,
2335 // because the validity and lifetime of a dbg.value containing
2336 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2337 return nullptr;
2338}
2339
2340/// A replacement for a dbg.value expression.
2341using DbgValReplacement = std::optional<DIExpression *>;
2342
2343/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2344/// possibly moving/undefing users to prevent use-before-def. Returns true if
2345/// changes are made.
2347 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2348 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2349 // Find debug users of From.
2351 findDbgUsers(&From, DPUsers);
2352 if (DPUsers.empty())
2353 return false;
2354
2355 // Prevent use-before-def of To.
2356 bool Changed = false;
2357
2358 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2359 if (isa<Instruction>(&To)) {
2360 bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
2361
2362 // DbgVariableRecord implementation of the above.
2363 for (auto *DVR : DPUsers) {
2364 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2365 Instruction *NextNonDebug = MarkedInstr;
2366
2367 // It's common to see a debug user between From and DomPoint. Move it
2368 // after DomPoint to preserve the variable update without any reordering.
2369 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2370 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2371 DVR->removeFromParent();
2372 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2373 Changed = true;
2374
2375 // Users which otherwise aren't dominated by the replacement value must
2376 // be salvaged or deleted.
2377 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2378 UndefOrSalvageDVR.insert(DVR);
2379 }
2380 }
2381 }
2382
2383 // Update debug users without use-before-def risk.
2384 for (auto *DVR : DPUsers) {
2385 if (UndefOrSalvageDVR.count(DVR))
2386 continue;
2387
2388 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2389 if (!DVRepl)
2390 continue;
2391
2392 DVR->replaceVariableLocationOp(&From, &To);
2393 DVR->setExpression(*DVRepl);
2394 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2395 Changed = true;
2396 }
2397
2398 if (!UndefOrSalvageDVR.empty()) {
2399 // Try to salvage the remaining debug users.
2400 salvageDebugInfo(From);
2401 Changed = true;
2402 }
2403
2404 return Changed;
2405}
2406
2407/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2408/// losslessly preserve the bits and semantics of the value. This predicate is
2409/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2410///
2411/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2412/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2413/// and also does not allow lossless pointer <-> integer conversions.
2415 Type *ToTy) {
2416 // Trivially compatible types.
2417 if (FromTy == ToTy)
2418 return true;
2419
2420 // Handle compatible pointer <-> integer conversions.
2421 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2422 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2423 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2424 !DL.isNonIntegralPointerType(ToTy);
2425 return SameSize && LosslessConversion;
2426 }
2427
2428 // TODO: This is not exhaustive.
2429 return false;
2430}
2431
2433 Instruction &DomPoint, DominatorTree &DT) {
2434 // Exit early if From has no debug users.
2435 if (!From.isUsedByMetadata())
2436 return false;
2437
2438 assert(&From != &To && "Can't replace something with itself");
2439
2440 Type *FromTy = From.getType();
2441 Type *ToTy = To.getType();
2442
2443 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2444 return DVR.getExpression();
2445 };
2446
2447 // Handle no-op conversions.
2448 Module &M = *From.getModule();
2449 const DataLayout &DL = M.getDataLayout();
2450 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2451 return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2452
2453 // Handle integer-to-integer widening and narrowing.
2454 // FIXME: Use DW_OP_convert when it's available everywhere.
2455 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2456 uint64_t FromBits = FromTy->getIntegerBitWidth();
2457 uint64_t ToBits = ToTy->getIntegerBitWidth();
2458 assert(FromBits != ToBits && "Unexpected no-op conversion");
2459
2460 // When the width of the result grows, assume that a debugger will only
2461 // access the low `FromBits` bits when inspecting the source variable.
2462 if (FromBits < ToBits)
2463 return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2464
2465 // The width of the result has shrunk. Use sign/zero extension to describe
2466 // the source variable's high bits.
2467 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2468 DILocalVariable *Var = DVR.getVariable();
2469
2470 // Without knowing signedness, sign/zero extension isn't possible.
2471 auto Signedness = Var->getSignedness();
2472 if (!Signedness)
2473 return std::nullopt;
2474
2475 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2476 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2477 Signed);
2478 };
2479 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExtDVR);
2480 }
2481
2482 // TODO: Floating-point conversions, vectors.
2483 return false;
2484}
2485
2487 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2488 bool Changed = false;
2489 // RemoveDIs: erase debug-info on this instruction manually.
2490 I->dropDbgRecords();
2491 for (Use &U : I->operands()) {
2492 Value *Op = U.get();
2493 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2494 U.set(PoisonValue::get(Op->getType()));
2495 PoisonedValues.push_back(Op);
2496 Changed = true;
2497 }
2498 }
2499
2500 return Changed;
2501}
2502
2504 unsigned NumDeadInst = 0;
2505 // Delete the instructions backwards, as it has a reduced likelihood of
2506 // having to update as many def-use and use-def chains.
2507 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2510
2511 while (EndInst != &BB->front()) {
2512 // Delete the next to last instruction.
2513 Instruction *Inst = &*--EndInst->getIterator();
2514 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2516 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2517 // EHPads can't have DbgVariableRecords attached to them, but it might be
2518 // possible for things with token type.
2519 Inst->dropDbgRecords();
2520 EndInst = Inst;
2521 continue;
2522 }
2523 ++NumDeadInst;
2524 // RemoveDIs: erasing debug-info must be done manually.
2525 Inst->dropDbgRecords();
2526 Inst->eraseFromParent();
2527 }
2528 return NumDeadInst;
2529}
2530
2531unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2532 DomTreeUpdater *DTU,
2533 MemorySSAUpdater *MSSAU) {
2534 BasicBlock *BB = I->getParent();
2535
2536 if (MSSAU)
2537 MSSAU->changeToUnreachable(I);
2538
2539 SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
2540
2541 // Loop over all of the successors, removing BB's entry from any PHI
2542 // nodes.
2543 for (BasicBlock *Successor : successors(BB)) {
2544 Successor->removePredecessor(BB, PreserveLCSSA);
2545 if (DTU)
2546 UniqueSuccessors.insert(Successor);
2547 }
2548 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2549 UI->setDebugLoc(I->getDebugLoc());
2550
2551 // All instructions after this are dead.
2552 unsigned NumInstrsRemoved = 0;
2553 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2554 while (BBI != BBE) {
2555 if (!BBI->use_empty())
2556 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2557 BBI++->eraseFromParent();
2558 ++NumInstrsRemoved;
2559 }
2560 if (DTU) {
2562 Updates.reserve(UniqueSuccessors.size());
2563 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2564 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2565 DTU->applyUpdates(Updates);
2566 }
2568 return NumInstrsRemoved;
2569}
2570
2572 SmallVector<Value *, 8> Args(II->args());
2574 II->getOperandBundlesAsDefs(OpBundles);
2575 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2576 II->getCalledOperand(), Args, OpBundles);
2577 NewCall->setCallingConv(II->getCallingConv());
2578 NewCall->setAttributes(II->getAttributes());
2579 NewCall->setDebugLoc(II->getDebugLoc());
2580 NewCall->copyMetadata(*II);
2581
2582 // If the invoke had profile metadata, try converting them for CallInst.
2583 uint64_t TotalWeight;
2584 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2585 // Set the total weight if it fits into i32, otherwise reset.
2586 MDBuilder MDB(NewCall->getContext());
2587 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2588 ? nullptr
2589 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2590 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2591 }
2592
2593 return NewCall;
2594}
2595
2596// changeToCall - Convert the specified invoke into a normal call.
2599 NewCall->takeName(II);
2600 NewCall->insertBefore(II->getIterator());
2601 II->replaceAllUsesWith(NewCall);
2602
2603 // Follow the call by a branch to the normal destination.
2604 BasicBlock *NormalDestBB = II->getNormalDest();
2605 auto *BI = BranchInst::Create(NormalDestBB, II->getIterator());
2606 // Although it takes place after the call itself, the new branch is still
2607 // performing part of the control-flow functionality of the invoke, so we use
2608 // II's DebugLoc.
2609 BI->setDebugLoc(II->getDebugLoc());
2610
2611 // Update PHI nodes in the unwind destination
2612 BasicBlock *BB = II->getParent();
2613 BasicBlock *UnwindDestBB = II->getUnwindDest();
2614 UnwindDestBB->removePredecessor(BB);
2615 II->eraseFromParent();
2616 if (DTU)
2617 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2618 return NewCall;
2619}
2620
2622 BasicBlock *UnwindEdge,
2623 DomTreeUpdater *DTU) {
2624 BasicBlock *BB = CI->getParent();
2625
2626 // Convert this function call into an invoke instruction. First, split the
2627 // basic block.
2628 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2629 CI->getName() + ".noexc");
2630
2631 // Delete the unconditional branch inserted by SplitBlock
2632 BB->back().eraseFromParent();
2633
2634 // Create the new invoke instruction.
2635 SmallVector<Value *, 8> InvokeArgs(CI->args());
2637
2638 CI->getOperandBundlesAsDefs(OpBundles);
2639
2640 // Note: we're round tripping operand bundles through memory here, and that
2641 // can potentially be avoided with a cleverer API design that we do not have
2642 // as of this time.
2643
2644 InvokeInst *II =
2646 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2647 II->setDebugLoc(CI->getDebugLoc());
2648 II->setCallingConv(CI->getCallingConv());
2649 II->setAttributes(CI->getAttributes());
2650 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2651
2652 if (DTU)
2653 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2654
2655 // Make sure that anything using the call now uses the invoke! This also
2656 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2658
2659 // Delete the original call
2660 Split->front().eraseFromParent();
2661 return Split;
2662}
2663
2666 DomTreeUpdater *DTU = nullptr) {
2668 BasicBlock *BB = &F.front();
2669 Worklist.push_back(BB);
2670 Reachable.insert(BB);
2671 bool Changed = false;
2672 do {
2673 BB = Worklist.pop_back_val();
2674
2675 // Do a quick scan of the basic block, turning any obviously unreachable
2676 // instructions into LLVM unreachable insts. The instruction combining pass
2677 // canonicalizes unreachable insts into stores to null or undef.
2678 for (Instruction &I : *BB) {
2679 if (auto *CI = dyn_cast<CallInst>(&I)) {
2680 Value *Callee = CI->getCalledOperand();
2681 // Handle intrinsic calls.
2682 if (Function *F = dyn_cast<Function>(Callee)) {
2683 auto IntrinsicID = F->getIntrinsicID();
2684 // Assumptions that are known to be false are equivalent to
2685 // unreachable. Also, if the condition is undefined, then we make the
2686 // choice most beneficial to the optimizer, and choose that to also be
2687 // unreachable.
2688 if (IntrinsicID == Intrinsic::assume) {
2689 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2690 // Don't insert a call to llvm.trap right before the unreachable.
2691 changeToUnreachable(CI, false, DTU);
2692 Changed = true;
2693 break;
2694 }
2695 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2696 // A call to the guard intrinsic bails out of the current
2697 // compilation unit if the predicate passed to it is false. If the
2698 // predicate is a constant false, then we know the guard will bail
2699 // out of the current compile unconditionally, so all code following
2700 // it is dead.
2701 //
2702 // Note: unlike in llvm.assume, it is not "obviously profitable" for
2703 // guards to treat `undef` as `false` since a guard on `undef` can
2704 // still be useful for widening.
2705 if (match(CI->getArgOperand(0), m_Zero()))
2706 if (!isa<UnreachableInst>(CI->getNextNode())) {
2707 changeToUnreachable(CI->getNextNode(), false, DTU);
2708 Changed = true;
2709 break;
2710 }
2711 }
2712 } else if ((isa<ConstantPointerNull>(Callee) &&
2713 !NullPointerIsDefined(CI->getFunction(),
2714 cast<PointerType>(Callee->getType())
2715 ->getAddressSpace())) ||
2716 isa<UndefValue>(Callee)) {
2717 changeToUnreachable(CI, false, DTU);
2718 Changed = true;
2719 break;
2720 }
2721 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2722 // If we found a call to a no-return function, insert an unreachable
2723 // instruction after it. Make sure there isn't *already* one there
2724 // though.
2725 if (!isa<UnreachableInst>(CI->getNextNode())) {
2726 // Don't insert a call to llvm.trap right before the unreachable.
2727 changeToUnreachable(CI->getNextNode(), false, DTU);
2728 Changed = true;
2729 }
2730 break;
2731 }
2732 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2733 // Store to undef and store to null are undefined and used to signal
2734 // that they should be changed to unreachable by passes that can't
2735 // modify the CFG.
2736
2737 // Don't touch volatile stores.
2738 if (SI->isVolatile()) continue;
2739
2740 Value *Ptr = SI->getOperand(1);
2741
2742 if (isa<UndefValue>(Ptr) ||
2744 !NullPointerIsDefined(SI->getFunction(),
2745 SI->getPointerAddressSpace()))) {
2746 changeToUnreachable(SI, false, DTU);
2747 Changed = true;
2748 break;
2749 }
2750 }
2751 }
2752
2753 Instruction *Terminator = BB->getTerminator();
2754 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2755 // Turn invokes that call 'nounwind' functions into ordinary calls.
2756 Value *Callee = II->getCalledOperand();
2757 if ((isa<ConstantPointerNull>(Callee) &&
2758 !NullPointerIsDefined(BB->getParent())) ||
2759 isa<UndefValue>(Callee)) {
2760 changeToUnreachable(II, false, DTU);
2761 Changed = true;
2762 } else {
2763 if (II->doesNotReturn() &&
2764 !isa<UnreachableInst>(II->getNormalDest()->front())) {
2765 // If we found an invoke of a no-return function,
2766 // create a new empty basic block with an `unreachable` terminator,
2767 // and set it as the normal destination for the invoke,
2768 // unless that is already the case.
2769 // Note that the original normal destination could have other uses.
2770 BasicBlock *OrigNormalDest = II->getNormalDest();
2771 OrigNormalDest->removePredecessor(II->getParent());
2772 LLVMContext &Ctx = II->getContext();
2773 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2774 Ctx, OrigNormalDest->getName() + ".unreachable",
2775 II->getFunction(), OrigNormalDest);
2776 auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
2777 UI->setDebugLoc(DebugLoc::getTemporary());
2778 II->setNormalDest(UnreachableNormalDest);
2779 if (DTU)
2780 DTU->applyUpdates(
2781 {{DominatorTree::Delete, BB, OrigNormalDest},
2782 {DominatorTree::Insert, BB, UnreachableNormalDest}});
2783 Changed = true;
2784 }
2785 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2786 if (II->use_empty() && !II->mayHaveSideEffects()) {
2787 // jump to the normal destination branch.
2788 BasicBlock *NormalDestBB = II->getNormalDest();
2789 BasicBlock *UnwindDestBB = II->getUnwindDest();
2790 BranchInst::Create(NormalDestBB, II->getIterator());
2791 UnwindDestBB->removePredecessor(II->getParent());
2792 II->eraseFromParent();
2793 if (DTU)
2794 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2795 } else
2796 changeToCall(II, DTU);
2797 Changed = true;
2798 }
2799 }
2800 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2801 // Remove catchpads which cannot be reached.
2802 struct CatchPadDenseMapInfo {
2803 static CatchPadInst *getEmptyKey() {
2805 }
2806
2807 static CatchPadInst *getTombstoneKey() {
2809 }
2810
2811 static unsigned getHashValue(CatchPadInst *CatchPad) {
2812 return static_cast<unsigned>(hash_combine_range(
2813 CatchPad->value_op_begin(), CatchPad->value_op_end()));
2814 }
2815
2816 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2817 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2818 RHS == getEmptyKey() || RHS == getTombstoneKey())
2819 return LHS == RHS;
2820 return LHS->isIdenticalTo(RHS);
2821 }
2822 };
2823
2824 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2825 // Set of unique CatchPads.
2827 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2828 HandlerSet;
2830 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2831 E = CatchSwitch->handler_end();
2832 I != E; ++I) {
2833 BasicBlock *HandlerBB = *I;
2834 if (DTU)
2835 ++NumPerSuccessorCases[HandlerBB];
2836 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHIIt());
2837 if (!HandlerSet.insert({CatchPad, Empty}).second) {
2838 if (DTU)
2839 --NumPerSuccessorCases[HandlerBB];
2840 CatchSwitch->removeHandler(I);
2841 --I;
2842 --E;
2843 Changed = true;
2844 }
2845 }
2846 if (DTU) {
2847 std::vector<DominatorTree::UpdateType> Updates;
2848 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2849 if (I.second == 0)
2850 Updates.push_back({DominatorTree::Delete, BB, I.first});
2851 DTU->applyUpdates(Updates);
2852 }
2853 }
2854
2855 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2856 for (BasicBlock *Successor : successors(BB))
2857 if (Reachable.insert(Successor).second)
2858 Worklist.push_back(Successor);
2859 } while (!Worklist.empty());
2860 return Changed;
2861}
2862
// Replace BB's unwind-capable terminator with an equivalent terminator that
// has no unwind successor:
//   - invoke      -> plain call (via changeToCall)
//   - cleanupret  -> cleanupret that unwinds to caller
//   - catchswitch -> catchswitch with the same handlers, unwinding to caller
// The former unwind destination loses BB as a predecessor and, when a
// DomTreeUpdater is supplied, the deleted CFG edge is reported to it.
// NOTE(review): the signature line was lost in extraction; the body reads
// BB and DTU — presumably (BasicBlock *BB, DomTreeUpdater *DTU). Confirm
// against the upstream file.
2864 Instruction *TI = BB->getTerminator();
2865
// Invokes become calls; changeToCall performs all CFG/DTU bookkeeping itself.
2866 if (auto *II = dyn_cast<InvokeInst>(TI))
2867 return changeToCall(II, DTU);
2868
2869 Instruction *NewTI;
2870 BasicBlock *UnwindDest;
2871
2872 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
// A null unwind block argument means "unwind to caller".
2873 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
2874 UnwindDest = CRI->getUnwindDest();
2875 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
// Rebuild the catchswitch with an identical handler list but no unwind edge.
2876 auto *NewCatchSwitch = CatchSwitchInst::Create(
2877 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2878 CatchSwitch->getName(), CatchSwitch->getIterator());
2879 for (BasicBlock *PadBB : CatchSwitch->handlers())
2880 NewCatchSwitch->addHandler(PadBB);
2881
2882 NewTI = NewCatchSwitch;
2883 UnwindDest = CatchSwitch->getUnwindDest();
2884 } else {
// Callers must only pass blocks whose terminator actually has an unwind edge.
2885 llvm_unreachable("Could not find unwind successor");
2886 }
2887
// Preserve identity (name, debug location), detach the unwind destination,
// and splice the replacement terminator in place of the old one.
2888 NewTI->takeName(TI);
2889 NewTI->setDebugLoc(TI->getDebugLoc());
2890 UnwindDest->removePredecessor(BB);
2891 TI->replaceAllUsesWith(NewTI);
2892 TI->eraseFromParent();
2893 if (DTU)
2894 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2895 return NewTI;
2896}
2897
2898/// removeUnreachableBlocks - Remove blocks that are not reachable, even
2899/// if they are in a dead cycle. Return true if a change was made, false
2900/// otherwise.
// NOTE(review): the first signature line (and the declaration of `Reachable`,
// presumably a df_iterator set of BasicBlock*) were lost in extraction;
// upstream this is llvm::removeUnreachableBlocks(Function &F,
// DomTreeUpdater *DTU, MemorySSAUpdater *MSSAU) — confirm before editing.
2902 MemorySSAUpdater *MSSAU) {
// markAliveBlocks both computes reachability and simplifies terminators
// (e.g. no-return invokes), so it can report a change on its own.
2904 bool Changed = markAliveBlocks(F, Reachable, DTU);
2905
2906 // If there are unreachable blocks in the CFG...
2907 if (Reachable.size() == F.size())
2908 return Changed;
2909
2910 assert(Reachable.size() < F.size());
2911
2912 // Are there any blocks left to actually delete?
2913 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2914 for (BasicBlock &BB : F) {
2915 // Skip reachable basic blocks
2916 if (Reachable.count(&BB))
2917 continue;
2918 // Skip already-deleted blocks
2919 if (DTU && DTU->isBBPendingDeletion(&BB))
2920 continue;
2921 BlocksToRemove.insert(&BB);
2922 }
2923
2924 if (BlocksToRemove.empty())
2925 return Changed;
2926
2927 Changed = true;
2928 NumRemoved += BlocksToRemove.size();
2929
// MemorySSA must drop its accesses for these blocks before the IR goes away.
2930 if (MSSAU)
2931 MSSAU->removeBlocks(BlocksToRemove);
2932
2933 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2934
2935 return Changed;
2936}
2937
2938/// If AAOnly is set, only intersect alias analysis metadata and preserve other
2939/// known metadata. Unknown metadata is always dropped.
///
/// Merge the metadata of J into K, with K receiving the combined result
/// (typically K is the instruction that survives CSE/sinking and J the one
/// being removed). DoesKMove indicates K is being moved to a program point
/// where J's facts must also hold, so values are intersected / made "most
/// generic" rather than simply kept.
//
// NOTE(review): several continuation lines were lost in extraction (e.g. the
// declaration of `Metadata`, and the merge-argument expressions for
// callee_type, align, dereferenceable(_or_null), noalias_addrspace and
// captures). Consult the upstream file before modifying this function.
2940static void combineMetadata(Instruction *K, const Instruction *J,
2941 bool DoesKMove, bool AAOnly = false) {
// Walk every metadata kind currently attached to K and decide, per kind,
// whether to keep, merge with J's node, or drop it.
2943 K->getAllMetadataOtherThanDebugLoc(Metadata);
2944 for (const auto &MD : Metadata) {
2945 unsigned Kind = MD.first;
2946 MDNode *JMD = J->getMetadata(Kind);
2947 MDNode *KMD = MD.second;
2948
2949 // TODO: Assert that this switch is exhaustive for fixed MD kinds.
2950 switch (Kind) {
2951 default:
2952 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2953 break;
2954 case LLVMContext::MD_dbg:
2955 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2956 case LLVMContext::MD_DIAssignID:
2957 if (!AAOnly)
2958 K->mergeDIAssignID(J);
2959 break;
2960 case LLVMContext::MD_tbaa:
2961 if (DoesKMove)
2962 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2963 break;
2964 case LLVMContext::MD_alias_scope:
2965 if (DoesKMove)
2966 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2967 break;
2968 case LLVMContext::MD_noalias:
2969 case LLVMContext::MD_mem_parallel_loop_access:
2970 if (DoesKMove)
2971 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2972 break;
2973 case LLVMContext::MD_access_group:
2974 if (DoesKMove)
2975 K->setMetadata(LLVMContext::MD_access_group,
2976 intersectAccessGroups(K, J));
2977 break;
// For value-refining kinds (!range, !nofpclass, !nonnull, !align), a
// !noundef on K lets us keep K's stronger fact even when K moves.
2978 case LLVMContext::MD_range:
2979 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2980 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2981 break;
2982 case LLVMContext::MD_nofpclass:
2983 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2984 K->setMetadata(Kind, MDNode::getMostGenericNoFPClass(JMD, KMD));
2985 break;
2986 case LLVMContext::MD_fpmath:
2987 if (!AAOnly)
2988 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2989 break;
2990 case LLVMContext::MD_invariant_load:
2991 // If K moves, only set the !invariant.load if it is present in both
2992 // instructions.
2993 if (DoesKMove)
2994 K->setMetadata(Kind, JMD)
2995 break;
2996 case LLVMContext::MD_nonnull:
2997 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2998 K->setMetadata(Kind, JMD);
2999 break;
3000 case LLVMContext::MD_invariant_group:
3001 // Preserve !invariant.group in K.
3002 break;
3003 // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
3004 // from being removed as unknown metadata. The actual merging is handled
3005 // separately below.
3006 case LLVMContext::MD_prof:
3007 case LLVMContext::MD_mmra:
3008 case LLVMContext::MD_memprof:
3009 case LLVMContext::MD_callsite:
3010 break;
3011 case LLVMContext::MD_callee_type:
// NOTE(review): the merged-value argument line is missing here (extraction).
3012 if (!AAOnly) {
3013 K->setMetadata(LLVMContext::MD_callee_type,
3015 }
3016 break;
3017 case LLVMContext::MD_align:
// NOTE(review): the merged-value argument line is missing here (extraction).
3018 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
3019 K->setMetadata(
3021 break;
3022 case LLVMContext::MD_dereferenceable:
3023 case LLVMContext::MD_dereferenceable_or_null:
// NOTE(review): the merged-value argument line is missing here (extraction).
3024 if (!AAOnly && DoesKMove)
3025 K->setMetadata(Kind,
3027 break;
3028 case LLVMContext::MD_preserve_access_index:
3029 // Preserve !preserve.access.index in K.
3030 break;
3031 case LLVMContext::MD_noundef:
3032 // If K does move, keep noundef if it is present in both instructions.
3033 if (!AAOnly && DoesKMove)
3034 K->setMetadata(Kind, JMD);
3035 break;
3036 case LLVMContext::MD_nontemporal:
3037 // Preserve !nontemporal if it is present on both instructions.
3038 if (!AAOnly)
3039 K->setMetadata(Kind, JMD);
3040 break;
3041 case LLVMContext::MD_noalias_addrspace:
// NOTE(review): the merged-value argument line is missing here (extraction).
3042 if (DoesKMove)
3043 K->setMetadata(Kind,
3045 break;
3046 case LLVMContext::MD_nosanitize:
3047 // Preserve !nosanitize if both K and J have it.
3048 K->setMetadata(Kind, JMD);
3049 break;
3050 case LLVMContext::MD_captures:
// NOTE(review): the tail of this setMetadata call is missing (extraction);
// upstream it unions the capture components of JMD and KMD.
3051 K->setMetadata(
3053 K->getContext(), MDNode::toCaptureComponents(JMD) |
3055 break;
3056 case LLVMContext::MD_alloc_token:
3057 // Preserve !alloc_token if both K and J have it, and they are equal.
3058 if (KMD == JMD)
3059 K->setMetadata(Kind, JMD);
3060 else
3061 K->setMetadata(Kind, nullptr);
3062 break;
3063 }
3064 }
3065 // Set !invariant.group from J if J has it. If both instructions have it
3066 // then we will just pick it from J - even when they are different.
3067 // Also make sure that K is load or store - f.e. combining bitcast with load
3068 // could produce bitcast with invariant.group metadata, which is invalid.
3069 // FIXME: we should try to preserve both invariant.group md if they are
3070 // different, but right now instruction can only have one invariant.group.
3071 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3072 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3073 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3074
3075 // Merge MMRAs.
3076 // This is handled separately because we also want to handle cases where K
3077 // doesn't have tags but J does.
3078 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3079 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3080 if (JMMRA || KMMRA) {
3081 K->setMetadata(LLVMContext::MD_mmra,
3082 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3083 }
3084
3085 // Merge memprof metadata.
3086 // Handle separately to support cases where only one instruction has the
3087 // metadata.
3088 auto *JMemProf = J->getMetadata(LLVMContext::MD_memprof);
3089 auto *KMemProf = K->getMetadata(LLVMContext::MD_memprof);
3090 if (!AAOnly && (JMemProf || KMemProf)) {
3091 K->setMetadata(LLVMContext::MD_memprof,
3092 MDNode::getMergedMemProfMetadata(KMemProf, JMemProf));
3093 }
3094
3095 // Merge callsite metadata.
3096 // Handle separately to support cases where only one instruction has the
3097 // metadata.
3098 auto *JCallSite = J->getMetadata(LLVMContext::MD_callsite);
3099 auto *KCallSite = K->getMetadata(LLVMContext::MD_callsite);
3100 if (!AAOnly && (JCallSite || KCallSite)) {
3101 K->setMetadata(LLVMContext::MD_callsite,
3102 MDNode::getMergedCallsiteMetadata(KCallSite, JCallSite));
3103 }
3104
3105 // Merge prof metadata.
3106 // Handle separately to support cases where only one instruction has the
3107 // metadata.
3108 auto *JProf = J->getMetadata(LLVMContext::MD_prof);
3109 auto *KProf = K->getMetadata(LLVMContext::MD_prof);
3110 if (!AAOnly && (JProf || KProf)) {
3111 K->setMetadata(LLVMContext::MD_prof,
3112 MDNode::getMergedProfMetadata(KProf, JProf, K, J));
3113 }
3114}
3115
// Public entry points wrapping the static combineMetadata() helper above.
// NOTE(review): both signature lines were lost in extraction; presumably
// llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
// bool DoesKMove) and an AA-only variant taking (K, J) — confirm upstream.
3117 bool DoesKMove) {
// Full merge of all known metadata kinds.
3118 combineMetadata(K, J, DoesKMove);
3119}
3120
// AA-only variant: intersect just the alias-analysis metadata and preserve
// the other known kinds on K; DoesKMove is forced to true (conservative).
3122 combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
3123}
3124
/// Copy all load-relevant metadata from Source to Dest, translating kinds
/// whose meaning depends on the loaded type (!nonnull, !range, pointer-only
/// kinds, !nofpclass) to fit Dest's type. Used when cloning a load while
/// changing only its type.
// NOTE(review): the declaration of `MD` (a SmallVector of <unsigned, MDNode*>
// pairs) was lost in extraction — confirm against the upstream file.
3125void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3127 Source.getAllMetadata(MD);
3128 MDBuilder MDB(Dest.getContext());
3129 Type *NewType = Dest.getType();
3130 const DataLayout &DL = Source.getDataLayout();
3131 for (const auto &MDPair : MD) {
3132 unsigned ID = MDPair.first;
3133 MDNode *N = MDPair.second;
3134 // Note, essentially every kind of metadata should be preserved here! This
3135 // routine is supposed to clone a load instruction changing *only its type*.
3136 // The only metadata it makes sense to drop is metadata which is invalidated
3137 // when the pointer type changes. This should essentially never be the case
3138 // in LLVM, but we explicitly switch over only known metadata to be
3139 // conservatively correct. If you are adding metadata to LLVM which pertains
3140 // to loads, you almost certainly want to add it here.
3141 switch (ID) {
3142 case LLVMContext::MD_dbg:
3143 case LLVMContext::MD_tbaa:
3144 case LLVMContext::MD_prof:
3145 case LLVMContext::MD_fpmath:
3146 case LLVMContext::MD_tbaa_struct:
3147 case LLVMContext::MD_invariant_load:
3148 case LLVMContext::MD_alias_scope:
3149 case LLVMContext::MD_noalias:
3150 case LLVMContext::MD_nontemporal:
3151 case LLVMContext::MD_mem_parallel_loop_access:
3152 case LLVMContext::MD_access_group:
3153 case LLVMContext::MD_noundef:
3154 case LLVMContext::MD_noalias_addrspace:
3155 // All of these directly apply.
3156 Dest.setMetadata(ID, N);
3157 break;
3158
3159 case LLVMContext::MD_nonnull:
// May be translated to an equivalent !range on integer loads; see helper.
3160 copyNonnullMetadata(Source, N, Dest);
3161 break;
3162
3163 case LLVMContext::MD_align:
3164 case LLVMContext::MD_dereferenceable:
3165 case LLVMContext::MD_dereferenceable_or_null:
3166 // These only directly apply if the new type is also a pointer.
3167 if (NewType->isPointerTy())
3168 Dest.setMetadata(ID, N);
3169 break;
3170
3171 case LLVMContext::MD_range:
// May be translated to !nonnull on pointer loads; see helper.
3172 copyRangeMetadata(DL, Source, N, Dest);
3173 break;
3174
3175 case LLVMContext::MD_nofpclass:
3176 // This only applies if the cast preserves the floating-point type
3177 // interpretation. This should handle degenerate cases like casting
3178 // between a scalar and a single-element vector.
3179 if (NewType->getScalarType() == Source.getType()->getScalarType())
3180 Dest.setMetadata(ID, N);
3181 break;
3182 }
3183 }
3184}
3185
// Adjust the replacement value so it is no "stronger" than the instruction it
// replaces: drop flags/attributes/metadata that may not hold at the
// replacement's position, then conservatively merge metadata via
// combineMetadataForCSE.
// NOTE(review): the signature line was lost in extraction; the body reads I
// and Repl — presumably (Instruction *I, Value *Repl). Confirm upstream.
3187 auto *ReplInst = dyn_cast<Instruction>(Repl);
3188 if (!ReplInst)
3189 return;
3190
3191 // Patch the replacement so that it is not more restrictive than the value
3192 // being replaced.
3193 WithOverflowInst *UnusedWO;
3194 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3195 // overflowing binary operator, nuw/nsw flags may no longer hold.
// NOTE(review): the second operand of this && (a match() against an
// extractvalue of a with.overflow intrinsic, binding UnusedWO) was lost in
// extraction — confirm against the upstream file.
3196 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3198 ReplInst->dropPoisonGeneratingFlags();
3199 // Note that if 'I' is a load being replaced by some operation,
3200 // for example, by an arithmetic operation, then andIRFlags()
3201 // would just erase all math flags from the original arithmetic
3202 // operation, which is clearly not wanted and not needed.
3203 else if (!isa<LoadInst>(I))
3204 ReplInst->andIRFlags(I);
3205
3206 // Handle attributes.
3207 if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) {
3208 if (auto *CB2 = dyn_cast<CallBase>(I)) {
3209 bool Success = CB1->tryIntersectAttributes(CB2);
3210 assert(Success && "We should not be trying to sink callbases "
3211 "with non-intersectable attributes");
3212 // For NDEBUG Compile.
3213 (void)Success;
3214 }
3215 }
3216
3217 // FIXME: If both the original and replacement value are part of the
3218 // same control-flow region (meaning that the execution of one
3219 // guarantees the execution of the other), then we can combine the
3220 // noalias scopes here and do better than the general conservative
3221 // answer used in combineMetadata().
3222
3223 // In general, GVN unifies expressions over different control-flow
3224 // regions, and so we need a conservative combination of the noalias
3225 // scopes.
3226 combineMetadataForCSE(ReplInst, I, false);
3227}
3228
3229template <typename ShouldReplaceFn>
3230static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3231 const ShouldReplaceFn &ShouldReplace) {
3232 assert(From->getType() == To->getType());
3233
3234 unsigned Count = 0;
3235 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3236 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
3237 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3238 continue;
3239 if (!ShouldReplace(U))
3240 continue;
3241 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3242 From->printAsOperand(dbgs());
3243 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3244 U.set(To);
3245 ++Count;
3246 }
3247 return Count;
3248}
3249
// Replace every use of From that lives OUTSIDE From's own basic block with
// To, returning the number of uses rewritten. Uses inside From's block are
// left untouched.
// NOTE(review): the signature line was lost in extraction; presumably
// unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To).
3251 assert(From->getType() == To->getType());
3252 auto *BB = From->getParent();
3253 unsigned Count = 0;
3254
// Early-inc iteration because U.set() mutates From's use list.
3255 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3256 auto *I = cast<Instruction>(U.getUser());
3257 if (I->getParent() == BB)
3258 continue;
3259 U.set(To);
3260 ++Count;
3261 }
3262 return Count;
3263}
3264
// Three public overloads delegating to the static template worker above,
// differing only in the dominance anchor: a CFG edge, a block, or an
// instruction. Each returns the number of uses replaced.
// NOTE(review): the leading signature line of each overload was lost in
// extraction; presumably unsigned llvm::replaceDominatedUsesWith(
// Value *From, Value *To, ...) — confirm upstream.
3266 DominatorTree &DT,
3267 const BasicBlockEdge &Root) {
3268 auto Dominates = [&](const Use &U) { return DT.dominates(Root, U); };
3269 return ::replaceDominatedUsesWith(From, To, Dominates);
3270}
3271
// Anchor: all uses dominated by basic block BB.
3273 DominatorTree &DT,
3274 const BasicBlock *BB) {
3275 auto Dominates = [&](const Use &U) { return DT.dominates(BB, U); };
3276 return ::replaceDominatedUsesWith(From, To, Dominates);
3277}
3278
// Anchor: all uses dominated by instruction I.
3280 DominatorTree &DT,
3281 const Instruction *I) {
3282 auto Dominates = [&](const Use &U) { return DT.dominates(I, U); };
3283 return ::replaceDominatedUsesWith(From, To, Dominates);
3284}
3285
// Conditional variants: like the overloads above, but each use must both be
// dominated by the anchor AND pass the caller-supplied ShouldReplace
// predicate. Return the number of uses replaced.
// NOTE(review): each overload's opening line (function name) was lost in
// extraction; presumably unsigned llvm::replaceDominatedUsesWithIf(...).
3287 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3288 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3289 auto DominatesAndShouldReplace = [&](const Use &U) {
3290 return DT.dominates(Root, U) && ShouldReplace(U, To);
3291 };
3292 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3293}
3294
// Anchor: basic block BB.
3296 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3297 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3298 auto DominatesAndShouldReplace = [&](const Use &U) {
3299 return DT.dominates(BB, U) && ShouldReplace(U, To);
3300 };
3301 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3302}
3303
// Anchor: instruction I.
3305 Value *From, Value *To, DominatorTree &DT, const Instruction *I,
3306 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3307 auto DominatesAndShouldReplace = [&](const Use &U) {
3308 return DT.dominates(I, U) && ShouldReplace(U, To);
3309 };
3310 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3311}
3312
// Return true if this call is known not to require a GC safepoint: either it
// is explicitly annotated "gc-leaf-function", it is an intrinsic other than
// the few that do take safepoints, or it is a recognized library call.
// NOTE(review): the first signature line was lost in extraction; presumably
// bool llvm::callsGCLeafFunction(const CallBase *Call, const
// TargetLibraryInfo &TLI).
3314 const TargetLibraryInfo &TLI) {
3315 // Check if the function is specifically marked as a gc leaf function.
3316 if (Call->hasFnAttr("gc-leaf-function"))
3317 return true;
3318 if (const Function *F = Call->getCalledFunction()) {
3319 if (F->hasFnAttribute("gc-leaf-function"))
3320 return true;
3321
3322 if (auto IID = F->getIntrinsicID()) {
3323 // Most LLVM intrinsics do not take safepoints.
3324 return IID != Intrinsic::experimental_gc_statepoint &&
3325 IID != Intrinsic::experimental_deoptimize &&
3326 IID != Intrinsic::memcpy_element_unordered_atomic &&
3327 IID != Intrinsic::memmove_element_unordered_atomic;
3328 }
3329 }
3330
3331 // Lib calls can be materialized by some passes, and won't be
3332 // marked as 'gc-leaf-function.' All available Libcalls are
3333 // GC-leaf.
3334 LibFunc LF;
3335 if (TLI.getLibFunc(*Call, LF)) {
3336 return TLI.has(LF);
3337 }
3338
3339 return false;
3340}
3341
// Transfer !nonnull metadata (node N) from OldLI to NewLI, which may load a
// different type: copied directly for pointer loads, or translated into an
// equivalent !range (excluding the null value) for integer loads.
// NOTE(review): the first signature line was lost in extraction; presumably
// void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
// LoadInst &NewLI).
3343 LoadInst &NewLI) {
3344 auto *NewTy = NewLI.getType();
3345
3346 // This only directly applies if the new type is also a pointer.
3347 if (NewTy->isPointerTy()) {
3348 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3349 return;
3350 }
3351
3352 // The only other translation we can do is to integral loads with !range
3353 // metadata.
3354 if (!NewTy->isIntegerTy())
3355 return;
3356
3357 MDBuilder MDB(NewLI.getContext());
3358 const Value *Ptr = OldLI.getPointerOperand();
3359 auto *ITy = cast<IntegerType>(NewTy);
// NOTE(review): the argument line of this getPtrToInt call (a null-pointer
// constant of Ptr's type, cast to ITy) was lost in extraction.
3360 auto *NullInt = ConstantExpr::getPtrToInt(
3362 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
// Range [NonNullInt, NullInt) wraps around and excludes exactly the null
// value.
3363 NewLI.setMetadata(LLVMContext::MD_range,
3364 MDB.createRange(NonNullInt, NullInt));
3365}
3366
// Transfer !range metadata (node N) from OldLI to NewLI: copied verbatim if
// the loaded type is unchanged; otherwise, if the new type is a same-width
// pointer and the old range excludes zero, translated into !nonnull.
// NOTE(review): the first signature line was lost in extraction; presumably
// void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
// MDNode *N, LoadInst &NewLI).
3368 MDNode *N, LoadInst &NewLI) {
3369 auto *NewTy = NewLI.getType();
3370 // Simply copy the metadata if the type did not change.
3371 if (NewTy == OldLI.getType()) {
3372 NewLI.setMetadata(LLVMContext::MD_range, N);
3373 return;
3374 }
3375
3376 // Give up unless it is converted to a pointer where there is a single very
3377 // valuable mapping we can do reliably.
3378 // FIXME: It would be nice to propagate this in more ways, but the type
3379 // conversions make it hard.
3380 if (!NewTy->isPointerTy())
3381 return;
3382
3383 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3384 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3385 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
// An empty MDNode is the canonical encoding of !nonnull.
3386 MDNode *NN = MDNode::get(OldLI.getContext(), {});
3387 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3388 }
3389}
3390
// Erase every debug record (DbgVariableRecord) that refers to instruction I.
// NOTE(review): the signature line and the declaration of DPUsers (a
// SmallVector of DbgVariableRecord*) were lost in extraction; presumably
// void llvm::dropDebugUsers(Instruction &I).
3393 findDbgUsers(&I, DPUsers);
3394 for (auto *DVR : DPUsers)
3395 DVR->eraseFromParent();
3396}
3397
// Hoist every non-terminator instruction of BB into DomBlock before InsertPt,
// stripping UB-implying metadata, debug users/records and debug/pseudo
// intrinsics, and retagging the survivors with InsertPt's debug location.
// NOTE(review): the first signature line was lost in extraction; presumably
// void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock,
// Instruction *InsertPt, BasicBlock *BB).
3399 BasicBlock *BB) {
3400 // Since we are moving the instructions out of its basic block, we do not
3401 // retain their original debug locations (DILocations) and debug intrinsic
3402 // instructions.
3403 //
3404 // Doing so would degrade the debugging experience.
3405 //
3406 // FIXME: Issue #152767: debug info should also be the same as the
3407 // original branch, **if** the user explicitly indicated that (for sampling
3408 // PGO)
3409 //
3410 // Currently, when hoisting the instructions, we take the following actions:
3411 // - Remove their debug intrinsic instructions.
3412 // - Set their debug locations to the values from the insertion point.
3413 //
3414 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3415 // need to be deleted, is because there will not be any instructions with a
3416 // DILocation in either branch left after performing the transformation. We
3417 // can only insert a dbg.value after the two branches are joined again.
3418 //
3419 // See PR38762, PR39243 for more details.
3420 //
3421 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3422 // encode predicated DIExpressions that yield different results on different
3423 // code paths.
3424
3425 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3426 Instruction *I = &*II;
3427 I->dropUBImplyingAttrsAndMetadata();
3428 if (I->isUsedByMetadata())
3429 dropDebugUsers(*I);
3430 // RemoveDIs: drop debug-info too as the following code does.
3431 I->dropDbgRecords();
3432 if (I->isDebugOrPseudoInst()) {
3433 // Remove DbgInfo and pseudo probe Intrinsics.
3434 II = I->eraseFromParent();
3435 continue;
3436 }
3437 I->setDebugLoc(InsertPt->getDebugLoc());
3438 ++II;
3439 }
// Move everything but BB's terminator in one splice.
3440 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3441 BB->getTerminator()->getIterator());
3442}
3443
// Build a DIExpression describing constant C of type Ty for debug info, or
// return nullptr when no faithful encoding exists. Handles integers
// (sign/zero extended to 64 bits), small floats (bit pattern), null
// pointers, and inttoptr-of-constant pointers.
// NOTE(review): the first signature line was lost in extraction; presumably
// static DIExpression *getExpressionForConstant(DIBuilder &DIB,
// const Constant &C, Type &Ty).
3445 Type &Ty) {
3446 // Create integer constant expression.
3447 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3448 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3449 std::optional<int64_t> InitIntOpt;
// i1 is zero-extended (true -> 1); wider integers are sign-extended, and a
// failed extension (doesn't fit int64_t) yields nullptr below.
3450 if (API.getBitWidth() == 1)
3451 InitIntOpt = API.tryZExtValue();
3452 else
3453 InitIntOpt = API.trySExtValue();
3454 return InitIntOpt ? DIB.createConstantValueExpression(
3455 static_cast<uint64_t>(*InitIntOpt))
3456 : nullptr;
3457 };
3458
3459 if (isa<ConstantInt>(C))
3460 return createIntegerExpression(C);
3461
// Floats up to 64 bits wide are encoded by their raw bit pattern.
3462 auto *FP = dyn_cast<ConstantFP>(&C);
3463 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3464 const APFloat &APF = FP->getValueAPF();
3465 APInt const &API = APF.bitcastToAPInt();
3466 if (uint64_t Temp = API.getZExtValue())
3467 return DIB.createConstantValueExpression(Temp);
3468 return DIB.createConstantValueExpression(*API.getRawData());
3469 }
3470
3471 if (!Ty.isPointerTy())
3472 return nullptr;
3473
// NOTE(review): the guard line before this return (an
// isa<ConstantPointerNull>(C) check) was lost in extraction.
3475 return DIB.createConstantValueExpression(0);
3476
3477 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3478 if (CE->getOpcode() == Instruction::IntToPtr) {
3479 const Value *V = CE->getOperand(0);
3480 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3481 return createIntegerExpression(*CI);
3482 }
3483 return nullptr;
3484}
3485
// Rewrite the value operands (and dbg.assign addresses) of all debug records
// attached to Inst according to the old->new value Mapping.
// NOTE(review): the signature line was lost in extraction; presumably
// void llvm::remapDebugVariable(ValueToValueMapTy &Mapping,
// Instruction *Inst).
3487 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3488 for (auto *Op : Set) {
3489 auto I = Mapping.find(Op);
3490 if (I != Mapping.end())
3491 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3492 }
3493 };
// dbg.assign records additionally carry an address operand to remap.
3494 auto RemapAssignAddress = [&Mapping](auto *DA) {
3495 auto I = Mapping.find(DA->getAddress());
3496 if (I != Mapping.end())
3497 DA->setAddress(I->second);
3498 };
3499 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3500 RemapDebugOperands(&DVR, DVR.location_ops());
3501 if (DVR.isDbgAssign())
3502 RemapAssignAddress(&DVR);
3503 }
3504}
3505
3506namespace {
3507
3508/// A potential constituent of a bitreverse or bswap expression. See
3509/// collectBitParts for a fuller explanation.
3510struct BitPart {
3511 BitPart(Value *P, unsigned BW) : Provider(P) {
3512 Provenance.resize(BW);
3513 }
3514
3515 /// The Value that this is a bitreverse/bswap of.
3516 Value *Provider;
3517
3518 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3519 /// in Provider becomes bit B in the result of this expression.
3520 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3521
3522 enum { Unset = -1 };
3523};
3524
3525} // end anonymous namespace
3526
3527/// Analyze the specified subexpression and see if it is capable of providing
3528/// pieces of a bswap or bitreverse. The subexpression provides a potential
3529/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3530/// the output of the expression came from a corresponding bit in some other
3531/// value. This function is recursive, and the end result is a mapping of
3532/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3533/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3534///
3535/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
3536/// that the expression deposits the low byte of %X into the high byte of the
3537/// result and that all other bits are zero. This expression is accepted and a
3538/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3539/// [0-7].
3540///
3541/// For vector types, all analysis is performed at the per-element level. No
3542/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3543/// constant masks must be splatted across all elements.
3544///
3545/// To avoid revisiting values, the BitPart results are memoized into the
3546/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3547/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3548/// store BitParts objects, not pointers. As we need the concept of a nullptr
3549 /// BitParts (Value has been analyzed and the analysis failed), we use an
3550 /// Optional type instead to provide the same functionality.
3551///
3552/// Because we pass around references into \c BPS, we must use a container that
3553/// does not invalidate internal references (std::map instead of DenseMap).
3554static const std::optional<BitPart> &
3555collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3556 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3557 bool &FoundRoot) {
3558 auto [I, Inserted] = BPS.try_emplace(V);
3559 if (!Inserted)
3560 return I->second;
3561
3562 auto &Result = I->second;
3563 auto BitWidth = V->getType()->getScalarSizeInBits();
3564
3565 // Can't do integer/elements > 128 bits.
3566 if (BitWidth > 128)
3567 return Result;
3568
3569 // Prevent stack overflow by limiting the recursion depth
3571 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3572 return Result;
3573 }
3574
3575 if (auto *I = dyn_cast<Instruction>(V)) {
3576 Value *X, *Y;
3577 const APInt *C;
3578
3579 // If this is an or instruction, it may be an inner node of the bswap.
3580 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3581 // Check we have both sources and they are from the same provider.
3582 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3583 Depth + 1, FoundRoot);
3584 if (!A || !A->Provider)
3585 return Result;
3586
3587 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3588 Depth + 1, FoundRoot);
3589 if (!B || A->Provider != B->Provider)
3590 return Result;
3591
3592 // Try and merge the two together.
3593 Result = BitPart(A->Provider, BitWidth);
3594 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3595 if (A->Provenance[BitIdx] != BitPart::Unset &&
3596 B->Provenance[BitIdx] != BitPart::Unset &&
3597 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3598 return Result = std::nullopt;
3599
3600 if (A->Provenance[BitIdx] == BitPart::Unset)
3601 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3602 else
3603 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3604 }
3605
3606 return Result;
3607 }
3608
3609 // If this is a logical shift by a constant, recurse then shift the result.
3610 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3611 const APInt &BitShift = *C;
3612
3613 // Ensure the shift amount is defined.
3614 if (BitShift.uge(BitWidth))
3615 return Result;
3616
3617 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3618 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3619 return Result;
3620
3621 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3622 Depth + 1, FoundRoot);
3623 if (!Res)
3624 return Result;
3625 Result = Res;
3626
3627 // Perform the "shift" on BitProvenance.
3628 auto &P = Result->Provenance;
3629 if (I->getOpcode() == Instruction::Shl) {
3630 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3631 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3632 } else {
3633 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3634 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3635 }
3636
3637 return Result;
3638 }
3639
3640 // If this is a logical 'and' with a mask that clears bits, recurse then
3641 // unset the appropriate bits.
3642 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3643 const APInt &AndMask = *C;
3644
3645 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3646 // early exit.
3647 unsigned NumMaskedBits = AndMask.popcount();
3648 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3649 return Result;
3650
3651 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3652 Depth + 1, FoundRoot);
3653 if (!Res)
3654 return Result;
3655 Result = Res;
3656
3657 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3658 // If the AndMask is zero for this bit, clear the bit.
3659 if (AndMask[BitIdx] == 0)
3660 Result->Provenance[BitIdx] = BitPart::Unset;
3661 return Result;
3662 }
3663
3664 // If this is a zext instruction zero extend the result.
3665 if (match(V, m_ZExt(m_Value(X)))) {
3666 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3667 Depth + 1, FoundRoot);
3668 if (!Res)
3669 return Result;
3670
3671 Result = BitPart(Res->Provider, BitWidth);
3672 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3673 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3674 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3675 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3676 Result->Provenance[BitIdx] = BitPart::Unset;
3677 return Result;
3678 }
3679
3680 // If this is a truncate instruction, extract the lower bits.
3681 if (match(V, m_Trunc(m_Value(X)))) {
3682 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3683 Depth + 1, FoundRoot);
3684 if (!Res)
3685 return Result;
3686
3687 Result = BitPart(Res->Provider, BitWidth);
3688 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3689 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3690 return Result;
3691 }
3692
3693 // BITREVERSE - most likely due to us previous matching a partial
3694 // bitreverse.
3695 if (match(V, m_BitReverse(m_Value(X)))) {
3696 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3697 Depth + 1, FoundRoot);
3698 if (!Res)
3699 return Result;
3700
3701 Result = BitPart(Res->Provider, BitWidth);
3702 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3703 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3704 return Result;
3705 }
3706
3707 // BSWAP - most likely due to us previous matching a partial bswap.
3708 if (match(V, m_BSwap(m_Value(X)))) {
3709 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3710 Depth + 1, FoundRoot);
3711 if (!Res)
3712 return Result;
3713
3714 unsigned ByteWidth = BitWidth / 8;
3715 Result = BitPart(Res->Provider, BitWidth);
3716 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3717 unsigned ByteBitOfs = ByteIdx * 8;
3718 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3719 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3720 Res->Provenance[ByteBitOfs + BitIdx];
3721 }
3722 return Result;
3723 }
3724
3725 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3726 // amount (modulo).
3727 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3728 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3729 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3730 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3731 // We can treat fshr as a fshl by flipping the modulo amount.
3732 unsigned ModAmt = C->urem(BitWidth);
3733 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3734 ModAmt = BitWidth - ModAmt;
3735
3736 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3737 if (!MatchBitReversals && (ModAmt % 8) != 0)
3738 return Result;
3739
3740 // Check we have both sources and they are from the same provider.
3741 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3742 Depth + 1, FoundRoot);
3743 if (!LHS || !LHS->Provider)
3744 return Result;
3745
3746 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3747 Depth + 1, FoundRoot);
3748 if (!RHS || LHS->Provider != RHS->Provider)
3749 return Result;
3750
3751 unsigned StartBitRHS = BitWidth - ModAmt;
3752 Result = BitPart(LHS->Provider, BitWidth);
3753 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3754 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3755 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3756 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3757 return Result;
3758 }
3759 }
3760
3761 // If we've already found a root input value then we're never going to merge
3762 // these back together.
3763 if (FoundRoot)
3764 return Result;
3765
3766 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3767 // be the root input value to the bswap/bitreverse.
3768 FoundRoot = true;
3769 Result = BitPart(V, BitWidth);
3770 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3771 Result->Provenance[BitIdx] = BitIdx;
3772 return Result;
3773}
3774
/// Return true if moving the bit at index \p From to index \p To is what a
/// bswap of a \p BitWidth-bit value would do: the bit keeps its position
/// inside its byte while the containing bytes are mirrored end-for-end.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // A byte swap never moves a bit within its own byte.
  if ((From & 7u) != (To & 7u))
    return false;
  // Work in byte units and require the byte positions to mirror each other.
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte == (NumBytes - 1) - ToByte;
}
3785
/// Return true if moving the bit at index \p From to index \p To is what a
/// full bit reversal of a \p BitWidth-bit value would do.
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // Bit i of the result must come from the mirrored position
  // (BitWidth - 1 - i) of the source.
  return To == BitWidth - From - 1;
}
3790
3792 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3793 SmallVectorImpl<Instruction *> &InsertedInsts) {
3794 if (!match(I, m_Or(m_Value(), m_Value())) &&
3795 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3796 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3797 !match(I, m_BSwap(m_Value())))
3798 return false;
3799 if (!MatchBSwaps && !MatchBitReversals)
3800 return false;
3801 Type *ITy = I->getType();
3802 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
3803 ITy->getScalarSizeInBits() > 128)
3804 return false; // Can't do integer/elements > 128 bits.
3805
3806 // Try to find all the pieces corresponding to the bswap.
3807 bool FoundRoot = false;
3808 std::map<Value *, std::optional<BitPart>> BPS;
3809 const auto &Res =
3810 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3811 if (!Res)
3812 return false;
3813 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3814 assert(all_of(BitProvenance,
3815 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3816 "Illegal bit provenance index");
3817
3818 // If the upper bits are zero, then attempt to perform as a truncated op.
3819 Type *DemandedTy = ITy;
3820 if (BitProvenance.back() == BitPart::Unset) {
3821 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3822 BitProvenance = BitProvenance.drop_back();
3823 if (BitProvenance.empty())
3824 return false; // TODO - handle null value?
3825 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3826 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3827 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3828 }
3829
3830 // Check BitProvenance hasn't found a source larger than the result type.
3831 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3832 if (DemandedBW > ITy->getScalarSizeInBits())
3833 return false;
3834
3835 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3836 // only byteswap values with an even number of bytes.
3837 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3838 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3839 bool OKForBitReverse = MatchBitReversals;
3840 for (unsigned BitIdx = 0;
3841 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3842 if (BitProvenance[BitIdx] == BitPart::Unset) {
3843 DemandedMask.clearBit(BitIdx);
3844 continue;
3845 }
3846 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3847 DemandedBW);
3848 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3849 BitIdx, DemandedBW);
3850 }
3851
3852 Intrinsic::ID Intrin;
3853 if (OKForBSwap)
3854 Intrin = Intrinsic::bswap;
3855 else if (OKForBitReverse)
3856 Intrin = Intrinsic::bitreverse;
3857 else
3858 return false;
3859
3860 Function *F =
3861 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy);
3862 Value *Provider = Res->Provider;
3863
3864 // We may need to truncate the provider.
3865 if (DemandedTy != Provider->getType()) {
3866 auto *Trunc =
3867 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
3868 InsertedInsts.push_back(Trunc);
3869 Provider = Trunc;
3870 }
3871
3872 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
3873 InsertedInsts.push_back(Result);
3874
3875 if (!DemandedMask.isAllOnes()) {
3876 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3877 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
3878 InsertedInsts.push_back(Result);
3879 }
3880
3881 // We may need to zeroextend back to the result type.
3882 if (ITy != Result->getType()) {
3883 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
3884 InsertedInsts.push_back(ExtInst);
3885 }
3886
3887 return true;
3888}
3889
3890// CodeGen has special handling for some string functions that may replace
3891// them with target-specific intrinsics. Since that'd skip our interceptors
3892// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3893// we mark affected calls as NoBuiltin, which will disable optimization
3894// in CodeGen.
3896 CallInst *CI, const TargetLibraryInfo *TLI) {
3897 Function *F = CI->getCalledFunction();
3898 LibFunc Func;
3899 if (F && !F->hasLocalLinkage() && F->hasName() &&
3900 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3901 !F->doesNotAccessMemory())
3902 CI->addFnAttr(Attribute::NoBuiltin);
3903}
3904
3906 const auto *Op = I->getOperand(OpIdx);
3907 // We can't have a PHI with a metadata or token type.
3908 if (Op->getType()->isMetadataTy() || Op->getType()->isTokenLikeTy())
3909 return false;
3910
3911 // swifterror pointers can only be used by a load, store, or as a swifterror
3912 // argument; swifterror pointers are not allowed to be used in select or phi
3913 // instructions.
3914 if (Op->isSwiftError())
3915 return false;
3916
3917 // Protected pointer field loads/stores should be paired with the intrinsic
3918 // to avoid unnecessary address escapes.
3919 if (auto *II = dyn_cast<IntrinsicInst>(Op))
3920 if (II->getIntrinsicID() == Intrinsic::protected_field_ptr)
3921 return false;
3922
3923 // Cannot replace alloca argument with phi/select.
3924 if (I->isLifetimeStartOrEnd())
3925 return false;
3926
3927 // Early exit.
3929 return true;
3930
3931 switch (I->getOpcode()) {
3932 default:
3933 return true;
3934 case Instruction::Call:
3935 case Instruction::Invoke: {
3936 const auto &CB = cast<CallBase>(*I);
3937
3938 // Can't handle inline asm. Skip it.
3939 if (CB.isInlineAsm())
3940 return false;
3941
3942 // Constant bundle operands may need to retain their constant-ness for
3943 // correctness.
3944 if (CB.isBundleOperand(OpIdx))
3945 return false;
3946
3947 if (OpIdx < CB.arg_size()) {
3948 // Some variadic intrinsics require constants in the variadic arguments,
3949 // which currently aren't markable as immarg.
3950 if (isa<IntrinsicInst>(CB) &&
3951 OpIdx >= CB.getFunctionType()->getNumParams()) {
3952 // This is known to be OK for stackmap.
3953 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3954 }
3955
3956 // gcroot is a special case, since it requires a constant argument which
3957 // isn't also required to be a simple ConstantInt.
3958 if (CB.getIntrinsicID() == Intrinsic::gcroot)
3959 return false;
3960
3961 // Some intrinsic operands are required to be immediates.
3962 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3963 }
3964
3965 // It is never allowed to replace the call argument to an intrinsic, but it
3966 // may be possible for a call.
3967 return !isa<IntrinsicInst>(CB);
3968 }
3969 case Instruction::ShuffleVector:
3970 // Shufflevector masks are constant.
3971 return OpIdx != 2;
3972 case Instruction::Switch:
3973 case Instruction::ExtractValue:
3974 // All operands apart from the first are constant.
3975 return OpIdx == 0;
3976 case Instruction::InsertValue:
3977 // All operands apart from the first and the second are constant.
3978 return OpIdx < 2;
3979 case Instruction::Alloca:
3980 // Static allocas (constant size in the entry block) are handled by
3981 // prologue/epilogue insertion so they're free anyway. We definitely don't
3982 // want to make them non-constant.
3983 return !cast<AllocaInst>(I)->isStaticAlloca();
3984 case Instruction::GetElementPtr:
3985 if (OpIdx == 0)
3986 return true;
3988 for (auto E = std::next(It, OpIdx); It != E; ++It)
3989 if (It.isStruct())
3990 return false;
3991 return true;
3992 }
3993}
3994
3996 // First: Check if it's a constant
3997 if (Constant *C = dyn_cast<Constant>(Condition))
3998 return ConstantExpr::getNot(C);
3999
4000 // Second: If the condition is already inverted, return the original value
4001 Value *NotCondition;
4002 if (match(Condition, m_Not(m_Value(NotCondition))))
4003 return NotCondition;
4004
4005 BasicBlock *Parent = nullptr;
4006 Instruction *Inst = dyn_cast<Instruction>(Condition);
4007 if (Inst)
4008 Parent = Inst->getParent();
4009 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4010 Parent = &Arg->getParent()->getEntryBlock();
4011 assert(Parent && "Unsupported condition to invert");
4012
4013 // Third: Check all the users for an invert
4014 for (User *U : Condition->users())
4016 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4017 return I;
4018
4019 // Last option: Create a new instruction
4020 auto *Inverted =
4021 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4022 if (Inst && !isa<PHINode>(Inst))
4023 Inverted->insertAfter(Inst->getIterator());
4024 else
4025 Inverted->insertBefore(Parent->getFirstInsertionPt());
4026 return Inverted;
4027}
4028
4030 // Note: We explicitly check for attributes rather than using cover functions
4031 // because some of the cover functions include the logic being implemented.
4032
4033 bool Changed = false;
4034 // readnone + not convergent implies nosync
4035 if (!F.hasFnAttribute(Attribute::NoSync) &&
4036 F.doesNotAccessMemory() && !F.isConvergent()) {
4037 F.setNoSync();
4038 Changed = true;
4039 }
4040
4041 // readonly implies nofree
4042 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4043 F.setDoesNotFreeMemory();
4044 Changed = true;
4045 }
4046
4047 // willreturn implies mustprogress
4048 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4049 F.setMustProgress();
4050 Changed = true;
4051 }
4052
4053 // TODO: There are a bunch of cases of restrictive memory effects we
4054 // can infer by inspecting arguments of argmemonly-ish functions.
4055
4056 return Changed;
4057}
4058
4060#ifndef NDEBUG
4061 if (Opcode)
4062 assert(Opcode == I.getOpcode() &&
4063 "can only use mergeFlags on instructions with matching opcodes");
4064 else
4065 Opcode = I.getOpcode();
4066#endif
4068 HasNUW &= I.hasNoUnsignedWrap();
4069 HasNSW &= I.hasNoSignedWrap();
4070 }
4071 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4072 IsDisjoint &= DisjointOp->isDisjoint();
4073}
4074
4076 I.clearSubclassOptionalData();
4077 if (I.getOpcode() == Instruction::Add ||
4078 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4079 if (HasNUW)
4080 I.setHasNoUnsignedWrap();
4081 if (HasNSW && (AllKnownNonNegative || HasNUW))
4082 I.setHasNoSignedWrap();
4083 }
4084 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4085 DisjointOp->setIsDisjoint(IsDisjoint);
4086}
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static unsigned getHashValueImpl(SimpleValue Val)
Definition EarlyCSE.cpp:229
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition EarlyCSE.cpp:350
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
SmallDenseMap< BasicBlock *, Value *, 16 > IncomingValueMap
Definition Local.cpp:917
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition Local.cpp:1638
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition Local.cpp:2664
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition Local.cpp:2414
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition Local.cpp:1776
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition Local.cpp:2162
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1614
static void combineMetadata(Instruction *K, const Instruction *J, bool DoesKMove, bool AAOnly=false)
If AAOnly is set, only intersect alias analysis metadata and preserve other known metadata.
Definition Local.cpp:2940
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition Local.cpp:2192
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition Local.cpp:2341
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition Local.cpp:2346
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2204
static DIExpression * dropInitialDeref(const DIExpression *DIExpr)
Definition Local.cpp:1674
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition Local.cpp:982
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2136
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, BasicBlock *&CommonPred)
Definition Local.cpp:1025
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2263
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition Local.cpp:851
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition Local.cpp:668
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition Local.cpp:1770
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition Local.cpp:627
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
static void gatherIncomingValuesToPhi(PHINode *PN, const PredBlockVector &BBPreds, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition Local.cpp:959
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition Local.cpp:2238
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3775
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition Local.cpp:3555
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1395
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition Local.cpp:860
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition Local.cpp:1978
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1431
SmallVector< BasicBlock *, 16 > PredBlockVector
Definition Local.cpp:916
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition Local.cpp:1663
static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ)
Check whether removing BB will make the phis in its Succ have too many incoming entries.
Definition Local.cpp:1058
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition Local.cpp:931
static const unsigned BitPartRecursionMaxDepth
Definition Local.cpp:121
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition Local.cpp:1090
static cl::opt< unsigned > MaxPhiEntriesIncreaseAfterRemovingEmptyBlock("max-phi-entries-increase-after-removing-empty-block", cl::init(1000), cl::Hidden, cl::desc("Stop removing an empty block if removing it will introduce more " "than this number of phi entries in its successor"))
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3786
static void salvageDbgAssignAddress(T *Assign)
Definition Local.cpp:2020
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1404
Class for arbitrary precision integers.
Definition APInt.h:78
std::optional< uint64_t > tryZExtValue() const
Get zero extended value if possible.
Definition APInt.h:1567
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition APInt.h:1421
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1685
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition APInt.h:576
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition APInt.h:1589
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
an instruction to allocate memory on the stack
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Value handle that asserts if the Value is deleted.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:483
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
const Instruction & back() const
Definition BasicBlock.h:495
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:696
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
LLVM_ABI bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
size_t size() const
Definition BasicBlock.h:491
LLVM_ABI bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition BasicBlock.h:668
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition Constants.h:904
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
CallingConv::ID getCallingConv() const
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
bool isSigned() const
Definition InstrTypes.h:930
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1130
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI void destroyConstant()
Called if some element of this constant is no longer valid.
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition DIBuilder.h:940
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
ArrayRef< uint64_t > getElements() const
LLVM_ABI std::optional< uint64_t > getActiveBits(DIVariable *Var)
Return the number of bits that have an active value, i.e.
uint64_t getElement(unsigned I) const
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
LLVM_ABI void removeFromParent()
LLVM_ABI Module * getModule()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
LLVM_ABI unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable.
LLVM_ABI DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
static DebugLoc getTemporary()
Definition DebugLoc.h:160
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
unsigned size() const
Definition DenseMap.h:110
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:299
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
LLVM_ABI void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
const BasicBlock & getEntryBlock() const
Definition Function.h:809
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
bool hasDomTree() const
Returns true if it holds a DomTreeT.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
bool isBBPendingDeletion(BasicBlockT *DelBB) const
Returns true if DelBB is awaiting deletion.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Value * getPointerOperand()
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition MDBuilder.cpp:38
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition MDBuilder.cpp:96
Metadata node.
Definition Metadata.h:1080
static LLVM_ABI MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedCallsiteMetadata(MDNode *A, MDNode *B)
static LLVM_ABI CaptureComponents toCaptureComponents(const MDNode *MD)
Convert !captures metadata to CaptureComponents. MD may be nullptr.
static LLVM_ABI MDNode * getMergedCalleeTypeMetadata(const MDNode *A, const MDNode *B)
static LLVM_ABI MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoaliasAddrspace(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
static LLVM_ABI MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericRange(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedMemProfMetadata(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * intersect(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoFPClass(MDNode *A, MDNode *B)
LLVMContext & getContext() const
Definition Metadata.h:1244
static LLVM_ABI MDNode * fromCaptureComponents(LLVMContext &Ctx, CaptureComponents CC)
Convert CaptureComponents to !captures metadata.
static LLVM_ABI MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
iterator end()
Definition MapVector.h:67
iterator find(const KeyT &Key)
Definition MapVector.h:154
bool empty() const
Definition MapVector.h:77
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
LLVM_ABI void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
LLVM_ABI void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition SetVector.h:94
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
value_type pop_back_val()
Definition SetVector.h:279
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
value_op_iterator value_op_end()
Definition User.h:288
Value * getOperand(unsigned i) const
Definition User.h:207
value_op_iterator value_op_begin()
Definition User.h:285
iterator_range< value_op_iterator > operand_values()
Definition User.h:291
Value wrapper in the Metadata hierarchy.
Definition Metadata.h:459
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
iterator find(const KeyT &Val)
Definition ValueMap.h:160
iterator end()
Definition ValueMap.h:139
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:427
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
bool isUsedByMetadata() const
Return true if there is metadata referencing this value.
Definition Value.h:569
bool use_empty() const
Definition Value.h:347
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition Value.h:831
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
iterator_range< use_iterator > uses()
Definition Value.h:381
user_iterator_impl< User > user_iterator
Definition Value.h:392
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition DenseSet.h:96
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
Changed
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2503
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:538
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition Local.cpp:2621
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
Definition Local.cpp:3286
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
LLVM_ABI unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition Local.cpp:3250
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1730
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition Local.cpp:2597
LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
LLVM_ABI void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition Local.cpp:3125
LLVM_ABI void InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1726
constexpr from_range_t from_range
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or less items.
Definition STLExtras.h:2659
LLVM_ABI void remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst)
Remap the operands of the debug records attached to Inst, and the operands of Inst itself if it's a d...
Definition Local.cpp:3486
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
auto cast_or_null(const Y &Val)
Definition Casting.h:714
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition Local.cpp:726
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
LLVM_ABI void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition Local.cpp:1897
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2486
LLVM_ABI bool canSimplifyInvokeNoUnwind(const Function *F)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:406
LLVM_ABI bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition Local.cpp:1158
LLVM_ABI SmallVector< uint32_t > fitWeights(ArrayRef< uint64_t > Weights)
Push the weights right to fit in uint32_t.
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition Local.cpp:3791
LLVM_ABI MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition Local.cpp:1584
LLVM_ABI bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition Local.cpp:413
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1813
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition Local.cpp:3444
LLVM_ABI CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition Local.cpp:2571
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1680
LLVM_ABI Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition Local.cpp:2863
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:425
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition Local.cpp:3186
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2055
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition Local.cpp:3265
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2531
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2432
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2292
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition Local.cpp:3116
LLVM_ABI void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition Local.cpp:3391
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!...
Definition Local.cpp:766
LLVM_ABI bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition Local.cpp:615
LLVM_ABI void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition Local.cpp:3398
LLVM_ABI void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition Local.cpp:3367
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI DebugLoc getDebugValueLoc(DbgVariableRecord *DVR)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
LLVM_ABI void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition Local.cpp:3342
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition Local.cpp:3905
DWARFExpression::Operation Op
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca it describes is replaced with a new value.
Definition Local.cpp:2000
LLVM_ABI Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to...
Definition Local.cpp:1535
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition Local.cpp:553
constexpr unsigned BitWidth
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2019
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI void combineAAMetadata(Instruction *K, const Instruction *J)
Combine metadata of two instructions, where instruction J is a memory access that has been merged int...
Definition Local.cpp:3121
LLVM_ABI bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition Local.cpp:646
LLVM_ABI bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition Local.cpp:4029
LLVM_ABI Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition Local.cpp:3995
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
LLVM_ABI void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks from BB.
LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef< uint64_t > Weights, bool IsExpected, bool ElideAllZero=false)
Variant of setBranchWeights where the Weights will be fit first to uint32_t by shifting right.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition Local.cpp:3895
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition Local.cpp:2901
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition Local.cpp:1527
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition Local.cpp:3313
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1960
LLVM_ABI void extractFromBranchWeightMD64(const MDNode *ProfileData, SmallVectorImpl< uint64_t > &Weights)
Faster version of extractBranchWeights() that skips checks and must only be called with "branch_weigh...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
#define NDEBUG
Definition regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given valu...
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:258
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
std::optional< unsigned > Opcode
Opcode of merged instructions.
Definition Local.h:591
LLVM_ABI void mergeFlags(Instruction &I)
Merge in the no-wrap flags from I.
Definition Local.cpp:4059
LLVM_ABI void applyFlags(Instruction &I)
Apply the no-wrap flags to I if applicable.
Definition Local.cpp:4075
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276