Instruction.cpp
1//===-- Instruction.cpp - Implement the Instruction class -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Instruction class for the IR library.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/IR/Instruction.h"
14#include "llvm/ADT/DenseSet.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/IR/AttributeMask.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/InstrTypes.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
25#include "llvm/IR/Module.h"
26#include "llvm/IR/Operator.h"
27#include "llvm/IR/ProfDataUtils.h"
28#include "llvm/IR/Type.h"
29using namespace llvm;
30
31InsertPosition::InsertPosition(Instruction *InsertBefore)
32 : InsertAt(InsertBefore ? InsertBefore->getIterator()
33 : InstListType::iterator()) {}
34InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
35 : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}
36
37Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
38 InsertPosition InsertBefore)
39 : User(ty, Value::InstructionVal + it, AllocInfo) {
40 // When called with an iterator, there must be a block to insert into.
41 if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
42 BasicBlock *BB = InsertIt.getNodeParent();
43 assert(BB && "Instruction to insert before is not in a basic block!");
44 insertInto(BB, InsertBefore);
45 }
46}
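// Usage sketch (illustrative): the InsertPosition overloads above let
// creation sites pass either an existing instruction or a whole block.
// 'LHS', 'RHS' and 'BB' are assumed to exist at the call site.
//   Instruction *Sum = BinaryOperator::CreateAdd(LHS, RHS, "sum",
//                                                BB->getTerminator());
//   Instruction *Sum2 = BinaryOperator::CreateAdd(LHS, RHS, "sum2", BB);
// The first call inserts before BB's terminator; the second appends at the
// end of BB.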
47
48Instruction::~Instruction() {
49 assert(!getParent() && "Instruction still linked in the program!");
50
51 // Replace any extant metadata uses of this instruction with undef to
52 // preserve debug info accuracy. Some alternatives include:
53 // - Treat Instruction like any other Value, and point its extant metadata
54 // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
55 // trivially dead (i.e. fair game for deletion in many passes), leading to
56 // stale dbg.values being in effect for too long.
57 // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
58 // correct. OTOH results in wasted work in some common cases (e.g. when all
59 // instructions in a BasicBlock are deleted).
60 if (isUsedByMetadata())
61 ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
62
63 // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
64 // mapping in LLVMContext.
65 setMetadata(LLVMContext::MD_DIAssignID, nullptr);
66}
67
68const Module *Instruction::getModule() const {
69 return getParent()->getModule();
70}
71
72const Function *Instruction::getFunction() const {
73 return getParent()->getParent();
74}
75
76const DataLayout &Instruction::getDataLayout() const {
77 return getModule()->getDataLayout();
78}
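// Usage sketch (illustrative, assumes 'I' is already linked into a block):
//   const Function *F = I->getFunction();
//   const Module *M = I->getModule();
//   const DataLayout &DL = I->getDataLayout();
//   TypeSize Size = DL.getTypeAllocSize(I->getType());
// All three accessors walk parent links, so they are only meaningful once the
// instruction has been inserted into a basic block.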
79
80void Instruction::removeFromParent() {
81 // Perform any debug-info maintenance required.
82 handleMarkerRemoval();
83
84 getParent()->getInstList().remove(getIterator());
85}
86
87void Instruction::handleMarkerRemoval() {
88 if (!getParent()->IsNewDbgInfoFormat || !DebugMarker)
89 return;
90
91 DebugMarker->removeMarker();
92}
93
94BasicBlock::iterator Instruction::eraseFromParent() {
95 handleMarkerRemoval();
96 return getParent()->getInstList().erase(getIterator());
97}
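// Usage sketch (illustrative) of the two unlink operations. 'I', 'Dead' and
// 'OtherBB' are assumed names:
//   I->removeFromParent();                  // unlink only; caller still owns I
//   I->insertInto(OtherBB, OtherBB->end()); // re-attach it elsewhere
//   BasicBlock::iterator Next = Dead->eraseFromParent(); // unlink and delete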
98
99void Instruction::insertBefore(Instruction *InsertPos) {
100 insertBefore(InsertPos->getIterator());
101}
102
103/// Insert an unlinked instruction into a basic block immediately before the
104/// specified instruction.
105void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
106 insertBefore(*InsertPos->getParent(), InsertPos);
107}
108
109/// Insert an unlinked instruction into a basic block immediately after the
110/// specified instruction.
111void Instruction::insertAfter(Instruction *InsertPos) {
112 BasicBlock *DestParent = InsertPos->getParent();
113
114 DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
115}
116
117 BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
118 BasicBlock::iterator It) {
119 assert(getParent() == nullptr && "Expected detached instruction");
120 assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
121 "It not in ParentBB");
122 insertBefore(*ParentBB, It);
123 return getIterator();
124}
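// Usage sketch (illustrative): placing a detached instruction 'NewI' into a
// block 'BB'. Iterators obtained from the block (getFirstNonPHIIt(), begin(),
// getFirstInsertionPt()) carry the positioning information the debug-info
// maintenance below relies on.
//   NewI->insertInto(BB, BB->getFirstInsertionPt());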
125
127
128void Instruction::insertBefore(BasicBlock &BB,
129 InstListType::iterator InsertPos) {
130 assert(!DebugMarker);
131
132 BB.getInstList().insert(InsertPos, this);
133
134 if (!BB.IsNewDbgInfoFormat)
135 return;
136
137 // We've inserted "this": if InsertAtHead is set then it comes before any
138 // DbgVariableRecords attached to InsertPos. But if it's not set, then any
139 // DbgRecords should now come before "this".
140 bool InsertAtHead = InsertPos.getHeadBit();
141 if (!InsertAtHead) {
142 DbgMarker *SrcMarker = BB.getMarker(InsertPos);
143 if (SrcMarker && !SrcMarker->empty()) {
144 // If this assertion fires, the calling code is about to insert a PHI
145 // after debug-records, which would form a sequence like:
146 // %0 = PHI
147 // #dbg_value
148 // %1 = PHI
149 // Which is de-normalised and undesired -- hence the assertion. To avoid
150 // this, you must insert at that position using an iterator, and it must
151 // be acquired by calling getFirstNonPHIIt / begin or similar methods on
152 // the block. This will signal to this behind-the-scenes debug-info
153 // maintenance code that you intend the PHI to be ahead of everything,
154 // including any debug-info.
155 assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
156 adoptDbgRecords(&BB, InsertPos, false);
157 }
158 }
159
160 // If we're inserting a terminator, check if we need to flush out
161 // TrailingDbgRecords. Inserting instructions at the end of an incomplete
162 // block is handled by the code block above.
163 if (isTerminator())
164 getParent()->flushTerminatorDbgRecords();
165}
166
167/// Unlink this instruction from its current basic block and insert it into the
168/// basic block that MovePos lives in, right before MovePos.
169void Instruction::moveBefore(Instruction *MovePos) {
170 moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
171}
172
173void Instruction::moveBeforePreserving(Instruction *MovePos) {
174 moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
175}
176
177void Instruction::moveAfter(Instruction *MovePos) {
178 auto NextIt = std::next(MovePos->getIterator());
179 // We want this instruction to be moved to before NextIt in the instruction
180 // list, but before NextIt's debug value range.
181 NextIt.setHeadBit(true);
182 moveBeforeImpl(*MovePos->getParent(), NextIt, false);
183}
184
185void Instruction::moveAfterPreserving(Instruction *MovePos) {
186 auto NextIt = std::next(MovePos->getIterator());
187 // We want this instruction and its debug range to be moved to before NextIt
188 // in the instruction list, but before NextIt's debug value range.
189 NextIt.setHeadBit(true);
190 moveBeforeImpl(*MovePos->getParent(), NextIt, true);
191}
192
193void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
194 moveBeforeImpl(BB, I, false);
195}
196
197void Instruction::moveBeforePreserving(BasicBlock &BB,
198 InstListType::iterator I) {
199 moveBeforeImpl(BB, I, true);
200}
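// Usage sketch (illustrative) of the move family, assuming 'I' and 'Anchor'
// are instructions in the same function:
//   I->moveBefore(Anchor);            // debug records at the old site stay put
//   I->moveBeforePreserving(Anchor);  // attached debug records travel with I
//   I->moveAfter(Anchor);             // placed after Anchor, ahead of the next
//                                     // instruction's debug-record range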
201
202void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
203 bool Preserve) {
204 assert(I == BB.end() || I->getParent() == &BB);
205 bool InsertAtHead = I.getHeadBit();
206
207 // If we've been given the "Preserve" flag, then just move the DbgRecords
208 // with the instruction; no further special handling is needed.
209 if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) {
210 if (I != this->getIterator() || InsertAtHead) {
211 // "this" is definitely moving in the list, or it's moving ahead of its
212 // attached DbgVariableRecords. Detach any existing DbgRecords.
213 handleMarkerRemoval();
214 }
215 }
216
217 // Move this single instruction. Use the list splice method directly, not
218 // the block splicer, which will do more debug-info things.
219 BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
220
221 if (BB.IsNewDbgInfoFormat && !Preserve) {
222 DbgMarker *NextMarker = getParent()->getNextMarker(this);
223
224 // If we're inserting at point I, and not in front of the DbgRecords
225 // attached there, then we should absorb the DbgRecords attached to I.
226 if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
227 adoptDbgRecords(&BB, I, false);
228 }
229 }
230
231 if (isTerminator())
232 getParent()->flushTerminatorDbgRecords();
233}
234
235iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
236 const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
237 bool InsertAtHead) {
238 if (!From->DebugMarker)
239 return DbgMarker::getEmptyDbgRecordRange();
240
241 assert(getParent()->IsNewDbgInfoFormat);
242 assert(getParent()->IsNewDbgInfoFormat ==
243 From->getParent()->IsNewDbgInfoFormat);
244
245 if (!DebugMarker)
246 getParent()->createMarker(this);
247
248 return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
249 InsertAtHead);
250}
251
252std::optional<DbgRecord::self_iterator>
253Instruction::getDbgReinsertionPosition() {
254 // Is there a marker on the next instruction?
255 DbgMarker *NextMarker = getParent()->getNextMarker(this);
256 if (!NextMarker)
257 return std::nullopt;
258
259 // Are there any DbgRecords in the next marker?
260 if (NextMarker->StoredDbgRecords.empty())
261 return std::nullopt;
262
263 return NextMarker->StoredDbgRecords.begin();
264}
265
266bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }
267
268void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
269 bool InsertAtHead) {
270 DbgMarker *SrcMarker = BB->getMarker(It);
271 auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
272 if (BB->end() == It) {
273 SrcMarker->eraseFromParent();
274 BB->deleteTrailingDbgRecords();
275 }
276 };
277
278 if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
279 ReleaseTrailingDbgRecords();
280 return;
281 }
282
283 // If we have DbgMarkers attached to this instruction, we have to honour the
284 // ordering of DbgRecords between this and the other marker. Fall back to just
285 // absorbing from the source.
286 if (DebugMarker || It == BB->end()) {
287 // Ensure we _do_ have a marker.
288 getParent()->createMarker(this);
289 DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);
290
291 // Having transferred everything out of SrcMarker, we _could_ clean it up
292 // and free the marker now. However, that's a lot of heap-accounting for a
293 // small amount of memory with a good chance of re-use. Leave it for the
294 // moment. It will be released when the Instruction is freed in the worst
295 // case.
296 // However: if we transferred from a trailing marker off the end of the
297 // block, it's important to not leave the empty marker trailing. It will
298 // give a misleading impression that some debug records have been left
299 // trailing.
300 ReleaseTrailingDbgRecords();
301 } else {
302 // Optimisation: we're transferring all the DbgRecords from the source
303 // marker onto this empty location: just adopt the other instruction's
304 // marker.
305 DebugMarker = SrcMarker;
306 DebugMarker->MarkedInstr = this;
307 It->DebugMarker = nullptr;
308 }
309}
310
311void Instruction::dropDbgRecords() {
312 if (DebugMarker)
313 DebugMarker->dropDbgRecords();
314}
315
316void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
317 DebugMarker->dropOneDbgRecord(DVR);
318}
319
320bool Instruction::comesBefore(const Instruction *Other) const {
321 assert(getParent() && Other->getParent() &&
322 "instructions without BB parents have no order");
323 assert(getParent() == Other->getParent() &&
324 "cross-BB instruction order comparison");
325 if (!getParent()->isInstrOrderValid())
326 const_cast<BasicBlock *>(getParent())->renumberInstructions();
327 return Order < Other->Order;
328}
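// Usage sketch (illustrative), assuming 'A' and 'B' are in the same block:
//   if (A->comesBefore(B)) {
//     // A precedes B in program order.
//   }
// The ordering is cached: the first query after the block is modified
// triggers renumberInstructions(); subsequent queries are constant time.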
329
330std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
331 assert(!getType()->isVoidTy() && "Instruction must define result");
332 BasicBlock *InsertBB;
333 BasicBlock::iterator InsertPt;
334 if (auto *PN = dyn_cast<PHINode>(this)) {
335 InsertBB = PN->getParent();
336 InsertPt = InsertBB->getFirstInsertionPt();
337 } else if (auto *II = dyn_cast<InvokeInst>(this)) {
338 InsertBB = II->getNormalDest();
339 InsertPt = InsertBB->getFirstInsertionPt();
340 } else if (isa<CallBrInst>(this)) {
341 // Def is available in multiple successors, there's no single dominating
342 // insertion point.
343 return std::nullopt;
344 } else {
345 assert(!isTerminator() && "Only invoke/callbr terminators return value");
346 InsertBB = getParent();
347 InsertPt = std::next(getIterator());
348 // Any instruction inserted immediately after "this" will come before any
349 // debug-info records take effect -- thus, set the head bit indicating that
350 // to debug-info-transfer code.
351 InsertPt.setHeadBit(true);
352 }
353
354 // catchswitch blocks don't have any legal insertion point (because they
355 // are both an exception pad and a terminator).
356 if (InsertPt == InsertBB->end())
357 return std::nullopt;
358 return InsertPt;
359}
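// Usage sketch (illustrative): materialising a use right after a definition,
// assuming 'Def' produces a value and 'Slot' is a pointer to store it to:
//   if (std::optional<BasicBlock::iterator> IP = Def->getInsertionPointAfterDef())
//     new StoreInst(Def, Slot, *IP);
// For invokes this lands in the normal destination; callbr and blocks with no
// legal insertion point (e.g. catchswitch blocks) yield std::nullopt.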
360
361bool Instruction::isOnlyUserOfAnyOperand() {
362 return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
363}
364
365void Instruction::setHasNoUnsignedWrap(bool b) {
366 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
367 Inst->setHasNoUnsignedWrap(b);
368 else
369 cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
370}
371
372void Instruction::setHasNoSignedWrap(bool b) {
373 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
374 Inst->setHasNoSignedWrap(b);
375 else
376 cast<TruncInst>(this)->setHasNoSignedWrap(b);
377}
378
379void Instruction::setIsExact(bool b) {
380 cast<PossiblyExactOperator>(this)->setIsExact(b);
381}
382
383void Instruction::setNonNeg(bool b) {
384 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
385 SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
386 (b * PossiblyNonNegInst::NonNeg);
387}
388
389bool Instruction::hasNoUnsignedWrap() const {
390 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
391 return Inst->hasNoUnsignedWrap();
392
393 return cast<TruncInst>(this)->hasNoUnsignedWrap();
394}
395
396bool Instruction::hasNoSignedWrap() const {
397 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
398 return Inst->hasNoSignedWrap();
399
400 return cast<TruncInst>(this)->hasNoSignedWrap();
401}
402
403bool Instruction::hasNonNeg() const {
404 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
405 return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
406}
407
408bool Instruction::hasPoisonGeneratingFlags() const {
409 return cast<Operator>(this)->hasPoisonGeneratingFlags();
410}
411
412void Instruction::dropPoisonGeneratingFlags() {
413 switch (getOpcode()) {
414 case Instruction::Add:
415 case Instruction::Sub:
416 case Instruction::Mul:
417 case Instruction::Shl:
418 cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
419 cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
420 break;
421
422 case Instruction::UDiv:
423 case Instruction::SDiv:
424 case Instruction::AShr:
425 case Instruction::LShr:
426 cast<PossiblyExactOperator>(this)->setIsExact(false);
427 break;
428
429 case Instruction::Or:
430 cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
431 break;
432
433 case Instruction::GetElementPtr:
434 cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
435 break;
436
437 case Instruction::UIToFP:
438 case Instruction::ZExt:
439 setNonNeg(false);
440 break;
441
442 case Instruction::Trunc:
443 cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
444 cast<TruncInst>(this)->setHasNoSignedWrap(false);
445 break;
446
447 case Instruction::ICmp:
448 cast<ICmpInst>(this)->setSameSign(false);
449 break;
450 }
451
452 if (isa<FPMathOperator>(this)) {
453 setHasNoNaNs(false);
454 setHasNoInfs(false);
455 }
456
457 assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
458}
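// Usage sketch (illustrative): a transform that reassociates or speculates an
// operation typically strips these flags first, assuming 'I' is the
// instruction being rewritten:
//   if (I->hasPoisonGeneratingFlags())
//     I->dropPoisonGeneratingFlags(); // nuw/nsw, exact, disjoint, nneg, ...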
459
460bool Instruction::hasPoisonGeneratingMetadata() const {
461 return any_of(Metadata::PoisonGeneratingIDs,
462 [this](unsigned ID) { return hasMetadata(ID); });
463}
464
465bool Instruction::hasNonDebugLocLoopMetadata() const {
466 // If there is no loop metadata at all, we also don't have
467 // non-debug loop metadata, obviously.
468 if (!hasMetadata(LLVMContext::MD_loop))
469 return false;
470
471 // If we do have loop metadata, retrieve it.
472 MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);
473
474 // Check if the existing operands are debug locations. This loop
475 // should terminate after at most three iterations. Skip
476 // the first item because it is a self-reference.
477 for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
478 // Check whether the operand is a debug location.
479 if (!isa<DILocation>(Op)) {
480 return true;
481 }
482 }
483
484 // If we get here, then all we have is debug locations in the loop metadata.
485 return false;
486}
487
488void Instruction::dropPoisonGeneratingMetadata() {
489 for (unsigned ID : Metadata::PoisonGeneratingIDs)
490 eraseMetadata(ID);
491}
492
493bool Instruction::hasPoisonGeneratingReturnAttributes() const {
494 if (const auto *CB = dyn_cast<CallBase>(this)) {
495 AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
496 return RetAttrs.hasAttribute(Attribute::Range) ||
497 RetAttrs.hasAttribute(Attribute::Alignment) ||
498 RetAttrs.hasAttribute(Attribute::NonNull);
499 }
500 return false;
501}
502
503void Instruction::dropPoisonGeneratingReturnAttributes() {
504 if (auto *CB = dyn_cast<CallBase>(this)) {
505 AttributeMask AM;
506 AM.addAttribute(Attribute::Range);
507 AM.addAttribute(Attribute::Alignment);
508 AM.addAttribute(Attribute::NonNull);
509 CB->removeRetAttrs(AM);
510 }
511 assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
512}
513
514void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
515 ArrayRef<unsigned> KnownIDs) {
516 dropUnknownNonDebugMetadata(KnownIDs);
517 auto *CB = dyn_cast<CallBase>(this);
518 if (!CB)
519 return;
520 // For call instructions, we also need to drop parameter and return attributes
521 // that can cause UB if the call is moved to a location where the
522 // attribute is not valid.
523 AttributeList AL = CB->getAttributes();
524 if (AL.isEmpty())
525 return;
526 AttributeMask UBImplyingAttributes =
527 AttributeFuncs::getUBImplyingAttributes();
528 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
529 CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
530 CB->removeRetAttrs(UBImplyingAttributes);
531}
532
533void Instruction::dropUBImplyingAttrsAndMetadata() {
534 // !annotation metadata does not impact semantics.
535 // !range, !nonnull and !align produce poison, so they are safe to speculate.
536 // !noundef and various AA metadata must be dropped, as they generally produce
537 // immediate undefined behavior.
538 unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
539 LLVMContext::MD_nonnull, LLVMContext::MD_align};
540 dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
541}
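// Usage sketch (illustrative): hoisting a call to a block that does not
// dominate its original position. Attributes whose violation is immediate UB
// must be dropped; the poison-only metadata listed above may stay. 'Call' and
// 'InsertPt' are assumed names:
//   Call->dropUBImplyingAttrsAndMetadata();
//   Call->moveBefore(InsertPt);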
542
543bool Instruction::isExact() const {
544 return cast<PossiblyExactOperator>(this)->isExact();
545}
546
547void Instruction::setFast(bool B) {
548 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
549 cast<FPMathOperator>(this)->setFast(B);
550}
551
552void Instruction::setHasAllowReassoc(bool B) {
553 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
554 cast<FPMathOperator>(this)->setHasAllowReassoc(B);
555}
556
557void Instruction::setHasNoNaNs(bool B) {
558 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
559 cast<FPMathOperator>(this)->setHasNoNaNs(B);
560}
561
562void Instruction::setHasNoInfs(bool B) {
563 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
564 cast<FPMathOperator>(this)->setHasNoInfs(B);
565}
566
567void Instruction::setHasNoSignedZeros(bool B) {
568 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
569 cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
570}
571
572void Instruction::setHasAllowReciprocal(bool B) {
573 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
574 cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
575}
576
577void Instruction::setHasAllowContract(bool B) {
578 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
579 cast<FPMathOperator>(this)->setHasAllowContract(B);
580}
581
582void Instruction::setHasApproxFunc(bool B) {
583 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
584 cast<FPMathOperator>(this)->setHasApproxFunc(B);
585}
586
587void Instruction::setFastMathFlags(FastMathFlags FMF) {
588 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
589 cast<FPMathOperator>(this)->setFastMathFlags(FMF);
590}
591
592void Instruction::copyFastMathFlags(FastMathFlags FMF) {
593 assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
594 cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
595}
596
597bool Instruction::isFast() const {
598 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
599 return cast<FPMathOperator>(this)->isFast();
600}
601
602bool Instruction::hasAllowReassoc() const {
603 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
604 return cast<FPMathOperator>(this)->hasAllowReassoc();
605}
606
607bool Instruction::hasNoNaNs() const {
608 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
609 return cast<FPMathOperator>(this)->hasNoNaNs();
610}
611
612bool Instruction::hasNoInfs() const {
613 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
614 return cast<FPMathOperator>(this)->hasNoInfs();
615}
616
617bool Instruction::hasNoSignedZeros() const {
618 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
619 return cast<FPMathOperator>(this)->hasNoSignedZeros();
620}
621
622bool Instruction::hasAllowReciprocal() const {
623 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
624 return cast<FPMathOperator>(this)->hasAllowReciprocal();
625}
626
627bool Instruction::hasAllowContract() const {
628 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
629 return cast<FPMathOperator>(this)->hasAllowContract();
630}
631
632bool Instruction::hasApproxFunc() const {
633 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
634 return cast<FPMathOperator>(this)->hasApproxFunc();
635}
636
637FastMathFlags Instruction::getFastMathFlags() const {
638 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
639 return cast<FPMathOperator>(this)->getFastMathFlags();
640}
641
642void Instruction::copyFastMathFlags(const Instruction *I) {
643 copyFastMathFlags(I->getFastMathFlags());
644}
645
646void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
647 // Copy the wrapping flags.
648 if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
649 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
650 setHasNoSignedWrap(OB->hasNoSignedWrap());
651 setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
652 }
653 }
654
655 if (auto *TI = dyn_cast<TruncInst>(V)) {
656 if (isa<TruncInst>(this)) {
657 setHasNoSignedWrap(TI->hasNoSignedWrap());
658 setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
659 }
660 }
661
662 // Copy the exact flag.
663 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
664 if (isa<PossiblyExactOperator>(this))
665 setIsExact(PE->isExact());
666
667 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
668 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
669 DestPD->setIsDisjoint(SrcPD->isDisjoint());
670
671 // Copy the fast-math flags.
672 if (auto *FP = dyn_cast<FPMathOperator>(V))
673 if (isa<FPMathOperator>(this))
674 copyFastMathFlags(FP->getFastMathFlags());
675
676 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
677 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
678 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
679 DestGEP->getNoWrapFlags());
680
681 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
682 if (isa<PossiblyNonNegInst>(this))
683 setNonNeg(NNI->hasNonNeg());
684
685 if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
686 if (auto *DestICmp = dyn_cast<ICmpInst>(this))
687 DestICmp->setSameSign(SrcICmp->hasSameSign());
688}
689
690void Instruction::andIRFlags(const Value *V) {
691 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
692 if (isa<OverflowingBinaryOperator>(this)) {
693 setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
694 setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
695 }
696 }
697
698 if (auto *TI = dyn_cast<TruncInst>(V)) {
699 if (isa<TruncInst>(this)) {
700 setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
701 setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
702 }
703 }
704
705 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
706 if (isa<PossiblyExactOperator>(this))
707 setIsExact(isExact() && PE->isExact());
708
709 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
710 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
711 DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());
712
713 if (auto *FP = dyn_cast<FPMathOperator>(V)) {
714 if (isa<FPMathOperator>(this)) {
715 FastMathFlags FM = getFastMathFlags();
716 FM &= FP->getFastMathFlags();
717 copyFastMathFlags(FM);
718 }
719 }
720
721 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
722 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
723 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
724 DestGEP->getNoWrapFlags());
725
726 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
727 if (isa<PossiblyNonNegInst>(this))
728 setNonNeg(hasNonNeg() && NNI->hasNonNeg());
729
730 if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
731 if (auto *DestICmp = dyn_cast<ICmpInst>(this))
732 DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
733}
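// Usage sketch (illustrative): copyIRFlags is the clone-style transfer,
// andIRFlags the conservative intersection used when one instruction replaces
// several. 'NewI', 'OldI' and 'OtherI' are assumed names:
//   NewI->copyIRFlags(OldI);  // NewI carries OldI's wrap/exact/FMF/GEP flags
//   NewI->andIRFlags(OtherI); // keep only flags both originals agreed on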
734
735const char *Instruction::getOpcodeName(unsigned OpCode) {
736 switch (OpCode) {
737 // Terminators
738 case Ret: return "ret";
739 case Br: return "br";
740 case Switch: return "switch";
741 case IndirectBr: return "indirectbr";
742 case Invoke: return "invoke";
743 case Resume: return "resume";
744 case Unreachable: return "unreachable";
745 case CleanupRet: return "cleanupret";
746 case CatchRet: return "catchret";
747 case CatchPad: return "catchpad";
748 case CatchSwitch: return "catchswitch";
749 case CallBr: return "callbr";
750
751 // Standard unary operators...
752 case FNeg: return "fneg";
753
754 // Standard binary operators...
755 case Add: return "add";
756 case FAdd: return "fadd";
757 case Sub: return "sub";
758 case FSub: return "fsub";
759 case Mul: return "mul";
760 case FMul: return "fmul";
761 case UDiv: return "udiv";
762 case SDiv: return "sdiv";
763 case FDiv: return "fdiv";
764 case URem: return "urem";
765 case SRem: return "srem";
766 case FRem: return "frem";
767
768 // Logical operators...
769 case And: return "and";
770 case Or : return "or";
771 case Xor: return "xor";
772
773 // Memory instructions...
774 case Alloca: return "alloca";
775 case Load: return "load";
776 case Store: return "store";
777 case AtomicCmpXchg: return "cmpxchg";
778 case AtomicRMW: return "atomicrmw";
779 case Fence: return "fence";
780 case GetElementPtr: return "getelementptr";
781
782 // Convert instructions...
783 case Trunc: return "trunc";
784 case ZExt: return "zext";
785 case SExt: return "sext";
786 case FPTrunc: return "fptrunc";
787 case FPExt: return "fpext";
788 case FPToUI: return "fptoui";
789 case FPToSI: return "fptosi";
790 case UIToFP: return "uitofp";
791 case SIToFP: return "sitofp";
792 case IntToPtr: return "inttoptr";
793 case PtrToInt: return "ptrtoint";
794 case BitCast: return "bitcast";
795 case AddrSpaceCast: return "addrspacecast";
796
797 // Other instructions...
798 case ICmp: return "icmp";
799 case FCmp: return "fcmp";
800 case PHI: return "phi";
801 case Select: return "select";
802 case Call: return "call";
803 case Shl: return "shl";
804 case LShr: return "lshr";
805 case AShr: return "ashr";
806 case VAArg: return "va_arg";
807 case ExtractElement: return "extractelement";
808 case InsertElement: return "insertelement";
809 case ShuffleVector: return "shufflevector";
810 case ExtractValue: return "extractvalue";
811 case InsertValue: return "insertvalue";
812 case LandingPad: return "landingpad";
813 case CleanupPad: return "cleanuppad";
814 case Freeze: return "freeze";
815
816 default: return "<Invalid operator> ";
817 }
818}
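// Usage sketch (illustrative): the opcode name is mainly useful for
// diagnostics, assuming 'I' is any instruction:
//   errs() << "visiting " << I->getOpcodeName() << "\n";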
819
820/// This must be kept in sync with FunctionComparator::cmpOperations in
821/// lib/Transforms/IPO/MergeFunctions.cpp.
822bool Instruction::hasSameSpecialState(const Instruction *I2,
823 bool IgnoreAlignment,
824 bool IntersectAttrs) const {
825 auto I1 = this;
826 assert(I1->getOpcode() == I2->getOpcode() &&
827 "Can not compare special state of different instructions");
828
829 auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
830 const CallBase *CB1) {
831 return IntersectAttrs
832 ? CB0->getAttributes()
833 .intersectWith(CB0->getContext(), CB1->getAttributes())
834 .has_value()
835 : CB0->getAttributes() == CB1->getAttributes();
836 };
837
838 if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
839 return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
840 (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
841 IgnoreAlignment);
842 if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
843 return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
844 (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
845 IgnoreAlignment) &&
846 LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
847 LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
848 if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
849 return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
850 (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
851 IgnoreAlignment) &&
852 SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
853 SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
854 if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
855 return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
856 if (const CallInst *CI = dyn_cast<CallInst>(I1))
857 return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
858 CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
859 CheckAttrsSame(CI, cast<CallInst>(I2)) &&
860 CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
861 if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
862 return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
863 CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
864 CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
865 if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
866 return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
867 CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
868 CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
869 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
870 return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
871 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
872 return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
873 if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
874 return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
875 FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
876 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
877 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
878 CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
879 CXI->getSuccessOrdering() ==
880 cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
881 CXI->getFailureOrdering() ==
882 cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
883 CXI->getSyncScopeID() ==
884 cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
885 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
886 return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
887 RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
888 RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
889 RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
890 if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
891 return SVI->getShuffleMask() ==
892 cast<ShuffleVectorInst>(I2)->getShuffleMask();
893 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
894 return GEP->getSourceElementType() ==
895 cast<GetElementPtrInst>(I2)->getSourceElementType();
896
897 return true;
898}
899
900bool Instruction::isIdenticalTo(const Instruction *I) const {
901 return isIdenticalToWhenDefined(I) &&
902 SubclassOptionalData == I->SubclassOptionalData;
903}
904
905bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
906 bool IntersectAttrs) const {
907 if (getOpcode() != I->getOpcode() ||
908 getNumOperands() != I->getNumOperands() || getType() != I->getType())
909 return false;
910
911 // If both instructions have no operands, they are identical.
912 if (getNumOperands() == 0 && I->getNumOperands() == 0)
913 return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
914 IntersectAttrs);
915
916 // We have two instructions of identical opcode and #operands. Check to see
917 // if all operands are the same.
918 if (!std::equal(op_begin(), op_end(), I->op_begin()))
919 return false;
920
921 // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
922 if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
923 const PHINode *otherPHI = cast<PHINode>(I);
924 return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
925 otherPHI->block_begin());
926 }
927
928 return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
929 IntersectAttrs);
930}
931
932// Keep this in sync with FunctionComparator::cmpOperations in
933// lib/Transforms/IPO/MergeFunctions.cpp.
934bool Instruction::isSameOperationAs(const Instruction *I,
935 unsigned flags) const {
936 bool IgnoreAlignment = flags & CompareIgnoringAlignment;
937 bool UseScalarTypes = flags & CompareUsingScalarTypes;
938 bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;
939
940 if (getOpcode() != I->getOpcode() ||
941 getNumOperands() != I->getNumOperands() ||
942 (UseScalarTypes ?
943 getType()->getScalarType() != I->getType()->getScalarType() :
944 getType() != I->getType()))
945 return false;
946
947 // We have two instructions of identical opcode and #operands. Check to see
948 // if all operands are the same type
949 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
950 if (UseScalarTypes ?
951 getOperand(i)->getType()->getScalarType() !=
952 I->getOperand(i)->getType()->getScalarType() :
953 getOperand(i)->getType() != I->getOperand(i)->getType())
954 return false;
955
956 return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
957}
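// Usage sketch (illustrative): isIdenticalTo also requires equal operands,
// while isSameOperationAs only compares opcode, types and the special state
// checked above. The flags relax the comparison:
//   bool Mergeable =
//       I1->isSameOperationAs(I2, Instruction::CompareIgnoringAlignment);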
958
959bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
960 for (const Use &U : uses()) {
961 // PHI nodes use values in the corresponding predecessor block. For other
962 // instructions, just check to see whether the parent of the use matches up.
963 const Instruction *I = cast<Instruction>(U.getUser());
964 const PHINode *PN = dyn_cast<PHINode>(I);
965 if (!PN) {
966 if (I->getParent() != BB)
967 return true;
968 continue;
969 }
970
971 if (PN->getIncomingBlock(U) != BB)
972 return true;
973 }
974 return false;
975}
976
977bool Instruction::mayReadFromMemory() const {
978 switch (getOpcode()) {
979 default: return false;
980 case Instruction::VAArg:
981 case Instruction::Load:
982 case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
983 case Instruction::AtomicCmpXchg:
984 case Instruction::AtomicRMW:
985 case Instruction::CatchPad:
986 case Instruction::CatchRet:
987 return true;
988 case Instruction::Call:
989 case Instruction::Invoke:
990 case Instruction::CallBr:
991 return !cast<CallBase>(this)->onlyWritesMemory();
992 case Instruction::Store:
993 return !cast<StoreInst>(this)->isUnordered();
994 }
995}
996
997bool Instruction::mayWriteToMemory() const {
998 switch (getOpcode()) {
999 default: return false;
1000 case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
1001 case Instruction::Store:
1002 case Instruction::VAArg:
1003 case Instruction::AtomicCmpXchg:
1004 case Instruction::AtomicRMW:
1005 case Instruction::CatchPad:
1006 case Instruction::CatchRet:
1007 return true;
1008 case Instruction::Call:
1009 case Instruction::Invoke:
1010 case Instruction::CallBr:
1011 return !cast<CallBase>(this)->onlyReadsMemory();
1012 case Instruction::Load:
1013 return !cast<LoadInst>(this)->isUnordered();
1014 }
1015}
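// Usage sketch (illustrative): a conservative check before reordering a load
// past another instruction, assuming 'LI' and 'Other' are in the same block:
//   if (LI->mayReadFromMemory() && Other->mayWriteToMemory())
//     return false; // a write might alias the load; keep the original order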
1016
1017bool Instruction::isAtomic() const {
1018 switch (getOpcode()) {
1019 default:
1020 return false;
1021 case Instruction::AtomicCmpXchg:
1022 case Instruction::AtomicRMW:
1023 case Instruction::Fence:
1024 return true;
1025 case Instruction::Load:
1026 return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1027 case Instruction::Store:
1028 return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1029 }
1030}
1031
1032bool Instruction::hasAtomicLoad() const {
1033 assert(isAtomic());
1034 switch (getOpcode()) {
1035 default:
1036 return false;
1037 case Instruction::AtomicCmpXchg:
1038 case Instruction::AtomicRMW:
1039 case Instruction::Load:
1040 return true;
1041 }
1042}
1043
1044bool Instruction::hasAtomicStore() const {
1045 assert(isAtomic());
1046 switch (getOpcode()) {
1047 default:
1048 return false;
1049 case Instruction::AtomicCmpXchg:
1050 case Instruction::AtomicRMW:
1051 case Instruction::Store:
1052 return true;
1053 }
1054}
1055
1056bool Instruction::isVolatile() const {
1057 switch (getOpcode()) {
1058 default:
1059 return false;
1060 case Instruction::AtomicRMW:
1061 return cast<AtomicRMWInst>(this)->isVolatile();
1062 case Instruction::Store:
1063 return cast<StoreInst>(this)->isVolatile();
1064 case Instruction::Load:
1065 return cast<LoadInst>(this)->isVolatile();
1066 case Instruction::AtomicCmpXchg:
1067 return cast<AtomicCmpXchgInst>(this)->isVolatile();
1068 case Instruction::Call:
1069 case Instruction::Invoke:
1070 // There are a very limited number of intrinsics with volatile flags.
1071 if (auto *II = dyn_cast<IntrinsicInst>(this)) {
1072 if (auto *MI = dyn_cast<MemIntrinsic>(II))
1073 return MI->isVolatile();
1074 switch (II->getIntrinsicID()) {
1075 default: break;
1076 case Intrinsic::matrix_column_major_load:
1077 return cast<ConstantInt>(II->getArgOperand(2))->isOne();
1078 case Intrinsic::matrix_column_major_store:
1079 return cast<ConstantInt>(II->getArgOperand(3))->isOne();
1080 }
1081 }
1082 return false;
1083 }
1084}
1085
1086Type *Instruction::getAccessType() const {
1087 switch (getOpcode()) {
1088 case Instruction::Store:
1089 return cast<StoreInst>(this)->getValueOperand()->getType();
1090 case Instruction::Load:
1091 case Instruction::AtomicRMW:
1092 return getType();
1093 case Instruction::AtomicCmpXchg:
1094 return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
1095 case Instruction::Call:
1096 case Instruction::Invoke:
1097 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
1098 switch (II->getIntrinsicID()) {
1099 case Intrinsic::masked_load:
1100 case Intrinsic::masked_gather:
1101 case Intrinsic::masked_expandload:
1102 case Intrinsic::vp_load:
1103 case Intrinsic::vp_gather:
1104 case Intrinsic::experimental_vp_strided_load:
1105 return II->getType();
1106 case Intrinsic::masked_store:
1107 case Intrinsic::masked_scatter:
1108 case Intrinsic::masked_compressstore:
1109 case Intrinsic::vp_store:
1110 case Intrinsic::vp_scatter:
1111 case Intrinsic::experimental_vp_strided_store:
1112 return II->getOperand(0)->getType();
1113 default:
1114 break;
1115 }
1116 }
1117 }
1118
1119 return nullptr;
1120}
1121
1122static bool canUnwindPastLandingPad(const LandingPadInst *LP,
1123 bool IncludePhaseOneUnwind) {
1124 // Because phase one unwinding skips cleanup landingpads, we effectively
1125 // unwind past this frame, and callers need to have valid unwind info.
1126 if (LP->isCleanup())
1127 return IncludePhaseOneUnwind;
1128
1129 for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
1130 Constant *Clause = LP->getClause(I);
1131 // catch ptr null catches all exceptions.
1132 if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
1133 return false;
1134 // filter [0 x ptr] catches all exceptions.
1135 if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
1136 return false;
1137 }
1138
1139 // May catch only some subset of exceptions, in which case other exceptions
1140 // will continue unwinding.
1141 return true;
1142}
1143
1144bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
1145 switch (getOpcode()) {
1146 case Instruction::Call:
1147 return !cast<CallInst>(this)->doesNotThrow();
1148 case Instruction::CleanupRet:
1149 return cast<CleanupReturnInst>(this)->unwindsToCaller();
1150 case Instruction::CatchSwitch:
1151 return cast<CatchSwitchInst>(this)->unwindsToCaller();
1152 case Instruction::Resume:
1153 return true;
1154 case Instruction::Invoke: {
1155 // Landingpads themselves don't unwind -- however, an invoke of a skipped
1156 // landingpad may continue unwinding.
1157 BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
1158 Instruction *Pad = UnwindDest->getFirstNonPHI();
1159 if (auto *LP = dyn_cast<LandingPadInst>(Pad))
1160 return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
1161 return false;
1162 }
1163 case Instruction::CleanupPad:
1164 // Treat the same as cleanup landingpad.
1165 return IncludePhaseOneUnwind;
1166 default:
1167 return false;
1168 }
1169}
1170
1171bool Instruction::mayHaveSideEffects() const {
1172 return mayWriteToMemory() || mayThrow() || !willReturn();
1173}
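// Usage sketch (illustrative): the classic trivially-dead test combines the
// use count with the side-effect query above, assuming 'I' is a candidate for
// deletion:
//   if (I->use_empty() && !I->mayHaveSideEffects())
//     I->eraseFromParent();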
1174
1175bool Instruction::isSafeToRemove() const {
1176 return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
1177 !this->isTerminator() && !this->isEHPad();
1178}
1179
1180bool Instruction::willReturn() const {
1181 // Volatile store isn't guaranteed to return; see LangRef.
1182 if (auto *SI = dyn_cast<StoreInst>(this))
1183 return !SI->isVolatile();
1184
1185 if (const auto *CB = dyn_cast<CallBase>(this))
1186 return CB->hasFnAttr(Attribute::WillReturn);
1187 return true;
1188}
1189
1190bool Instruction::isLifetimeStartOrEnd() const {
1191 auto *II = dyn_cast<IntrinsicInst>(this);
1192 if (!II)
1193 return false;
1194 Intrinsic::ID ID = II->getIntrinsicID();
1195 return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
1196}
1197
1198bool Instruction::isLaunderOrStripInvariantGroup() const {
1199 auto *II = dyn_cast<IntrinsicInst>(this);
1200 if (!II)
1201 return false;
1202 Intrinsic::ID ID = II->getIntrinsicID();
1203 return ID == Intrinsic::launder_invariant_group ||
1204 ID == Intrinsic::strip_invariant_group;
1205}
1206
1207bool Instruction::isDebugOrPseudoInst() const {
1208 return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
1209}
1210
1211const Instruction *
1212Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
1213 for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
1214 if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1215 return I;
1216 return nullptr;
1217}
1218
1219const Instruction *
1220Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
1221 for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
1222 if (!isa<DbgInfoIntrinsic>(I) &&
1223 !(SkipPseudoOp && isa<PseudoProbeInst>(I)) &&
1224 !(isa<IntrinsicInst>(I) &&
1225 cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fake_use))
1226 return I;
1227 return nullptr;
1228}
1229
1230const DebugLoc &Instruction::getStableDebugLoc() const {
1231 if (isa<DbgInfoIntrinsic>(this))
1232 if (const Instruction *Next = getNextNonDebugInstruction())
1233 return Next->getDebugLoc();
1234 return getDebugLoc();
1235}
1236
1237bool Instruction::isAssociative() const {
1238 if (auto *II = dyn_cast<IntrinsicInst>(this))
1239 return II->isAssociative();
1240 unsigned Opcode = getOpcode();
1241 if (isAssociative(Opcode))
1242 return true;
1243
1244 switch (Opcode) {
1245 case FMul:
1246 case FAdd:
1247 return cast<FPMathOperator>(this)->hasAllowReassoc() &&
1248 cast<FPMathOperator>(this)->hasNoSignedZeros();
1249 default:
1250 return false;
1251 }
1252}
1253
1254bool Instruction::isCommutative() const {
1255 if (auto *II = dyn_cast<IntrinsicInst>(this))
1256 return II->isCommutative();
1257 // TODO: Should allow icmp/fcmp?
1258 return isCommutative(getOpcode());
1259}
1260
1261unsigned Instruction::getNumSuccessors() const {
1262 switch (getOpcode()) {
1263#define HANDLE_TERM_INST(N, OPC, CLASS) \
1264 case Instruction::OPC: \
1265 return static_cast<const CLASS *>(this)->getNumSuccessors();
1266#include "llvm/IR/Instruction.def"
1267 default:
1268 break;
1269 }
1270 llvm_unreachable("not a terminator");
1271}
1272
1273BasicBlock *Instruction::getSuccessor(unsigned idx) const {
1274 switch (getOpcode()) {
1275#define HANDLE_TERM_INST(N, OPC, CLASS) \
1276 case Instruction::OPC: \
1277 return static_cast<const CLASS *>(this)->getSuccessor(idx);
1278#include "llvm/IR/Instruction.def"
1279 default:
1280 break;
1281 }
1282 llvm_unreachable("not a terminator");
1283}
1284
1285void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
1286 switch (getOpcode()) {
1287#define HANDLE_TERM_INST(N, OPC, CLASS) \
1288 case Instruction::OPC: \
1289 return static_cast<CLASS *>(this)->setSuccessor(idx, B);
1290#include "llvm/IR/Instruction.def"
1291 default:
1292 break;
1293 }
1294 llvm_unreachable("not a terminator");
1295}
1296
1297void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
1298 for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
1299 Idx != NumSuccessors; ++Idx)
1300 if (getSuccessor(Idx) == OldBB)
1301 setSuccessor(Idx, NewBB);
1302}
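// Usage sketch (illustrative): retargeting control flow after splitting a
// block, assuming 'Pred' used to branch to 'OldBB' and should now branch to
// 'NewBB':
//   Pred->getTerminator()->replaceSuccessorWith(OldBB, NewBB);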
1303
1304Instruction *Instruction::cloneImpl() const {
1305 llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
1306}
1307
1308void Instruction::swapProfMetadata() {
1309 MDNode *ProfileData = getBranchWeightMDNode(*this);
1310 if (!ProfileData)
1311 return;
1312 unsigned FirstIdx = getBranchWeightOffset(ProfileData);
1313 if (ProfileData->getNumOperands() != 2 + FirstIdx)
1314 return;
1315
1316 unsigned SecondIdx = FirstIdx + 1;
1317 SmallVector<Metadata *, 4> Ops;
1318 // If there are more weights past the second, we can't swap them
1319 if (ProfileData->getNumOperands() > SecondIdx + 1)
1320 return;
1321 for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
1322 Ops.push_back(ProfileData->getOperand(Idx));
1323 }
1324 // Switch the order of the weights
1325 Ops.push_back(ProfileData->getOperand(SecondIdx));
1326 Ops.push_back(ProfileData->getOperand(FirstIdx));
1327 setMetadata(LLVMContext::MD_prof,
1328 MDNode::get(ProfileData->getContext(), Ops));
1329}
1330
1331void Instruction::copyMetadata(const Instruction &SrcInst,
1332 ArrayRef<unsigned> WL) {
1333 if (!SrcInst.hasMetadata())
1334 return;
1335
1336 DenseSet<unsigned> WLS(WL.begin(), WL.end());
1337
1338 // Otherwise, enumerate and copy over metadata from the old instruction to the
1339 // new one.
1340 SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
1341 SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
1342 for (const auto &MD : TheMDs) {
1343 if (WL.empty() || WLS.count(MD.first))
1344 setMetadata(MD.first, MD.second);
1345 }
1346 if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
1347 setDebugLoc(SrcInst.getDebugLoc());
1348}
1349
1350Instruction *Instruction::clone() const {
1351 Instruction *New = nullptr;
1352 switch (getOpcode()) {
1353 default:
1354 llvm_unreachable("Unhandled Opcode.");
1355#define HANDLE_INST(num, opc, clas) \
1356 case Instruction::opc: \
1357 New = cast<clas>(this)->cloneImpl(); \
1358 break;
1359#include "llvm/IR/Instruction.def"
1360#undef HANDLE_INST
1361 }
1362
1363 New->SubclassOptionalData = SubclassOptionalData;
1364 New->copyMetadata(*this);
1365 return New;
1366}