LLVM 23.0.0git
MachineInstr.cpp
Go to the documentation of this file.
1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/MC/MCInstrDesc.h"
53#include "llvm/Support/Debug.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <utility>
63
64using namespace llvm;
65
// Debugging aid: when enabled, MachineInstr dumps also print the instruction's
// memory address, making it easier to correlate dumps with a debugger session.
static cl::opt<bool>
    PrintMIAddrs("print-mi-addrs", cl::Hidden,
                 cl::desc("Print addresses of MachineInstrs when dumping"));
69
71 if (const MachineBasicBlock *MBB = MI.getParent())
72 if (const MachineFunction *MF = MBB->getParent())
73 return MF;
74 return nullptr;
75}
76
77// Try to crawl up to the machine function and get TRI/MRI/TII from it.
79 const TargetRegisterInfo *&TRI,
81 const TargetInstrInfo *&TII) {
82
83 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
84 TRI = MF->getSubtarget().getRegisterInfo();
85 MRI = &MF->getRegInfo();
86 TII = MF->getSubtarget().getInstrInfo();
87 }
88}
89
91 for (MCPhysReg ImpDef : MCID->implicit_defs())
92 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
93 for (MCPhysReg ImpUse : MCID->implicit_uses())
94 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
95}
96
97/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
98/// implicit operands. It reserves space for the number of operands specified by
99/// the MCInstrDesc.
100MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
101 DebugLoc DL, bool NoImp)
102 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
103 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
104 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
105
106 // Reserve space for the expected number of operands.
107 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
108 MCID->implicit_uses().size()) {
109 CapOperands = OperandCapacity::get(NumOps);
110 Operands = MF.allocateOperandArray(CapOperands);
111 }
112
113 if (!NoImp)
115}
116
/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
      Opcode(MI.getOpcode()) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Allocate exactly as many operand slots as the source instruction uses.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably.
  // (addOperand deliberately resets TiedTo on every copied operand, so the
  // tie encoding must be transplanted wholesale afterwards.)
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
144
146 if (getParent())
147 getMF()->handleChangeDesc(*this, TID);
148 MCID = &TID;
149 Opcode = TID.Opcode;
150}
151
152void MachineInstr::moveBefore(MachineInstr *MovePos) {
153 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
154}
155
156/// getRegInfo - If this instruction is embedded into a MachineFunction,
157/// return the MachineRegisterInfo object for the current function, otherwise
158/// return null.
159MachineRegisterInfo *MachineInstr::getRegInfo() {
161 return &MBB->getParent()->getRegInfo();
162 return nullptr;
163}
164
165const MachineRegisterInfo *MachineInstr::getRegInfo() const {
166 if (const MachineBasicBlock *MBB = getParent())
167 return &MBB->getParent()->getRegInfo();
168 return nullptr;
169}
170
171void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
172 for (MachineOperand &MO : operands())
173 if (MO.isReg())
174 MRI.removeRegOperandFromUseList(&MO);
175}
176
177void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
178 for (MachineOperand &MO : operands())
179 if (MO.isReg())
180 MRI.addRegOperandToUseList(&MO);
181}
182
185 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
186 MachineFunction *MF = MBB->getParent();
187 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
188 addOperand(*MF, Op);
189}
190
191/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
192/// ranges. If MRI is non-null also update use-def chains.
194 unsigned NumOps, MachineRegisterInfo *MRI) {
195 if (MRI)
196 return MRI->moveOperands(Dst, Src, NumOps);
197 // MachineOperand is a trivially copyable type so we can just use memmove.
198 assert(Dst && Src && "Unknown operands");
199 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
200}
201
202/// addOperand - Add the specified operand to the instruction. If it is an
203/// implicit operand, it is added to the end of the operand list. If it is
204/// an explicit operand it is added at the end of the explicit operand list
205/// (before the first implicit operand).
207 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
208 "Cannot add more operands.");
209 assert(MCID && "Cannot add operands before providing an instr descriptor");
210
211 // Check if we're adding one of our existing operands.
212 if (&Op >= Operands && &Op < Operands + NumOperands) {
213 // This is unusual: MI->addOperand(MI->getOperand(i)).
214 // If adding Op requires reallocating or moving existing operands around,
215 // the Op reference could go stale. Support it by copying Op.
216 MachineOperand CopyOp(Op);
217 return addOperand(MF, CopyOp);
218 }
219
220 // Find the insert location for the new operand. Implicit registers go at
221 // the end, everything else goes before the implicit regs.
222 //
223 // FIXME: Allow mixed explicit and implicit operands on inline asm.
224 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
225 // implicit-defs, but they must not be moved around. See the FIXME in
226 // InstrEmitter.cpp.
227 unsigned OpNo = getNumOperands();
228 bool isImpReg = Op.isReg() && Op.isImplicit();
229 if (!isImpReg && !isInlineAsm()) {
230 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
231 --OpNo;
232 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
233 }
234 }
235
236 // OpNo now points as the desired insertion point. Unless this is a variadic
237 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
238 // RegMask operands go between the explicit and implicit operands.
239 MachineRegisterInfo *MRI = getRegInfo();
240
241 // Determine if the Operands array needs to be reallocated.
242 // Save the old capacity and operand array.
243 OperandCapacity OldCap = CapOperands;
244 MachineOperand *OldOperands = Operands;
245 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
246 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
247 Operands = MF.allocateOperandArray(CapOperands);
248 // Move the operands before the insertion point.
249 if (OpNo)
250 moveOperands(Operands, OldOperands, OpNo, MRI);
251 }
252
253 // Move the operands following the insertion point.
254 if (OpNo != NumOperands)
255 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
256 MRI);
257 ++NumOperands;
258
259 // Deallocate the old operand array.
260 if (OldOperands != Operands && OldOperands)
261 MF.deallocateOperandArray(OldCap, OldOperands);
262
263 // Copy Op into place. It still needs to be inserted into the MRI use lists.
264 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
265 NewMO->ParentMI = this;
266
267 // When adding a register operand, tell MRI about it.
268 if (NewMO->isReg()) {
269 // Ensure isOnRegUseList() returns false, regardless of Op's status.
270 NewMO->Contents.Reg.Prev = nullptr;
271 // Ignore existing ties. This is not a property that can be copied.
272 NewMO->TiedTo = 0;
273 // Add the new operand to MRI, but only for instructions in an MBB.
274 if (MRI)
275 MRI->addRegOperandToUseList(NewMO);
276 // The MCID operand information isn't accurate until we start adding
277 // explicit operands. The implicit operands are added first, then the
278 // explicits are inserted before them.
279 if (!isImpReg) {
280 // Tie uses to defs as indicated in MCInstrDesc.
281 if (NewMO->isUse()) {
282 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
283 if (DefIdx != -1)
284 tieOperands(DefIdx, OpNo);
285 }
286 // If the register operand is flagged as early, mark the operand as such.
287 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
288 NewMO->setIsEarlyClobber(true);
289 }
290 // Ensure debug instructions set debug flag on register uses.
291 if (NewMO->isUse() && isDebugInstr())
292 NewMO->setIsDebug();
293 }
294}
295
/// Remove the operand at index OpNo, shifting all following operands down by
/// one and updating MRI use lists when the instruction is inserted in a block.
void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie involving this operand before it moves.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Close the gap: slide the tail of the operand array down one slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
319
320void MachineInstr::setExtraInfo(MachineFunction &MF,
322 MCSymbol *PreInstrSymbol,
323 MCSymbol *PostInstrSymbol,
324 MDNode *HeapAllocMarker, MDNode *PCSections,
325 uint32_t CFIType, MDNode *MMRAs, Value *DS) {
326 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
327 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
328 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
329 bool HasPCSections = PCSections != nullptr;
330 bool HasCFIType = CFIType != 0;
331 bool HasMMRAs = MMRAs != nullptr;
332 bool HasDS = DS != nullptr;
333 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
334 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs +
335 HasDS;
336
337 // Drop all extra info if there is none.
338 if (NumPointers <= 0) {
339 Info.clear();
340 return;
341 }
342
343 // If more than one pointer, then store out of line. Store heap alloc markers
344 // out of line because PointerSumType cannot hold more than 4 tag types with
345 // 32-bit pointers.
346 // FIXME: Maybe we should make the symbols in the extra info mutable?
347 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
348 HasCFIType || HasDS) {
349 Info.set<EIIK_OutOfLine>(
350 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
351 HeapAllocMarker, PCSections, CFIType, MMRAs, DS));
352 return;
353 }
354
355 // Otherwise store the single pointer inline.
356 if (HasPreInstrSymbol)
357 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
358 else if (HasPostInstrSymbol)
359 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
360 else
361 Info.set<EIIK_MMO>(MMOs[0]);
362}
363
372
375 if (MMOs.empty()) {
376 dropMemRefs(MF);
377 return;
378 }
379
380 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
383}
384
392
393void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
394 if (this == &MI)
395 // Nothing to do for a self-clone!
396 return;
397
398 assert(&MF == MI.getMF() &&
399 "Invalid machine functions when cloning memory refrences!");
400 // See if we can just steal the extra info already allocated for the
401 // instruction. We can do this whenever the pre- and post-instruction symbols
402 // are the same (including null).
403 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
404 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
405 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
406 getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
407 MI.getMMRAMetadata()) {
408 Info = MI.Info;
409 return;
410 }
411
412 // Otherwise, fall back on a copy-based clone.
413 setMemRefs(MF, MI.memoperands());
414}
415
416/// Check to see if the MMOs pointed to by the two MemRefs arrays are
417/// identical.
420 if (LHS.size() != RHS.size())
421 return false;
422
423 auto LHSPointees = make_pointee_range(LHS);
424 auto RHSPointees = make_pointee_range(RHS);
425 return std::equal(LHSPointees.begin(), LHSPointees.end(),
426 RHSPointees.begin());
427}
428
431 // Try handling easy numbers of MIs with simpler mechanisms.
432 if (MIs.empty()) {
433 dropMemRefs(MF);
434 return;
435 }
436 if (MIs.size() == 1) {
437 cloneMemRefs(MF, *MIs[0]);
438 return;
439 }
440 // Because an empty memoperands list provides *no* information and must be
441 // handled conservatively (assuming the instruction can do anything), the only
442 // way to merge with it is to drop all other memoperands.
443 if (MIs[0]->memoperands_empty()) {
444 dropMemRefs(MF);
445 return;
446 }
447
448 // Handle the general case.
450 // Start with the first instruction.
451 assert(&MF == MIs[0]->getMF() &&
452 "Invalid machine functions when cloning memory references!");
453 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
454 // Now walk all the other instructions and accumulate any different MMOs.
455 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
456 assert(&MF == MI.getMF() &&
457 "Invalid machine functions when cloning memory references!");
458
459 // Skip MIs with identical operands to the first. This is a somewhat
460 // arbitrary hack but will catch common cases without being quadratic.
461 // TODO: We could fully implement merge semantics here if needed.
462 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
463 continue;
464
465 // Because an empty memoperands list provides *no* information and must be
466 // handled conservatively (assuming the instruction can do anything), the
467 // only way to merge with it is to drop all other memoperands.
468 if (MI.memoperands_empty()) {
469 dropMemRefs(MF);
470 return;
471 }
472
473 // Otherwise accumulate these into our temporary buffer of the merged state.
474 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
475 }
476
477 setMemRefs(MF, MergedMMOs);
478}
479
481 // Do nothing if old and new symbols are the same.
482 if (Symbol == getPreInstrSymbol())
483 return;
484
485 // If there was only one symbol and we're removing it, just clear info.
486 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
487 Info.clear();
488 return;
489 }
490
491 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
494}
495
497 // Do nothing if old and new symbols are the same.
498 if (Symbol == getPostInstrSymbol())
499 return;
500
501 // If there was only one symbol and we're removing it, just clear info.
502 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
503 Info.clear();
504 return;
505 }
506
507 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
510}
511
513 // Do nothing if old and new symbols are the same.
514 if (Marker == getHeapAllocMarker())
515 return;
516
517 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
520}
521
523 // Do nothing if old and new symbols are the same.
524 if (PCSections == getPCSections())
525 return;
526
527 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
528 getHeapAllocMarker(), PCSections, getCFIType(),
530}
531
533 // Do nothing if old and new types are the same.
534 if (Type == getCFIType())
535 return;
536
537 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
540}
541
543 // Do nothing if old and new symbols are the same.
544 if (MMRAs == getMMRAMetadata())
545 return;
546
547 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
550}
551
553 // Do nothing if old and new symbols are the same.
554 if (DS == getDeactivationSymbol())
555 return;
556
557 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
559 getMMRAMetadata(), DS);
560}
561
563 const MachineInstr &MI) {
564 if (this == &MI)
565 // Nothing to do for a self-clone!
566 return;
567
568 assert(&MF == MI.getMF() &&
569 "Invalid machine functions when cloning instruction symbols!");
570
571 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
572 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
573 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
574 setPCSections(MF, MI.getPCSections());
575 setMMRAMetadata(MF, MI.getMMRAMetadata());
576}
577
/// Compute the flag set an instruction would carry if it replaced both this
/// instruction and \p Other (e.g. after CSE or scheduling fusion).
uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
  // For now, just return the union of the flags. If the flags get more
  // complicated over time, we might need more logic here.
  return getFlags() | Other.getFlags();
}
583
585 uint32_t MIFlags = 0;
586 // Copy the wrapping flags.
587 if (const OverflowingBinaryOperator *OB =
589 if (OB->hasNoSignedWrap())
591 if (OB->hasNoUnsignedWrap())
593 } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
594 if (TI->hasNoSignedWrap())
596 if (TI->hasNoUnsignedWrap())
598 } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
599 if (GEP->hasNoUnsignedSignedWrap())
601 if (GEP->hasNoUnsignedWrap())
603 if (GEP->isInBounds())
605 }
606
607 // Copy the nonneg flag.
609 if (PNI->hasNonNeg())
611 // Copy the disjoint flag.
612 } else if (const PossiblyDisjointInst *PD =
614 if (PD->isDisjoint())
616 }
617
618 // Copy the samesign flag.
619 if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
620 if (ICmp->hasSameSign())
622
623 // Copy the exact flag.
625 if (PE->isExact())
627
628 // Copy the fast-math flags.
630 const FastMathFlags Flags = FP->getFastMathFlags();
631 if (Flags.noNaNs())
633 if (Flags.noInfs())
635 if (Flags.noSignedZeros())
637 if (Flags.allowReciprocal())
639 if (Flags.allowContract())
641 if (Flags.approxFunc())
643 if (Flags.allowReassoc())
645 }
646
647 if (I.getMetadata(LLVMContext::MD_unpredictable))
649
650 return MIFlags;
651}
652
656
657bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
658 assert(!isBundledWithPred() && "Must be called on bundle header");
660 if (MII->getDesc().getFlags() & Mask) {
661 if (Type == AnyInBundle)
662 return true;
663 } else {
664 if (Type == AllInBundle && !MII->isBundle())
665 return false;
666 }
667 // This was the last instruction in the bundle.
668 if (!MII->isBundledWithSucc())
669 return Type == AllInBundle;
670 }
671}
672
673bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
674 MICheckType Check) const {
675 // If opcodes or number of operands are not the same then the two
676 // instructions are obviously not identical.
677 if (Other.getOpcode() != getOpcode() ||
678 Other.getNumOperands() != getNumOperands())
679 return false;
680
681 if (isBundle()) {
682 // We have passed the test above that both instructions have the same
683 // opcode, so we know that both instructions are bundles here. Let's compare
684 // MIs inside the bundle.
685 assert(Other.isBundle() && "Expected that both instructions are bundles.");
688 // Loop until we analysed the last intruction inside at least one of the
689 // bundles.
690 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
691 ++I1;
692 ++I2;
693 if (!I1->isIdenticalTo(*I2, Check))
694 return false;
695 }
696 // If we've reached the end of just one of the two bundles, but not both,
697 // the instructions are not identical.
698 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
699 return false;
700 }
701
702 // Check operands to make sure they match.
703 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
704 const MachineOperand &MO = getOperand(i);
705 const MachineOperand &OMO = Other.getOperand(i);
706 if (!MO.isReg()) {
707 if (!MO.isIdenticalTo(OMO))
708 return false;
709 continue;
710 }
711
712 // Clients may or may not want to ignore defs when testing for equality.
713 // For example, machine CSE pass only cares about finding common
714 // subexpressions, so it's safe to ignore virtual register defs.
715 if (MO.isDef()) {
716 if (Check == IgnoreDefs)
717 continue;
718 else if (Check == IgnoreVRegDefs) {
719 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
720 if (!MO.isIdenticalTo(OMO))
721 return false;
722 } else {
723 if (!MO.isIdenticalTo(OMO))
724 return false;
725 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
726 return false;
727 }
728 } else {
729 if (!MO.isIdenticalTo(OMO))
730 return false;
731 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
732 return false;
733 }
734 }
735 // If DebugLoc does not match then two debug instructions are not identical.
736 if (isDebugInstr())
737 if (getDebugLoc() && Other.getDebugLoc() &&
738 getDebugLoc() != Other.getDebugLoc())
739 return false;
740 // If pre- or post-instruction symbols do not match then the two instructions
741 // are not identical.
742 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
743 getPostInstrSymbol() != Other.getPostInstrSymbol())
744 return false;
745 if (isCall()) {
746 // Call instructions with different CFI types are not identical.
747 if (getCFIType() != Other.getCFIType())
748 return false;
749 // Even if the call instructions have the same ops, they are not identical
750 // if they are for different globals (this may happen with indirect calls).
755 Other.getParent()->getParent()->tryGetCalledGlobal(&Other);
756 if (ThisCGI.Callee != OtherCGI.Callee ||
757 ThisCGI.TargetFlags != OtherCGI.TargetFlags)
758 return false;
759 }
760 }
761 if (getDeactivationSymbol() != Other.getDeactivationSymbol())
762 return false;
763
764 return true;
765}
766
767bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
768 if (!isDebugValueLike() || !Other.isDebugValueLike())
769 return false;
770 if (getDebugLoc() != Other.getDebugLoc())
771 return false;
772 if (getDebugVariable() != Other.getDebugVariable())
773 return false;
774 if (getNumDebugOperands() != Other.getNumDebugOperands())
775 return false;
776 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
777 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
778 return false;
781 Other.getDebugExpression(), Other.isIndirectDebugValue()))
782 return false;
783 return true;
784}
785
787 return getParent()->getParent();
788}
789
791 assert(getParent() && "Not embedded in a basic block!");
792 return getParent()->remove(this);
793}
794
796 assert(getParent() && "Not embedded in a basic block!");
797 return getParent()->remove_instr(this);
798}
799
801 assert(getParent() && "Not embedded in a basic block!");
802 getParent()->erase(this);
803}
804
806 assert(getParent() && "Not embedded in a basic block!");
807 getParent()->erase_instr(this);
808}
809
811 if (!isCall(Type))
812 return false;
813 switch (getOpcode()) {
814 case TargetOpcode::PATCHPOINT:
815 case TargetOpcode::STACKMAP:
816 case TargetOpcode::STATEPOINT:
817 case TargetOpcode::FENTRY_CALL:
818 return false;
819 }
820 return true;
821}
822
828
829template <typename Operand, typename Instruction>
830static iterator_range<
831 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
833 std::function<bool(Operand & Op)> OpUsesReg(
834 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
835 return make_filter_range(MI->debug_operands(), OpUsesReg);
836}
837
839 std::function<bool(const MachineOperand &Op)>>>
844
850
852 unsigned NumOperands = MCID->getNumOperands();
853 if (!MCID->isVariadic())
854 return NumOperands;
855
856 for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
857 // The operands must always be in the following order:
858 // - explicit reg defs,
859 // - other explicit operands (reg uses, immediates, etc.),
860 // - implicit reg defs
861 // - implicit reg uses
862 if (MO.isReg() && MO.isImplicit())
863 break;
864 ++NumOperands;
865 }
866 return NumOperands;
867}
868
870 unsigned NumDefs = MCID->getNumDefs();
871 if (!MCID->isVariadic())
872 return NumDefs;
873
874 for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
875 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
876 break;
877 ++NumDefs;
878 }
879 return NumDefs;
880}
881
883 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
886 --Pred;
887 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
888 Pred->setFlag(BundledSucc);
889}
890
892 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
895 ++Succ;
896 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
897 Succ->setFlag(BundledPred);
898}
899
901 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
904 --Pred;
905 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
906 Pred->clearFlag(BundledSucc);
907}
908
910 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
913 ++Succ;
914 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
915 Succ->clearFlag(BundledPred);
916}
917
919 if (isInlineAsm()) {
920 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
921 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
922 return true;
923 }
924 return false;
925}
926
928 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
929 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
930 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
931}
932
934 unsigned *GroupNo) const {
935 assert(isInlineAsm() && "Expected an inline asm instruction");
936 assert(OpIdx < getNumOperands() && "OpIdx out of range");
937
938 // Ignore queries about the initial operands.
940 return -1;
941
942 unsigned Group = 0;
943 unsigned NumOps;
944 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
945 i += NumOps) {
946 const MachineOperand &FlagMO = getOperand(i);
947 // If we reach the implicit register operands, stop looking.
948 if (!FlagMO.isImm())
949 return -1;
950 const InlineAsm::Flag F(FlagMO.getImm());
951 NumOps = 1 + F.getNumOperandRegisters();
952 if (i + NumOps > OpIdx) {
953 if (GroupNo)
954 *GroupNo = Group;
955 return i;
956 }
957 ++Group;
958 }
959 return -1;
960}
961
963 assert(isDebugLabel() && "not a DBG_LABEL");
964 return cast<DILabel>(getOperand(0).getMetadata());
965}
966
968 assert((isDebugValueLike()) && "not a DBG_VALUE*");
969 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
970 return getOperand(VariableOp);
971}
972
974 assert((isDebugValueLike()) && "not a DBG_VALUE*");
975 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
976 return getOperand(VariableOp);
977}
978
982
984 assert((isDebugValueLike()) && "not a DBG_VALUE*");
985 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
986 return getOperand(ExpressionOp);
987}
988
990 assert((isDebugValueLike()) && "not a DBG_VALUE*");
991 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
992 return getOperand(ExpressionOp);
993}
994
998
1002
1005 const TargetInstrInfo *TII,
1006 const TargetRegisterInfo *TRI) const {
1007 assert(getParent() && "Can't have an MBB reference here!");
1008 assert(getMF() && "Can't have an MF reference here!");
1009 // Most opcodes have fixed constraints in their MCInstrDesc.
1010 if (!isInlineAsm())
1011 return TII->getRegClass(getDesc(), OpIdx);
1012
1013 if (!getOperand(OpIdx).isReg())
1014 return nullptr;
1015
1016 // For tied uses on inline asm, get the constraint from the def.
1017 unsigned DefIdx;
1018 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
1019 OpIdx = DefIdx;
1020
1021 // Inline asm stores register class constraints in the flag word.
1022 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
1023 if (FlagIdx < 0)
1024 return nullptr;
1025
1026 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
1027 unsigned RCID;
1028 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
1029 F.hasRegClassConstraint(RCID))
1030 return TRI->getRegClass(RCID);
1031
1032 // Assume that all registers in a memory operand are pointers.
1033 if (F.isMemKind())
1034 return TRI->getPointerRegClass();
1035
1036 return nullptr;
1037}
1038
1040 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
1041 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
1042 // Check every operands inside the bundle if we have
1043 // been asked to.
1044 if (ExploreBundle)
1045 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
1046 ++OpndIt)
1047 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
1048 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
1049 else
1050 // Otherwise, just check the current operands.
1051 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1052 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1053 return CurRC;
1054}
1055
1056const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1057 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1058 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1059 assert(CurRC && "Invalid initial register class");
1060 // Check if Reg is constrained by some of its use/def from MI.
1061 const MachineOperand &MO = getOperand(OpIdx);
1062 if (!MO.isReg() || MO.getReg() != Reg)
1063 return CurRC;
1064 // If yes, accumulate the constraints through the operand.
1065 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1066}
1067
1069 unsigned OpIdx, const TargetRegisterClass *CurRC,
1070 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1072 const MachineOperand &MO = getOperand(OpIdx);
1073 assert(MO.isReg() &&
1074 "Cannot get register constraints for non-register operand");
1075 assert(CurRC && "Invalid initial register class");
1076 if (unsigned SubIdx = MO.getSubReg()) {
1077 if (OpRC)
1078 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1079 else
1080 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1081 } else if (OpRC)
1082 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1083 return CurRC;
1084}
1085
1086/// Return the number of instructions inside the MI bundle, not counting the
1087/// header instruction.
1090 unsigned Size = 0;
1091 while (I->isBundledWithSucc()) {
1092 ++Size;
1093 ++I;
1094 }
1095 return Size;
1096}
1097
1098/// Returns true if the MachineInstr has an implicit-use operand of exactly
1099/// the given register (not considering sub/super-registers).
1101 for (const MachineOperand &MO : implicit_operands()) {
1102 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1103 return true;
1104 }
1105 return false;
1106}
1107
/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
                                            const TargetRegisterInfo *TRI,
                                            bool isKill) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;
    // With TRI available, also accept uses of registers that merely overlap
    // Reg (sub-, super- or aliased registers); otherwise require equality.
    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}
1127
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  // Classify every operand that mentions Reg; optionally record its index
  // into Ops for the caller.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
1155
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
                                           const TargetRegisterInfo *TRI,
                                           bool isDead, bool Overlap) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // For physical registers, widen the match: any overlap when Overlap is
    // requested, otherwise only defs of a strict super-register.
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
1185
/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    // Scan only the operands actually present on this instruction.
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.operands()[i].isPredicate())
        return i;
  }

  return -1;
}
1203
// MachineOperand::TiedTo is 4 bits wide.
// TiedMax (15) doubles as a sentinel meaning "tied to an operand at or beyond
// index TiedMax-1"; see tieOperands() and findTiedOperandIdx() below.
const unsigned TiedMax = 15;
1206
1207/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1208///
1209/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1210/// field. TiedTo can have these values:
1211///
1212/// 0: Operand is not tied to anything.
1213/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1214/// TiedMax: Tied to an operand >= TiedMax-1.
1215///
1216/// The tied def must be one of the first TiedMax operands on a normal
1217/// instruction. INLINEASM instructions allow more tied defs.
1218///
1219void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1220 MachineOperand &DefMO = getOperand(DefIdx);
1221 MachineOperand &UseMO = getOperand(UseIdx);
1222 assert(DefMO.isDef() && "DefIdx must be a def operand");
1223 assert(UseMO.isUse() && "UseIdx must be a use operand");
1224 assert(!DefMO.isTied() && "Def is already tied to another use");
1225 assert(!UseMO.isTied() && "Use is already tied to another def");
1226
1227 if (DefIdx < TiedMax) {
1228 UseMO.TiedTo = DefIdx + 1;
1229 } else {
1230 // Inline asm can use the group descriptors to find tied operands,
1231 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1232 // but on normal instruction, the tied def must be within the first TiedMax
1233 // operands.
1234 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1235 "DefIdx out of range");
1236 UseMO.TiedTo = TiedMax;
1237 }
1238
1239 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1240 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1241}
1242
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    // The use cannot be earlier than TiedMax-1, otherwise its TiedTo would
    // have been in range above.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    // Walk defs and GC pointer operands in lockstep; skipping non-register
    // GC operands keeps the pairing 1-1.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1320
/// clearKillInfo - Clears kill flags on all operands.
///
  // Kill flags only exist on register uses, so defs are left untouched.
  for (MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}
1329
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (ToReg.isPhysical()) {
    // Physical replacement: resolve SubIdx to the concrete subregister once,
    // up front, then rewrite every operand that mentions FromReg.
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    // Virtual replacement: each operand handles SubIdx itself via
    // substVirtReg (which can compose with an existing subreg index).
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}
1349
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Don't touch instructions that have non-trivial invariants. For example,
  // terminators have to be at the end of a basic block.
  // NOTE(review): the continuation of this condition is not visible in this
  // view -- confirm the remaining disqualifying predicates in the full source.
  if (isPosition() || isDebugInstr() || isTerminator() ||
    return false;

  // Don't touch instructions which can have non-load/store effects.
  //
  // Inline asm has a "sideeffect" marker to indicate whether the asm has
  // intentional side-effects. Even if an inline asm is not "sideeffect",
  // though, it still can't be speculatively executed: the operation might
  // not be valid on the current target, or for some combinations of operands.
  // (Some transforms that move an instruction don't speculatively execute it;
  // we currently don't try to handle that distinction here.)
  //
  // Other instructions handled here include those that can raise FP
  // exceptions, x86 "DIV" instructions which trap on divide by zero, and
  // stack adjustments.
  // NOTE(review): the first term(s) of this condition are not visible in this
  // view.
      isInlineAsm())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1399
  // Don't delete frame allocation labels.
  // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
  if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // Don't delete FAKE_USE.
  // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
  if (isFakeUse())
    return false;

  // LIFETIME markers should be preserved.
  // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
  if (isLifetimeMarker())
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  // PHIs are movable by definition and therefore also removable here.
  bool SawStore = false;
  return isPHI() || isSafeToMove(SawStore);
}
1421
                          LiveRegUnits *LivePhysRegs) const {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const MachineOperand &MO : all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      // Don't delete live physreg defs, or any reserved register defs.
      // Without LivePhysRegs information we must conservatively assume the
      // physreg is live.
      if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
        return false;
    } else {
      if (MO.isDead())
        continue;
      for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
        if (&Use != this)
          // This def has a non-debug use. Don't delete the instruction!
          return false;
      }
    }
  }

  // Technically speaking inline asm without side effects and no defs can still
  // be deleted. But there is so much bad inline asm code out there, we should
  // let them be.
  if (isInlineAsm())
    return false;

  // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
  if (isLifetimeMarker())
    return true;

  // If there are no defs with uses, then we call the instruction dead so long
  // as we do not suspect it may have sideeffects.
  return wouldBeTriviallyDead();
}
1458
                                 BatchAAResults *AA, bool UseTBAA,
                                 const MachineMemOperand *MMOa,
                                 const MachineMemOperand *MMOb) {
  // The following interface to AA is fashioned after DAGCombiner::isAlias and
  // operates with MachineMemOperand offset with some important assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and cannot
  //     affect queries other than the trivial case of overlap checking.
  //   - These offsets never wrap and never step outside of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some memory objects. It
  // can save compile time, and possibly catch some corner cases not currently
  // covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  LocationSize WidthA = MMOa->getSize();
  LocationSize WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA.hasValue();
  bool KnownWidthB = WidthB.hasValue();
  bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // Pseudo source values (stack slots, constant pools, ...) can prove
    // disjointness from ordinary IR values.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal && BothMMONonScalable) {
    // Same underlying object with fixed widths: a pure interval-overlap test
    // decides aliasing without consulting AA.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA)
                           ? WidthA.getValue().getKnownMinValue()
                           : WidthB.getValue().getKnownMinValue();
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // If Scalable Location Size has non-zero offset, Width + Offset does not work
  // at the moment
  if ((WidthA.isScalable() && OffsetA > 0) ||
      (WidthB.isScalable() && OffsetB > 0))
    return true;

  // NOTE(review): the else-arms of both ternaries below are on lines not
  // visible in this view.
  int64_t OverlapA =
      KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
  int64_t OverlapB =
      KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset

  LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
                          ? WidthA
                          : LocationSize::precise(OverlapA);
  LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
                          ? WidthB
                          : LocationSize::precise(OverlapB);

  return !AA->isNoAlias(
      MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}
1543
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // Exclude call instruction which may alter the memory but can not be handled
  // by this function.
  if (isCall() || Other.isCall())
    return true;

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Both instructions must be memory operations to be able to alias.
  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
    return false;

  // Memory operations without memory operands may access anything. Be
  // conservative and assume `MayAlias`.
  if (memoperands_empty() || Other.memoperands_empty())
    return true;

  // Skip if there are too many memory operands.
  // Beyond the limit, conservatively report a possible alias rather than pay
  // the quadratic cost of pairwise checks.
  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
  if (NumChecks > TII->getMemOperandAACheckLimit())
    return true;

  // Check each pair of memory operands from both instructions, which can't
  // alias only if all pairs won't alias.
  for (auto *MMOa : memoperands()) {
    for (auto *MMOb : Other.memoperands()) {
      // Two loads can never alias in this sense; at least one store needed.
      if (!MMOa->isStore() && !MMOb->isStore())
        continue;
      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
        return true;
    }
  }

  return false;
}
1591
1592bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1593 bool UseTBAA) const {
1594 if (AA) {
1595 BatchAAResults BAA(*AA);
1596 return mayAlias(&BAA, Other, UseTBAA);
1597 }
1598 return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1599}
1600
/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
/// memory references.
  // An instruction known never to access memory won't have a volatile access.
  // NOTE(review): the final term of this condition is on a line not visible in
  // this view.
  if (!mayStore() &&
      !mayLoad() &&
      !isCall() &&
    return false;

  // Otherwise, if the instruction has no memory reference information,
  // conservatively assume it wasn't preserved.
  if (memoperands_empty())
    return true;

  // Check if any of our memory operands are ordered.
  return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
    return !MMO->isUnordered();
  });
}
1623
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function.
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();

  // Every memory operand must independently be shown invariant and
  // dereferenceable.
  for (MachineMemOperand *MMO : memoperands()) {
    if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction. Such an instruction is technically an invariant load,
      // but the caller code would need updated to expect that.
      return false;
    if (MMO->isStore()) return false;
    if (MMO->isInvariant() && MMO->isDereferenceable())
      continue;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
      if (PSV->isConstant(&MFI))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}
1662
  if (!isPHI())
    return {};
  assert(getNumOperands() >= 3 &&
         "It's illegal to have a PHI without source operands");

  // PHI operands come in (register, block) pairs after the def, so incoming
  // registers live at odd indices 1, 3, 5, ...; all must be the same register.
  Register Reg = getOperand(1).getReg();
  for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
    if (getOperand(i).getReg() != Reg)
      return {};
  return Reg;
}
1675
    return true;
  // NOTE(review): the condition guarding the `return true` above is on a line
  // not visible in this view.
  if (isInlineAsm()) {
    // Inline asm carries an explicit side-effect marker in its extra-info
    // immediate operand.
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      return true;
  }

  return false;
}
1687
  // NOTE(review): the declaration and the trailing term of this condition are
  // on lines not visible in this view; the visible part treats stores and
  // calls as disqualifying.
  return mayStore() || isCall() ||
}
1692
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
  for (const MachineOperand &MO : operands()) {
    // Skip non-register operands and register uses; only defs matter here.
    if (!MO.isReg() || MO.isUse())
      continue;
    if (!MO.isDead())
      return false;
  }
  return true;
}
1704
  // Same check as allDefsAreDead() above, restricted to implicit operands.
  for (const MachineOperand &MO : implicit_operands()) {
    if (!MO.isReg() || MO.isUse())
      continue;
    if (!MO.isDead())
      return false;
  }
  return true;
}
1714
/// copyImplicitOps - Copy implicit register operands from specified
/// instruction to this instruction.
                                   const MachineInstr &MI) {
  // Everything past MI's declared (explicit) operand count is a candidate;
  // copy the implicit register operands and regmasks.
  for (const MachineOperand &MO :
       llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      addOperand(MF, MO);
}
1724
  const MCInstrDesc &MCID = getDesc();
  // STATEPOINT ties cannot be described by the static MCID constraints.
  if (MCID.Opcode == TargetOpcode::STATEPOINT)
    return true;
  for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
    const auto &Operand = getOperand(I);
    if (!Operand.isReg() || Operand.isDef())
      // Ignore the defined registers as MCID marks only the uses as tied.
      continue;
    // A tie is "complex" when the actual tied index diverges from what the
    // instruction description predicts (-1 meaning "not tied").
    int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
    int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
    if (ExpectedTiedIdx != TiedIdx)
      return true;
  }
  return false;
}
1741
                                      const MachineRegisterInfo &MRI) const {
  // NOTE(review): the declaration of `Op` and the condition guarding the
  // first early return below are on lines not visible in this view.
  if (!Op.isReg())
    return LLT{};

    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().operands()[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // Each generic type index is printed at most once; an invalid LLT tells the
  // caller to print nothing.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1765
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Indent by two spaces and print to the debug stream.
  dbgs() << "  ";
  print(dbgs());
}
1771
1772LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1773 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1774 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1775 if (Depth >= MaxDepth)
1776 return;
1777 if (!AlreadySeenInstrs.insert(this).second)
1778 return;
1779 // PadToColumn always inserts at least one space.
1780 // Don't mess up the alignment if we don't want any space.
1781 if (Depth)
1782 fdbgs().PadToColumn(Depth * 2);
1783 print(fdbgs());
1784 for (const MachineOperand &MO : operands()) {
1785 if (!MO.isReg() || MO.isDef())
1786 continue;
1787 Register Reg = MO.getReg();
1788 if (Reg.isPhysical())
1789 continue;
1790 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1791 if (NewMI == nullptr)
1792 continue;
1793 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1794 }
1795}
1796
                                  unsigned MaxDepth) const {
  // Seed the seen-set and start the recursive dump at depth 0.
  SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
  dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
}
#endif
1803
1804void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1805 bool SkipDebugLoc, bool AddNewLine,
1806 const TargetInstrInfo *TII) const {
1807 const Module *M = nullptr;
1808 const Function *F = nullptr;
1809 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1810 F = &MF->getFunction();
1811 M = F->getParent();
1812 if (!TII)
1813 TII = MF->getSubtarget().getInstrInfo();
1814 }
1815
1816 ModuleSlotTracker MST(M);
1817 if (F)
1818 MST.incorporateFunction(*F);
1819 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1820}
1821
1823 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1824 bool AddNewLine, const TargetInstrInfo *TII) const {
1825 // We can be a bit tidier if we know the MachineFunction.
1826 const TargetRegisterInfo *TRI = nullptr;
1827 const MachineRegisterInfo *MRI = nullptr;
1828 tryToGetTargetInfo(*this, TRI, MRI, TII);
1829
1830 if (isCFIInstruction())
1831 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1832
1833 SmallBitVector PrintedTypes(8);
1834 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1835 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1836 if (!ShouldPrintRegisterTies)
1837 return 0U;
1838 const MachineOperand &MO = getOperand(OpIdx);
1839 if (MO.isReg() && MO.isTied() && !MO.isDef())
1840 return findTiedOperandIdx(OpIdx);
1841 return 0U;
1842 };
1843 unsigned StartOp = 0;
1844 unsigned e = getNumOperands();
1845
1846 // Print explicitly defined operands on the left of an assignment syntax.
1847 while (StartOp < e) {
1848 const MachineOperand &MO = getOperand(StartOp);
1849 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1850 break;
1851
1852 if (StartOp != 0)
1853 OS << ", ";
1854
1855 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1856 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1857 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1858 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1859 ++StartOp;
1860 }
1861
1862 if (StartOp != 0)
1863 OS << " = ";
1864
1866 OS << "frame-setup ";
1868 OS << "frame-destroy ";
1870 OS << "nnan ";
1872 OS << "ninf ";
1874 OS << "nsz ";
1876 OS << "arcp ";
1878 OS << "contract ";
1880 OS << "afn ";
1882 OS << "reassoc ";
1884 OS << "nuw ";
1886 OS << "nsw ";
1888 OS << "exact ";
1890 OS << "nofpexcept ";
1892 OS << "nomerge ";
1894 OS << "nneg ";
1896 OS << "disjoint ";
1898 OS << "nusw ";
1900 OS << "samesign ";
1902 OS << "inbounds ";
1903
1904 // Print the opcode name.
1905 if (TII)
1906 OS << TII->getName(getOpcode());
1907 else
1908 OS << "UNKNOWN";
1909
1910 if (SkipOpers)
1911 return;
1912
1913 // Print the rest of the operands.
1914 bool FirstOp = true;
1915 unsigned AsmDescOp = ~0u;
1916 unsigned AsmOpCount = 0;
1917
1919 // Print asm string.
1920 OS << " ";
1921 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1922 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1923 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1924 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
1925 IsStandalone, ShouldPrintRegisterTies,
1926 TiedOperandIdx, TRI);
1927
1928 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1929 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1930 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1931 OS << " [sideeffect]";
1932 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1933 OS << " [mayload]";
1934 if (ExtraInfo & InlineAsm::Extra_MayStore)
1935 OS << " [maystore]";
1936 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1937 OS << " [isconvergent]";
1938 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1939 OS << " [alignstack]";
1941 OS << " [attdialect]";
1943 OS << " [inteldialect]";
1944
1945 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1946 FirstOp = false;
1947 }
1948
1949 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1950 const MachineOperand &MO = getOperand(i);
1951
1952 if (FirstOp) FirstOp = false; else OS << ",";
1953 OS << " ";
1954
1955 if (isDebugValueLike() && MO.isMetadata()) {
1956 // Pretty print DBG_VALUE* instructions.
1957 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1958 if (DIV && !DIV->getName().empty())
1959 OS << "!\"" << DIV->getName() << '\"';
1960 else {
1961 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1962 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1963 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1964 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1965 }
1966 } else if (isDebugLabel() && MO.isMetadata()) {
1967 // Pretty print DBG_LABEL instructions.
1968 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1969 if (DIL && !DIL->getName().empty())
1970 OS << "\"" << DIL->getName() << '\"';
1971 else {
1972 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1973 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1974 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1975 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1976 }
1977 } else if (i == AsmDescOp && MO.isImm()) {
1978 // Pretty print the inline asm operand descriptor.
1979 OS << '$' << AsmOpCount++;
1980 unsigned Flag = MO.getImm();
1981 const InlineAsm::Flag F(Flag);
1982 OS << ":[";
1983 OS << F.getKindName();
1984
1985 unsigned RCID;
1986 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1987 if (TRI) {
1988 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1989 } else
1990 OS << ":RC" << RCID;
1991 }
1992
1993 if (F.isMemKind()) {
1994 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1995 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1996 }
1997
1998 unsigned TiedTo;
1999 if (F.isUseOperandTiedToDef(TiedTo))
2000 OS << " tiedto:$" << TiedTo;
2001
2002 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
2003 F.isRegUseKind()) &&
2004 F.getRegMayBeFolded()) {
2005 OS << " foldable";
2006 }
2007
2008 OS << ']';
2009
2010 // Compute the index of the next operand descriptor.
2011 AsmDescOp += 1 + F.getNumOperandRegisters();
2012 } else {
2013 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
2014 unsigned TiedOperandIdx = getTiedOperandIdx(i);
2015 if (MO.isImm() && isOperandSubregIdx(i))
2017 else
2018 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
2019 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
2020 }
2021 }
2022
2023 // Print any optional symbols attached to this instruction as-if they were
2024 // operands.
2025 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
2026 if (!FirstOp) {
2027 OS << ',';
2028 }
2029 OS << " pre-instr-symbol ";
2030 MachineOperand::printSymbol(OS, *PreInstrSymbol);
2031 }
2032 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
2033 if (!FirstOp) {
2034 OS << ',';
2035 }
2036 OS << " post-instr-symbol ";
2037 MachineOperand::printSymbol(OS, *PostInstrSymbol);
2038 }
2039 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
2040 if (!FirstOp) {
2041 OS << ',';
2042 }
2043 OS << " heap-alloc-marker ";
2044 HeapAllocMarker->printAsOperand(OS, MST);
2045 }
2046 if (MDNode *PCSections = getPCSections()) {
2047 if (!FirstOp) {
2048 OS << ',';
2049 }
2050 OS << " pcsections ";
2051 PCSections->printAsOperand(OS, MST);
2052 }
2053 if (MDNode *MMRA = getMMRAMetadata()) {
2054 if (!FirstOp) {
2055 OS << ',';
2056 }
2057 OS << " mmra ";
2058 MMRA->printAsOperand(OS, MST);
2059 }
2060 if (uint32_t CFIType = getCFIType()) {
2061 if (!FirstOp)
2062 OS << ',';
2063 OS << " cfi-type " << CFIType;
2064 }
2066 OS << ", deactivation-symbol " << getDeactivationSymbol()->getName();
2067
2068 if (DebugInstrNum) {
2069 if (!FirstOp)
2070 OS << ",";
2071 OS << " debug-instr-number " << DebugInstrNum;
2072 }
2073
2074 if (!SkipDebugLoc) {
2075 if (const DebugLoc &DL = getDebugLoc()) {
2076 if (!FirstOp)
2077 OS << ',';
2078 OS << " debug-location ";
2079 DL->printAsOperand(OS, MST);
2080 }
2081 }
2082
2083 if (!memoperands_empty()) {
2085 const LLVMContext *Context = nullptr;
2086 std::unique_ptr<LLVMContext> CtxPtr;
2087 const MachineFrameInfo *MFI = nullptr;
2088 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
2089 MFI = &MF->getFrameInfo();
2090 Context = &MF->getFunction().getContext();
2091 } else {
2092 CtxPtr = std::make_unique<LLVMContext>();
2093 Context = CtxPtr.get();
2094 }
2095
2096 OS << " :: ";
2097 bool NeedComma = false;
2098 for (const MachineMemOperand *Op : memoperands()) {
2099 if (NeedComma)
2100 OS << ", ";
2101 Op->print(OS, MST, SSNs, *Context, MFI, TII);
2102 NeedComma = true;
2103 }
2104 }
2105
2106 if (SkipDebugLoc)
2107 return;
2108
2109 bool HaveSemi = false;
2110
2111 // Print debug location information.
2112 if (const DebugLoc &DL = getDebugLoc()) {
2113 if (!HaveSemi) {
2114 OS << ';';
2115 HaveSemi = true;
2116 }
2117 OS << ' ';
2118 DL.print(OS);
2119 }
2120
2121 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2122 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2123 (isDebugValueList() && getNumOperands() >= 2) ||
2124 (isDebugRef() && getNumOperands() >= 3)) {
2125 if (getDebugVariableOp().isMetadata()) {
2126 if (!HaveSemi) {
2127 OS << ";";
2128 HaveSemi = true;
2129 }
2130 auto *DV = getDebugVariable();
2131 OS << " line no:" << DV->getLine();
2133 OS << " indirect";
2134 }
2135 }
2136 // TODO: DBG_LABEL
2137
2138 if (PrintMIAddrs)
2139 OS << " ; " << this;
2140
2141 if (AddNewLine)
2142 OS << '\n';
2143}
2144
// NOTE(review): the opening line of this definition (return type, name, first
// parameter) was lost in extraction; upstream LLVM this is
//   bool MachineInstr::addRegisterKilled(Register IncomingReg, ...)
// -- confirm against the original file.
// Marks a use of IncomingReg on this instruction as a kill (last use of the
// value). Returns true if a kill flag exists or was added.
2146                                      const TargetRegisterInfo *RegInfo,
2147                                      bool AddIfNotFound) {
2148 bool isPhysReg = IncomingReg.isPhysical();
// Alias handling is only meaningful for physical registers.
2149 bool hasAliases = isPhysReg &&
2150 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2151 bool Found = false;
// [extraction gap] upstream declares the scratch vector used below here:
//   SmallVector<unsigned, 4> DeadOps;  -- confirm.
2153 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2154 MachineOperand &MO = getOperand(i);
2155 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2156 continue;
2157
2158 // DEBUG_VALUE nodes do not contribute to code generation and should
2159 // always be ignored. Failure to do so may result in trying to modify
2160 // KILL flags on DEBUG_VALUE nodes.
2161 if (MO.isDebug())
2162 continue;
2163
2164 Register Reg = MO.getReg();
2165 if (!Reg)
2166 continue;
2167
2168 if (Reg == IncomingReg) {
2169 if (!Found) {
2170 if (MO.isKill())
2171 // The register is already marked kill.
2172 return true;
2173 if (isPhysReg && isRegTiedToDefOperand(i))
2174 // Two-address uses of physregs must not be marked kill.
2175 return true;
2176 MO.setIsKill();
2177 Found = true;
2178 }
2179 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2180 // A super-register kill already exists.
2181 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2182 return true;
2183 if (RegInfo->isSubRegister(IncomingReg, Reg))
2184 DeadOps.push_back(i);
2185 }
2186 }
2187
2188 // Trim unneeded kill operands.
2189 while (!DeadOps.empty()) {
2190 unsigned OpIdx = DeadOps.back();
2191 if (getOperand(OpIdx).isImplicit() &&
// [extraction gap] the rest of this condition and the then-branch were lost;
// upstream continues:
//   (getOperand(OpIdx).getReg() == IncomingReg)) removeOperand(OpIdx);
// -- confirm.
2194 else
2195 getOperand(OpIdx).setIsKill(false);
2196 DeadOps.pop_back();
2197 }
2198
2199 // If not found, this means an alias of one of the operands is killed. Add a
2200 // new implicit operand if required.
2201 if (!Found && AddIfNotFound) {
// [extraction gap] upstream starts the call here:
//   addOperand(MachineOperand::CreateReg(IncomingReg,  -- confirm.
2203 false /*IsDef*/,
2204 true /*IsImp*/,
2205 true /*IsKill*/));
2206 return true;
2207 }
2208 return Found;
2209 }
2210
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::clearRegisterKills(Register Reg, ...) -- confirm.
// Clears kill flags on every use of Reg; when RegInfo is provided and Reg is
// physical, overlapping physical registers are cleared as well.
2212                                      const TargetRegisterInfo *RegInfo) {
// Overlap queries only make sense for physical registers; drop RegInfo so
// virtual registers fall through to the exact-equality check below.
2213 if (!Reg.isPhysical())
2214 RegInfo = nullptr;
2215 for (MachineOperand &MO : operands()) {
2216 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2217 continue;
2218 Register OpReg = MO.getReg();
2219 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2220 MO.setIsKill(false);
2221 }
2222 }
2223
// NOTE(review): the opening line was lost in extraction; upstream this is
//   bool MachineInstr::addRegisterDead(Register Reg, ...) -- confirm.
// Marks a def of Reg on this instruction as dead (value never read).
// Returns true if a dead flag exists or was added.
2225                                    const TargetRegisterInfo *RegInfo,
2226                                    bool AddIfNotFound) {
2227 bool isPhysReg = Reg.isPhysical();
2228 bool hasAliases = isPhysReg &&
2229 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2230 bool Found = false;
// [extraction gap] upstream declares the scratch vector used below here:
//   SmallVector<unsigned, 4> DeadOps;  -- confirm.
2232 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2233 MachineOperand &MO = getOperand(i);
2234 if (!MO.isReg() || !MO.isDef())
2235 continue;
2236 Register MOReg = MO.getReg();
2237 if (!MOReg)
2238 continue;
2239
2240 if (MOReg == Reg) {
2241 MO.setIsDead();
2242 Found = true;
2243 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2244 // There exists a super-register that's marked dead.
2245 if (RegInfo->isSuperRegister(Reg, MOReg))
2246 return true;
2247 if (RegInfo->isSubRegister(Reg, MOReg))
2248 DeadOps.push_back(i);
2249 }
2250 }
2251
2252 // Trim unneeded dead operands.
2253 while (!DeadOps.empty()) {
2254 unsigned OpIdx = DeadOps.back();
2255 if (getOperand(OpIdx).isImplicit() &&
// [extraction gap] the rest of this condition and the then-branch were lost;
// upstream continues:
//   (getOperand(OpIdx).getReg() == Reg)) removeOperand(OpIdx);
// -- confirm.
2258 else
2259 getOperand(OpIdx).setIsDead(false);
2260 DeadOps.pop_back();
2261 }
2262
2263 // If not found, this means an alias of one of the operands is dead. Add a
2264 // new implicit operand if required.
2265 if (Found || !AddIfNotFound)
2266 return Found;
2267
// [extraction gap] upstream starts the call here:
//   addOperand(MachineOperand::CreateReg(Reg,  -- confirm.
2269 true /*IsDef*/,
2270 true /*IsImp*/,
2271 false /*IsKill*/,
2272 true /*IsDead*/));
2273 return true;
2274 }
2275
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::clearRegisterDeads(Register Reg) { -- confirm.
// Clears the dead flag on every def operand that exactly matches Reg
// (no alias/sub-register handling here).
2277 for (MachineOperand &MO : all_defs())
2278 if (MO.getReg() == Reg)
2279 MO.setIsDead(false);
2280 }
2281
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
// -- confirm. Sets/clears the undef flag on sub-register defs of Reg
// (only defs with a non-zero sub-register index are touched).
2283 for (MachineOperand &MO : all_defs())
2284 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2285 MO.setIsUndef(IsUndef);
2286 }
2287
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::addRegisterDefined(Register Reg, ...) -- confirm.
// Ensures this instruction has a def operand for Reg, adding an implicit def
// if none exists.
2289                                      const TargetRegisterInfo *RegInfo) {
2290 if (Reg.isPhysical()) {
// For physregs, findRegisterDefOperand performs the (possibly overlapping)
// search; an existing def means nothing to do.
2291 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2292 if (MO)
2293 return;
2294 } else {
// Virtual registers: only a full (subreg index 0) def counts.
2295 for (const MachineOperand &MO : all_defs()) {
2296 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2297 return;
2298 }
2299 }
// [extraction gap] upstream starts the call here:
//   addOperand(MachineOperand::CreateReg(Reg,  -- confirm.
2301 true /*IsDef*/,
2302 true /*IsImp*/));
2303 }
2304
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs, ...)
// -- confirm. Marks every physical-register def dead unless it overlaps a
// register in UsedRegs.
2306                                        const TargetRegisterInfo &TRI) {
2307 bool HasRegMask = false;
2308 for (MachineOperand &MO : operands()) {
2309 if (MO.isRegMask()) {
2310 HasRegMask = true;
2311 continue;
2312 }
2313 if (!MO.isReg() || !MO.isDef()) continue;
2314 Register Reg = MO.getReg();
2315 if (!Reg.isPhysical())
2316 continue;
2317 // If there are no uses, including partial uses, the def is dead.
2318 if (llvm::none_of(UsedRegs,
2319 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2320 MO.setIsDead();
2321 }
2322
2323 // This is a call with a register mask operand.
2324 // Mask clobbers are always dead, so add defs for the non-dead defines.
2325 if (HasRegMask)
2326 for (const Register &UsedReg : UsedRegs)
2327 addRegisterDefined(UsedReg, &TRI);
2328 }
2329
// NOTE(review): the line carrying the qualified function name was lost in
// extraction; upstream this is
//   unsigned MachineInstrExpressionTrait::getHashValue(const MachineInstr*
//   const &MI) -- confirm.
// Hashes opcode + operands, skipping virtual-register defs so that otherwise
// identical instructions defining different vregs hash equal (used by MachineCSE).
2330 unsigned
2332 // Build up a buffer of hash code components.
2333 SmallVector<size_t, 16> HashComponents;
2334 HashComponents.reserve(MI->getNumOperands() + 1);
2335 HashComponents.push_back(MI->getOpcode());
2336 for (const MachineOperand &MO : MI->operands()) {
2337 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2338 continue; // Skip virtual register defs.
2339
2340 HashComponents.push_back(hash_value(MO));
2341 }
2342 return hash_combine_range(HashComponents);
2343 }
2344
// NOTE(review): the opening line was lost in extraction; upstream this is
//   const MDNode *MachineInstr::getLocCookieMD() const { -- confirm.
// Scans operands from the back for the first non-empty metadata node
// (the inline-asm !srcloc cookie); returns nullptr when none is attached.
2346 // Find the source location cookie.
2347 const MDNode *LocMD = nullptr;
2348 for (unsigned i = getNumOperands(); i != 0; --i) {
2349 if (getOperand(i-1).isMetadata() &&
2350 (LocMD = getOperand(i-1).getMetadata()) &&
2351 LocMD->getNumOperands() != 0) {
// Found a metadata operand with at least one entry; treat it as the cookie.
2353 return LocMD;
2354 }
2355 }
2356
2357 return nullptr;
2358 }
2359
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
// with an assert(isInlineAsm()) -- confirm.
// Reports Msg through the LLVMContext diagnostics, attaching the !srcloc
// cookie (if present) so the frontend can point at the offending asm.
2362 const MDNode *LocMD = getLocCookieMD();
2363 uint64_t LocCookie =
2364 LocMD
2365 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2366 : 0;
// [extraction gap] upstream obtains the context here:
//   LLVMContext &Ctx = getMF()->getFunction().getContext();  -- confirm.
2368 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2369 }
2370
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::emitGenericError(const Twine &Msg) const { -- confirm.
// Emits a generic diagnostic against the enclosing IR function.
2372 const Function &Fn = getMF()->getFunction();
2373 Fn.getContext().diagnose(
// [extraction gap] the diagnostic construction line was lost; upstream:
//   DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));  -- confirm.
2375 }
2376
// NOTE(review): the opening line was lost in extraction; upstream this is
//   MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
// -- confirm. Builds an unattached DBG_VALUE describing Variable/Expr with a
// single register location.
2378                                   const MCInstrDesc &MCID, bool IsIndirect,
2379                                   Register Reg, const MDNode *Variable,
2380                                   const MDNode *Expr) {
2381 assert(isa<DILocalVariable>(Variable) && "not a variable");
2382 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2383 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2384 "Expected inlined-at fields to agree");
2385 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
// Operand 1 is the offset slot: immediate 0 for indirect locations,
// a null register otherwise.
2386 if (IsIndirect)
2387 MIB.addImm(0U);
2388 else
2389 MIB.addReg(0U);
2390 return MIB.addMetadata(Variable).addMetadata(Expr);
2391 }
2392
// NOTE(review): the opening line was lost in extraction; upstream this is
//   MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
// -- confirm. Builds an unattached debug-value instruction from a list of
// debug operands; handles both classic DBG_VALUE and variadic forms.
2394                                   const MCInstrDesc &MCID, bool IsIndirect,
2395                                   ArrayRef<MachineOperand> DebugOps,
2396                                   const MDNode *Variable, const MDNode *Expr) {
2397 assert(isa<DILocalVariable>(Variable) && "not a variable");
2398 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2399 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2400 "Expected inlined-at fields to agree");
2401 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2402 assert(DebugOps.size() == 1 &&
2403 "DBG_VALUE must contain exactly one debug operand");
2404 MachineOperand DebugOp = DebugOps[0];
// Register locations delegate to the single-register overload above.
2405 if (DebugOp.isReg())
2406 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2407 Expr);
2408
2409 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2410 if (IsIndirect)
2411 MIB.addImm(0U);
2412 else
2413 MIB.addReg(0U);
2414 return MIB.addMetadata(Variable).addMetadata(Expr);
2415 }
2416
// Variadic form: Variable and Expression come first, then all locations.
2417 auto MIB = BuildMI(MF, DL, MCID);
2418 MIB.addMetadata(Variable).addMetadata(Expr);
2419 for (const MachineOperand &DebugOp : DebugOps)
2420 if (DebugOp.isReg())
2421 MIB.addReg(DebugOp.getReg());
2422 else
2423 MIB.add(DebugOp);
2424 return MIB;
2425 }
2426
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
//                                     MachineBasicBlock::iterator I,
// -- confirm. Builds a debug-value instruction and inserts it into BB at I.
2429                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2430                                   bool IsIndirect, Register Reg,
2431                                   const MDNode *Variable, const MDNode *Expr) {
2432 MachineFunction &MF = *BB.getParent();
2433 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2434 BB.insert(I, MI);
2435 return MachineInstrBuilder(MF, MI);
2436 }
2437
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
//                                     MachineBasicBlock::iterator I,
// -- confirm. Variadic-operand counterpart of the overload above: builds the
// debug-value instruction and inserts it into BB at I.
2440                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2441                                   bool IsIndirect,
2442                                   ArrayRef<MachineOperand> DebugOps,
2443                                   const MDNode *Variable, const MDNode *Expr) {
2444 MachineFunction &MF = *BB.getParent();
2445 MachineInstr *MI =
2446 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2447 BB.insert(I, MI);
2448 return MachineInstrBuilder(MF, *MI);
2449 }
2450
2451 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2452 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
// NOTE(review): the signature line was lost in extraction; upstream this is
//   static const DIExpression *computeExprForSpill( -- confirm.
2454     const MachineInstr &MI,
2455     const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2456 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2457 "Expected inlined-at fields to agree");
2458
2459 const DIExpression *Expr = MI.getDebugExpression();
2460 if (MI.isIndirectDebugValue()) {
2461 assert(MI.getDebugOffset().getImm() == 0 &&
2462 "DBG_VALUE with nonzero offset");
// [extraction gap] the indirect case's expression update was lost; upstream:
//   Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);  -- confirm.
2464 } else if (MI.isDebugValueList()) {
2465 // We will replace the spilled register with a frame index, so
2466 // immediately deref all references to the spilled register.
2467 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2468 for (const MachineOperand *Op : SpilledOperands) {
2469 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2470 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2471 }
2472 }
2473 return Expr;
2474 }
// NOTE(review): the opening line was lost in extraction; upstream this is
//   static const DIExpression *computeExprForSpill(const MachineInstr &MI,
// -- confirm. Convenience overload: gathers all debug operands using SpillReg
// and forwards to the SpilledOperands overload.
2476                                                Register SpillReg) {
2477 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
// [extraction gap] upstream declares the operand vector here:
//   SmallVector<const MachineOperand *, 1> SpillOperands(  -- confirm.
2479 llvm::make_pointer_range(MI.getDebugOperandsForReg(SpillReg)));
2480 return computeExprForSpill(MI, SpillOperands);
2481 }
2482
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
//                                             MachineBasicBlock::iterator I,
// -- confirm. Clones Orig as a new debug value that points at FrameIndex
// instead of SpillReg.
2485                                           const MachineInstr &Orig,
2486                                           int FrameIndex, Register SpillReg) {
2487 assert(!Orig.isDebugRef() &&
2488 "DBG_INSTR_REF should not reference a virtual register.");
2489 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2490 MachineInstrBuilder NewMI =
2491 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2492 // Non-Variadic Operands: Location, Offset, Variable, Expression
2493 // Variadic Operands: Variable, Expression, Locations...
2494 if (Orig.isNonListDebugValue())
2495 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2496 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2497 if (Orig.isDebugValueList()) {
2498 for (const MachineOperand &Op : Orig.debug_operands())
2499 if (Op.isReg() && Op.getReg() == SpillReg)
2500 NewMI.addFrameIndex(FrameIndex);
2501 else
2502 NewMI.add(MachineOperand(Op));
2503 }
2504 return NewMI;
2505 }
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
//                                             MachineBasicBlock::iterator I,
// -- confirm. Same as the SpillReg overload but replaces exactly the operands
// listed in SpilledOperands.
2508     const MachineInstr &Orig, int FrameIndex,
2509     const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2510 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2511 MachineInstrBuilder NewMI =
2512 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2513 // Non-Variadic Operands: Location, Offset, Variable, Expression
2514 // Variadic Operands: Variable, Expression, Locations...
2515 if (Orig.isNonListDebugValue())
2516 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2517 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2518 if (Orig.isDebugValueList()) {
2519 for (const MachineOperand &Op : Orig.debug_operands())
2520 if (is_contained(SpilledOperands, &Op))
2521 NewMI.addFrameIndex(FrameIndex);
2522 else
2523 NewMI.add(MachineOperand(Op));
2524 }
2525 return NewMI;
2526 }
2527
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
// -- confirm. Rewrites Orig in place to refer to the spill slot FrameIndex.
2529                                   Register Reg) {
2530 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2531 if (Orig.isNonListDebugValue())
// [extraction gap] the offset reset for the non-list form was lost; upstream:
//   Orig.getDebugOffset().ChangeToImmediate(0U);  -- confirm.
2533 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2534 Op.ChangeToFrameIndex(FrameIndex);
2535 Orig.getDebugExpressionOp().setMetadata(Expr);
2536 }
2537
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   void MachineInstr::collectDebugValues(
//       SmallVectorImpl<MachineInstr *> &DbgValues) { -- confirm.
// Collects the DBG_VALUEs immediately following this instruction that refer
// to this instruction's first (def) register; stops at the first non-debug
// instruction.
2540 MachineInstr &MI = *this;
2541 if (!MI.getOperand(0).isReg())
2542 return;
2543
// [extraction gap] the iterator initialization was lost; upstream starts DI
// just past this instruction (MachineBasicBlock::iterator DI = MI; ++DI;)
// -- confirm.
2545 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2546 DI != DE; ++DI) {
2547 if (!DI->isDebugValue())
2548 return;
2549 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2550 DbgValues.push_back(&*DI);
2551 }
2552 }
2553
// NOTE(review): the opening line was lost in extraction; upstream this is
//   void MachineInstr::changeDebugValuesDefReg(Register Reg) { -- confirm.
// Retargets every DBG_VALUE that uses this instruction's defined register so
// it refers to Reg instead.
2555 // Collect matching debug values.
// [extraction gap] upstream declares the collection vector here:
//   SmallVector<MachineInstr *, 2> DbgValues;  -- confirm.
2558 if (!getOperand(0).isReg())
2559 return;
2560
2561 Register DefReg = getOperand(0).getReg();
2562 auto *MRI = getRegInfo();
2563 for (auto &MO : MRI->use_operands(DefReg)) {
2564 auto *DI = MO.getParent();
2565 if (!DI->isDebugValue())
2566 continue;
2567 if (DI->hasDebugOperandForReg(DefReg)) {
2568 DbgValues.push_back(DI);
2569 }
2570 }
2571
2572 // Propagate Reg to debug value instructions.
2573 for (auto *DBI : DbgValues)
2574 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2575 Op.setReg(Reg);
2576 }
2577
2579
// NOTE(review): the opening line was lost in extraction; upstream this is
//   static LocationSize getSpillSlotSize(const MMOList &Accesses,
// -- confirm. Sums the sizes of all spill-slot memory accesses; returns a
// precise total, or precise(0) when none of the accesses touch a spill slot.
2581                                      const MachineFrameInfo &MFI) {
2582 std::optional<TypeSize> Size;
2583 for (const auto *A : Accesses) {
2584 if (MFI.isSpillSlotObjectIndex(
2585 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2586 ->getFrameIndex())) {
2587 LocationSize S = A->getSize();
2588 if (!S.hasValue())
// [extraction gap] the unknown-size bailout was lost; upstream:
//   return LocationSize::beforeOrAfterPointer();  -- confirm.
2590 if (!Size)
2591 Size = S.getValue();
2592 else
2593 Size = *Size + S.getValue();
2594 }
2595 }
2596 if (!Size)
2597 return LocationSize::precise(0);
2598 return LocationSize::precise(*Size);
2599 }
2600
// NOTE(review): the line with the qualified function name was lost in
// extraction; upstream this is
//   MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { -- confirm.
// Returns the size of the spill slot written by this store-to-stack-slot
// instruction, or nullopt when it is not such a store.
2601 std::optional<LocationSize>
2603 int FI;
2604 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2605 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2606 if (MFI.isSpillSlotObjectIndex(FI))
2607 return (*memoperands_begin())->getSize();
2608 }
2609 return std::nullopt;
2610 }
2611
// NOTE(review): the qualified-name and MMOList declaration lines were lost in
// extraction; upstream this is
//   MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
// with a local `SmallVector<const MachineMemOperand *, 2> Accesses;`
// -- confirm. Total spill size for a store folded into this instruction.
2612 std::optional<LocationSize>
2615 if (TII->hasStoreToStackSlot(*this, Accesses))
2616 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2617 return std::nullopt;
2618 }
2619
// NOTE(review): the line with the qualified function name was lost in
// extraction; upstream this is
//   MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { -- confirm.
// Returns the size of the spill slot read by this load-from-stack-slot
// instruction, or nullopt when it is not such a load.
2620 std::optional<LocationSize>
2622 int FI;
2623 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2624 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2625 if (MFI.isSpillSlotObjectIndex(FI))
2626 return (*memoperands_begin())->getSize();
2627 }
2628 return std::nullopt;
2629 }
2630
// NOTE(review): the qualified-name and MMOList declaration lines were lost in
// extraction; upstream this is
//   MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
// with a local `SmallVector<const MachineMemOperand *, 2> Accesses;`
// -- confirm. Total restore size for a load folded into this instruction.
2631 std::optional<LocationSize>
2634 if (TII->hasLoadFromStackSlot(*this, Accesses))
2635 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2636 return std::nullopt;
2637 }
2638
// NOTE(review): the opening line was lost in extraction; upstream this is
//   unsigned MachineInstr::getDebugInstrNum() { -- confirm.
// Lazily assigns a unique debug-instr number from the enclosing function
// (0 means "not yet assigned").
2640 if (DebugInstrNum == 0)
2641 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2642 return DebugInstrNum;
2643 }
2644
// NOTE(review): the opening line was lost in extraction; upstream this is
//   unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) { -- confirm.
// Overload taking the function explicitly (usable before the instruction is
// inserted into a block).
2646 if (DebugInstrNum == 0)
2647 DebugInstrNum = MF.getNewDebugInstrNum();
2648 return DebugInstrNum;
2649 }
2650
2651std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2652 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2653 getRegInfo()->getType(getOperand(1).getReg()));
2654}
2655
2656std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2657 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2658 getRegInfo()->getType(getOperand(1).getReg()),
2659 getRegInfo()->getType(getOperand(2).getReg()));
2660}
2661
2662std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2663 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2664 getRegInfo()->getType(getOperand(1).getReg()),
2665 getRegInfo()->getType(getOperand(2).getReg()),
2666 getRegInfo()->getType(getOperand(3).getReg()));
2667}
2668
2669std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2670 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2671 getRegInfo()->getType(getOperand(1).getReg()),
2672 getRegInfo()->getType(getOperand(2).getReg()),
2673 getRegInfo()->getType(getOperand(3).getReg()),
2674 getRegInfo()->getType(getOperand(4).getReg()));
2675}
2676
// NOTE(review): the line with the qualified function name was lost in
// extraction; upstream this is MachineInstr::getFirst2RegLLTs() const
// -- confirm. Returns (Reg0, type of Reg0, Reg1, type of Reg1).
2677 std::tuple<Register, LLT, Register, LLT>
2679 Register Reg0 = getOperand(0).getReg();
2680 Register Reg1 = getOperand(1).getReg();
2681 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2682 getRegInfo()->getType(Reg1));
2683 }
2684
// NOTE(review): the line with the qualified function name was lost in
// extraction; upstream this is MachineInstr::getFirst3RegLLTs() const
// -- confirm. Returns the first three operand registers with their types.
2685 std::tuple<Register, LLT, Register, LLT, Register, LLT>
2687 Register Reg0 = getOperand(0).getReg();
2688 Register Reg1 = getOperand(1).getReg();
2689 Register Reg2 = getOperand(2).getReg();
2690 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2691 getRegInfo()->getType(Reg1), Reg2,
2692 getRegInfo()->getType(Reg2));
2693 }
2694
// NOTE(review): the line with the qualified function name was lost in
// extraction; upstream this is MachineInstr::getFirst4RegLLTs() const
// -- confirm. Returns the first four operand registers with their types.
2695 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2697 Register Reg0 = getOperand(0).getReg();
2698 Register Reg1 = getOperand(1).getReg();
2699 Register Reg2 = getOperand(2).getReg();
2700 Register Reg3 = getOperand(3).getReg();
2701 return std::tuple(
2702 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2703 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2704 }
2705
// NOTE(review): the first line of the return type and the qualified function
// name were lost in extraction; upstream this is
//   std::tuple<Register, LLT, ..., Register, LLT>
//   MachineInstr::getFirst5RegLLTs() const -- confirm.
// Returns the first five operand registers with their types.
2707                LLT>
2709 Register Reg0 = getOperand(0).getReg();
2710 Register Reg1 = getOperand(1).getReg();
2711 Register Reg2 = getOperand(2).getReg();
2712 Register Reg3 = getOperand(3).getReg();
2713 Register Reg4 = getOperand(4).getReg();
2714 return std::tuple(
2715 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2716 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2717 Reg4, getRegInfo()->getType(Reg4));
2718 }
2719
// NOTE(review): the opening lines were lost in extraction; upstream this is
//   void MachineInstr::insert(mop_iterator InsertBefore,
//                             ArrayRef<MachineOperand> Ops) { -- confirm.
// Inserts Ops before InsertBefore, preserving tied-operand relationships by
// untying everything up front and re-tying with shifted indices afterwards.
2722 assert(InsertBefore != nullptr && "invalid iterator");
2723 assert(InsertBefore->getParent() == this &&
2724 "iterator points to operand of other inst");
2725 if (Ops.empty())
2726 return;
2727
2728 // Do one pass to untie operands.
// [extraction gap] upstream declares the tie map here:
//   SmallDenseMap<unsigned, unsigned> TiedOpIndices;  -- confirm.
2730 for (const MachineOperand &MO : operands()) {
2731 if (MO.isReg() && MO.isTied()) {
2732 unsigned OpNo = getOperandNo(&MO);
2733 unsigned TiedTo = findTiedOperandIdx(OpNo);
2734 TiedOpIndices[OpNo] = TiedTo;
2735 untieRegOperand(OpNo);
2736 }
2737 }
2738
2739 unsigned OpIdx = getOperandNo(InsertBefore);
2740 unsigned NumOperands = getNumOperands();
2741 unsigned OpsToMove = NumOperands - OpIdx;
2742
// [extraction gap] upstream declares the temporary operand buffer here:
//   SmallVector<MachineOperand> MovingOps;  -- confirm.
2744 MovingOps.reserve(OpsToMove);
2745
// Pop the tail operands into MovingOps, append the new Ops, then re-append
// the saved tail.
2746 for (unsigned I = 0; I < OpsToMove; ++I) {
2747 MovingOps.emplace_back(getOperand(OpIdx));
// [extraction gap] upstream removes the copied operand here:
//   removeOperand(OpIdx);  -- confirm.
2749 }
2750 for (const MachineOperand &MO : Ops)
2751 addOperand(MO);
2752 for (const MachineOperand &OpMoved : MovingOps)
2753 addOperand(OpMoved);
2754
2755 // Re-tie operands.
2756 for (auto [Tie1, Tie2] : TiedOpIndices) {
// Indices at or past the insertion point moved up by the number of new ops.
2757 if (Tie1 >= OpIdx)
2758 Tie1 += Ops.size();
2759 if (Tie2 >= OpIdx)
2760 Tie2 += Ops.size();
2761 tieOperands(Tie1, Tie2);
2762 }
2763 }
2764
2765bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2766 assert(OpId && "expected non-zero operand id");
2767 assert(isInlineAsm() && "should only be used on inline asm");
2768
2769 if (!getOperand(OpId).isReg())
2770 return false;
2771
2772 const MachineOperand &MD = getOperand(OpId - 1);
2773 if (!MD.isImm())
2774 return false;
2775
2776 InlineAsm::Flag F(MD.getImm());
2777 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2778 return F.getRegMayBeFolded();
2779 return false;
2780}
2781
// NOTE(review): the opening line was lost in extraction; upstream this is
//   unsigned MachineInstr::removePHIIncomingValueFor(
//       const MachineBasicBlock &MBB) { -- confirm.
// Removes every (value, MBB) incoming pair for MBB from this PHI; returns the
// number of operands removed.
2783 assert(isPHI());
2784
2785 // Phi might have multiple entries for MBB. Need to remove them all.
2786 unsigned RemovedCount = 0;
// Iterate from the back so removal does not disturb unvisited pairs; PHI
// operands are (def, value0, mbb0, value1, mbb1, ...).
2787 for (unsigned N = getNumOperands(); N > 2; N -= 2) {
2788 if (getOperand(N - 1).getMBB() == &MBB) {
2789 removeOperand(N - 1);
2790 removeOperand(N - 2);
2791 RemovedCount += 2;
2792 }
2793 }
2794 return RemovedCount;
2795 }
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:646
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
A set of register units.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static cl::opt< bool > PrintMIAddrs("print-mi-addrs", cl::Hidden, cl::desc("Print addresses of MachineInstrs when dumping"))
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, BatchAAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForRegHelper(Instruction *MI, Register Reg)
SmallVector< const MachineMemOperand *, 2 > MMOList
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
Register Reg
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
MachineInstr unsigned OpIdx
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
Capacity getNext() const
Get the next larger capacity.
size_t getSize() const
Get the number of elements in an array with this capacity.
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
A debug info location.
Definition DebugLoc.h:123
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition DebugLoc.h:243
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:470
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCRegister Reg) const
Returns true if register Reg and no aliasing register is in the set.
A set of register units used to track register liveness.
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
ArrayRef< MCOperandInfo > operands() const
unsigned short Opcode
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isValid() const
isValid - Returns true until all the operands have been visited.
LLVM_ABI MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr, Value *DS=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isDebugValueList() const
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
mop_range debug_operands()
Returns all operands that are used to determine the variable location for this DBG_VALUE instruction.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
LLVM_ABI MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
const MachineBasicBlock * getParent() const
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
LLVM_ABI void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
bool isDebugLabel() const
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
LLVM_ABI bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
QueryType
API for querying MachineInstr properties.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
LLVM_ABI const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
LLVM_ABI Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
LLVM_ABI void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI bool mayAlias(BatchAAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
LLVM_ABI void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instr...
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
bool isDebugRef() const
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
LLVM_ABI bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
LLVM_ABI std::tuple< LLT, LLT > getFirst2LLTs() const
LLVM_ABI std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
LLVM_ABI void unbundleFromPred()
Break bundle above this instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool isStackAligningInlineAsm() const
LLVM_ABI void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
bool isCFIInstruction() const
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
LLVM_ABI void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
mop_range operands()
LLVM_ABI bool isCandidateForAdditionalCallInfo(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an additional information associated with it.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
LLVM_ABI std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
LLVM_ABI std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
LLVM_ABI InlineAsm::AsmDialect getInlineAsmDialect() const
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
LLVM_ABI bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
LLVM_ABI const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI unsigned removePHIIncomingValueFor(const MachineBasicBlock &MBB)
Remove all incoming values of Phi instruction for the given block.
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE It. Can untie/retie tied operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
LLVM_ABI void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
LLVM_ABI const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLVM_ABI LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging support: Determine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ABI const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
LLVM_ABI void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
MachineOperand * mop_iterator
iterator/begin/end - Iterate over all operands of a machine instruction.
LLVM_ABI bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI bool isDead(const MachineRegisterInfo &MRI, LiveRegUnits *LivePhysRegs=nullptr) const
Check whether an MI is dead.
LLVM_ABI std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
LLVM_ABI const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
friend class MachineFunction
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
LLVM_ABI void dump() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
MachineOperand & getDebugOperand(unsigned Index)
LLVM_ABI std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
LLVM_ABI void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
bool isPseudoProbe() const
LLVM_ABI bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS)
Value * getDeactivationSymbol() const
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
LLVM_ABI void unbundleFromSucc()
Break bundle below this instruction.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
LLVM_ABI void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
LLVM_ABI const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
LLVM_ABI int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
LLVM_ABI void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
LLVM_ABI bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
LLVM_ABI bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static LLVM_ABI void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static LLVM_ABI void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Representation for a specific memory location.
LLVM_ABI void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in th...
Definition InstrTypes.h:404
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
Definition Operator.h:154
Instruction that can have a nneg flag (zext/uitofp).
Definition InstrTypes.h:639
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
MI-level Statepoint operands.
Definition StackMaps.h:159
LLVM_ABI int getFirstGCPtrIdx()
Get index of first GC pointer operand of -1 if there are none.
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
std::enable_if_t< detail::IsValidPointer< X, Y >::value, bool > hasa(Y &&MD)
Check whether Metadata has a Value.
Definition Metadata.h:650
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
constexpr double e
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition iterator.h:341
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
Definition STLExtras.h:550
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Other
Any other memory.
Definition ModRef.h:68
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
filter_iterator_impl< WrappedIteratorT, PredicateT, detail::fwd_or_bidi_tag< WrappedIteratorT > > filter_iterator
Defines filter_iterator to a suitable specialization of filter_iterator_impl, based on the underlying...
Definition STLExtras.h:537
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:761
static LLVM_ABI unsigned getHashValue(const MachineInstr *const &MI)