1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
37#include "llvm/IR/Constants.h"
39#include "llvm/IR/DebugLoc.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/LLVMContext.h"
44#include "llvm/IR/Metadata.h"
45#include "llvm/IR/Module.h"
47#include "llvm/IR/Operator.h"
48#include "llvm/MC/MCInstrDesc.h"
52#include "llvm/Support/Debug.h"
57#include <algorithm>
58#include <cassert>
59#include <cstdint>
60#include <cstring>
61#include <utility>
62
63using namespace llvm;
64
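// Walks up from MI to the MachineFunction that contains it; returns nullptr
// when the instruction has not been inserted into a basic block yet.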
65static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
66 if (const MachineBasicBlock *MBB = MI.getParent())
67 if (const MachineFunction *MF = MBB->getParent())
68 return MF;
69 return nullptr;
70}
71
72// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
73// it.
74static void tryToGetTargetInfo(const MachineInstr &MI,
75 const TargetRegisterInfo *&TRI,
76 const MachineRegisterInfo *&MRI,
77 const TargetIntrinsicInfo *&IntrinsicInfo,
78 const TargetInstrInfo *&TII) {
79
80 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
81 TRI = MF->getSubtarget().getRegisterInfo();
82 MRI = &MF->getRegInfo();
83 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
84 TII = MF->getSubtarget().getInstrInfo();
85 }
86}
87
88void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
89 for (MCPhysReg ImpDef : MCID->implicit_defs())
90 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
91 for (MCPhysReg ImpUse : MCID->implicit_uses())
92 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
93}
94
95/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
96/// implicit operands. It reserves space for the number of operands specified by
97/// the MCInstrDesc.
98MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
99 DebugLoc DL, bool NoImp)
100 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
101 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
102 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
103
104 // Reserve space for the expected number of operands.
105 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
106 MCID->implicit_uses().size()) {
107 CapOperands = OperandCapacity::get(NumOps);
108 Operands = MF.allocateOperandArray(CapOperands);
109 }
110
111 if (!NoImp)
112 addImplicitDefUseOperands(MF);
113}
114
115/// MachineInstr ctor - Copies MachineInstr arg exactly.
116/// Does not copy the number from debug instruction numbering, to preserve
117/// uniqueness.
118MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
119 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
120 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
121 Opcode(MI.getOpcode()) {
122 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
123
124 CapOperands = OperandCapacity::get(MI.getNumOperands());
125 Operands = MF.allocateOperandArray(CapOperands);
126
127 // Copy operands.
128 for (const MachineOperand &MO : MI.operands())
129 addOperand(MF, MO);
130
131 // Replicate ties between the operands, which addOperand was not
132 // able to do reliably.
133 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
134 MachineOperand &NewMO = getOperand(i);
135 const MachineOperand &OrigMO = MI.getOperand(i);
136 NewMO.TiedTo = OrigMO.TiedTo;
137 }
138
139 // Copy all the sensible flags.
140 setFlags(MI.Flags);
141}
142
143void MachineInstr::setDesc(const MCInstrDesc &TID) {
144 if (getParent())
145 getMF()->handleChangeDesc(*this, TID);
146 MCID = &TID;
147 Opcode = TID.Opcode;
148}
149
150void MachineInstr::moveBefore(MachineInstr *MovePos) {
151 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
152}
153
154/// getRegInfo - If this instruction is embedded into a MachineFunction,
155/// return the MachineRegisterInfo object for the current function, otherwise
156/// return null.
157MachineRegisterInfo *MachineInstr::getRegInfo() {
158 if (MachineBasicBlock *MBB = getParent())
159 return &MBB->getParent()->getRegInfo();
160 return nullptr;
161}
162
163const MachineRegisterInfo *MachineInstr::getRegInfo() const {
164 if (const MachineBasicBlock *MBB = getParent())
165 return &MBB->getParent()->getRegInfo();
166 return nullptr;
167}
168
169void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
170 for (MachineOperand &MO : operands())
171 if (MO.isReg())
172 MRI.removeRegOperandFromUseList(&MO);
173}
174
175void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
176 for (MachineOperand &MO : operands())
177 if (MO.isReg())
178 MRI.addRegOperandToUseList(&MO);
179}
180
181void MachineInstr::addOperand(const MachineOperand &Op) {
182 MachineBasicBlock *MBB = getParent();
183 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
184 MachineFunction *MF = MBB->getParent();
185 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
186 addOperand(*MF, Op);
187}
188
189/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
190/// ranges. If MRI is non-null also update use-def chains.
191static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
192 unsigned NumOps, MachineRegisterInfo *MRI) {
193 if (MRI)
194 return MRI->moveOperands(Dst, Src, NumOps);
195 // MachineOperand is a trivially copyable type so we can just use memmove.
196 assert(Dst && Src && "Unknown operands");
197 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
198}
199
200/// addOperand - Add the specified operand to the instruction. If it is an
201/// implicit operand, it is added to the end of the operand list. If it is
202/// an explicit operand it is added at the end of the explicit operand list
203/// (before the first implicit operand).
204void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
205 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
206 "Cannot add more operands.");
207 assert(MCID && "Cannot add operands before providing an instr descriptor");
208
209 // Check if we're adding one of our existing operands.
210 if (&Op >= Operands && &Op < Operands + NumOperands) {
211 // This is unusual: MI->addOperand(MI->getOperand(i)).
212 // If adding Op requires reallocating or moving existing operands around,
213 // the Op reference could go stale. Support it by copying Op.
214 MachineOperand CopyOp(Op);
215 return addOperand(MF, CopyOp);
216 }
217
218 // Find the insert location for the new operand. Implicit registers go at
219 // the end, everything else goes before the implicit regs.
220 //
221 // FIXME: Allow mixed explicit and implicit operands on inline asm.
222 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
223 // implicit-defs, but they must not be moved around. See the FIXME in
224 // InstrEmitter.cpp.
225 unsigned OpNo = getNumOperands();
226 bool isImpReg = Op.isReg() && Op.isImplicit();
227 if (!isImpReg && !isInlineAsm()) {
228 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
229 --OpNo;
230 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
231 }
232 }
233
234 // OpNo now points to the desired insertion point. Unless this is a variadic
235 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
236 // RegMask operands go between the explicit and implicit operands.
237 MachineRegisterInfo *MRI = getRegInfo();
238
239 // Determine if the Operands array needs to be reallocated.
240 // Save the old capacity and operand array.
241 OperandCapacity OldCap = CapOperands;
242 MachineOperand *OldOperands = Operands;
243 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
244 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
245 Operands = MF.allocateOperandArray(CapOperands);
246 // Move the operands before the insertion point.
247 if (OpNo)
248 moveOperands(Operands, OldOperands, OpNo, MRI);
249 }
250
251 // Move the operands following the insertion point.
252 if (OpNo != NumOperands)
253 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
254 MRI);
255 ++NumOperands;
256
257 // Deallocate the old operand array.
258 if (OldOperands != Operands && OldOperands)
259 MF.deallocateOperandArray(OldCap, OldOperands);
260
261 // Copy Op into place. It still needs to be inserted into the MRI use lists.
262 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
263 NewMO->ParentMI = this;
264
265 // When adding a register operand, tell MRI about it.
266 if (NewMO->isReg()) {
267 // Ensure isOnRegUseList() returns false, regardless of Op's status.
268 NewMO->Contents.Reg.Prev = nullptr;
269 // Ignore existing ties. This is not a property that can be copied.
270 NewMO->TiedTo = 0;
271 // Add the new operand to MRI, but only for instructions in an MBB.
272 if (MRI)
273 MRI->addRegOperandToUseList(NewMO);
274 // The MCID operand information isn't accurate until we start adding
275 // explicit operands. The implicit operands are added first, then the
276 // explicits are inserted before them.
277 if (!isImpReg) {
278 // Tie uses to defs as indicated in MCInstrDesc.
279 if (NewMO->isUse()) {
280 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
281 if (DefIdx != -1)
282 tieOperands(DefIdx, OpNo);
283 }
284 // If the register operand is flagged as early, mark the operand as such.
285 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
286 NewMO->setIsEarlyClobber(true);
287 }
288 // Ensure debug instructions set debug flag on register uses.
289 if (NewMO->isUse() && isDebugInstr())
290 NewMO->setIsDebug();
291 }
292}
293
294void MachineInstr::removeOperand(unsigned OpNo) {
295 assert(OpNo < getNumOperands() && "Invalid operand number");
296 untieRegOperand(OpNo);
297
298#ifndef NDEBUG
299 // Moving tied operands would break the ties.
300 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
301 if (Operands[i].isReg())
302 assert(!Operands[i].isTied() && "Cannot move tied operands");
303#endif
304
305 MachineRegisterInfo *MRI = getRegInfo();
306 if (MRI && Operands[OpNo].isReg())
307 MRI->removeRegOperandFromUseList(Operands + OpNo);
308
309 // Don't call the MachineOperand destructor. A lot of this code depends on
310 // MachineOperand having a trivial destructor anyway, and adding a call here
311 // wouldn't make it 'destructor-correct'.
312
313 if (unsigned N = NumOperands - 1 - OpNo)
314 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
315 --NumOperands;
316}
317
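/// Rebuilds the extra-info storage for this instruction (memory operands,
/// pre-/post-instruction symbols, heap-alloc marker, PC sections, CFI type and
/// MMRA metadata). A single pointer is stored inline; anything more is
/// allocated out of line in the MachineFunction.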
318void MachineInstr::setExtraInfo(MachineFunction &MF,
319 ArrayRef<MachineMemOperand *> MMOs,
320 MCSymbol *PreInstrSymbol,
321 MCSymbol *PostInstrSymbol,
322 MDNode *HeapAllocMarker, MDNode *PCSections,
323 uint32_t CFIType, MDNode *MMRAs) {
324 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
325 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
326 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
327 bool HasPCSections = PCSections != nullptr;
328 bool HasCFIType = CFIType != 0;
329 bool HasMMRAs = MMRAs != nullptr;
330 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
331 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;
332
333 // Drop all extra info if there is none.
334 if (NumPointers <= 0) {
335 Info.clear();
336 return;
337 }
338
339 // If more than one pointer, then store out of line. Store heap alloc markers
340 // out of line because PointerSumType cannot hold more than 4 tag types with
341 // 32-bit pointers.
342 // FIXME: Maybe we should make the symbols in the extra info mutable?
343 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
344 HasCFIType) {
345 Info.set<EIIK_OutOfLine>(
346 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
347 HeapAllocMarker, PCSections, CFIType, MMRAs));
348 return;
349 }
350
351 // Otherwise store the single pointer inline.
352 if (HasPreInstrSymbol)
353 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
354 else if (HasPostInstrSymbol)
355 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
356 else
357 Info.set<EIIK_MMO>(MMOs[0]);
358}
359
360void MachineInstr::dropMemRefs(MachineFunction &MF) {
361 if (memoperands_empty())
362 return;
363
364 setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
365 getHeapAllocMarker(), getPCSections(), getCFIType(),
366 getMMRAMetadata());
367}
368
369void MachineInstr::setMemRefs(MachineFunction &MF,
370 ArrayRef<MachineMemOperand *> MMOs) {
371 if (MMOs.empty()) {
372 dropMemRefs(MF);
373 return;
374 }
375
376 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
377 getHeapAllocMarker(), getPCSections(), getCFIType(),
378 getMMRAMetadata());
379}
380
381void MachineInstr::addMemOperand(MachineFunction &MF,
382 MachineMemOperand *MO) {
383 SmallVector<MachineMemOperand *, 2> MMOs;
384 MMOs.append(memoperands_begin(), memoperands_end());
385 MMOs.push_back(MO);
386 setMemRefs(MF, MMOs);
387}
388
389void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
390 if (this == &MI)
391 // Nothing to do for a self-clone!
392 return;
393
394 assert(&MF == MI.getMF() &&
395 "Invalid machine functions when cloning memory refrences!");
396 // See if we can just steal the extra info already allocated for the
397 // instruction. We can do this whenever the pre- and post-instruction symbols
398 // are the same (including null).
399 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
400 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
401 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
402 getPCSections() == MI.getPCSections() &&
403 getMMRAMetadata() == MI.getMMRAMetadata()) {
404 Info = MI.Info;
405 return;
406 }
407
408 // Otherwise, fall back on a copy-based clone.
409 setMemRefs(MF, MI.memoperands());
410}
411
412/// Check to see if the MMOs pointed to by the two MemRefs arrays are
413/// identical.
414static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
415 ArrayRef<MachineMemOperand *> RHS) {
416 if (LHS.size() != RHS.size())
417 return false;
418
419 auto LHSPointees = make_pointee_range(LHS);
420 auto RHSPointees = make_pointee_range(RHS);
421 return std::equal(LHSPointees.begin(), LHSPointees.end(),
422 RHSPointees.begin());
423}
424
425void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
426 ArrayRef<const MachineInstr *> MIs) {
427 // Try handling easy numbers of MIs with simpler mechanisms.
428 if (MIs.empty()) {
429 dropMemRefs(MF);
430 return;
431 }
432 if (MIs.size() == 1) {
433 cloneMemRefs(MF, *MIs[0]);
434 return;
435 }
436 // Because an empty memoperands list provides *no* information and must be
437 // handled conservatively (assuming the instruction can do anything), the only
438 // way to merge with it is to drop all other memoperands.
439 if (MIs[0]->memoperands_empty()) {
440 dropMemRefs(MF);
441 return;
442 }
443
444 // Handle the general case.
445 SmallVector<MachineMemOperand *, 2> MergedMMOs;
446 // Start with the first instruction.
447 assert(&MF == MIs[0]->getMF() &&
448 "Invalid machine functions when cloning memory references!");
449 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
450 // Now walk all the other instructions and accumulate any different MMOs.
451 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
452 assert(&MF == MI.getMF() &&
453 "Invalid machine functions when cloning memory references!");
454
455 // Skip MIs with identical operands to the first. This is a somewhat
456 // arbitrary hack but will catch common cases without being quadratic.
457 // TODO: We could fully implement merge semantics here if needed.
458 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
459 continue;
460
461 // Because an empty memoperands list provides *no* information and must be
462 // handled conservatively (assuming the instruction can do anything), the
463 // only way to merge with it is to drop all other memoperands.
464 if (MI.memoperands_empty()) {
465 dropMemRefs(MF);
466 return;
467 }
468
469 // Otherwise accumulate these into our temporary buffer of the merged state.
470 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
471 }
472
473 setMemRefs(MF, MergedMMOs);
474}
475
476void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
477 // Do nothing if old and new symbols are the same.
478 if (Symbol == getPreInstrSymbol())
479 return;
480
481 // If there was only one symbol and we're removing it, just clear info.
482 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
483 Info.clear();
484 return;
485 }
486
487 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
488 getHeapAllocMarker(), getPCSections(), getCFIType(),
489 getMMRAMetadata());
490}
491
492void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
493 // Do nothing if old and new symbols are the same.
494 if (Symbol == getPostInstrSymbol())
495 return;
496
497 // If there was only one symbol and we're removing it, just clear info.
498 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
499 Info.clear();
500 return;
501 }
502
503 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
504 getHeapAllocMarker(), getPCSections(), getCFIType(),
505 getMMRAMetadata());
506}
507
508void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
509 // Do nothing if old and new symbols are the same.
510 if (Marker == getHeapAllocMarker())
511 return;
512
513 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
514 Marker, getPCSections(), getCFIType(), getMMRAMetadata());
515}
516
517void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
518 // Do nothing if old and new symbols are the same.
519 if (PCSections == getPCSections())
520 return;
521
522 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
523 getHeapAllocMarker(), PCSections, getCFIType(),
524 getMMRAMetadata());
525}
526
527void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
528 // Do nothing if old and new types are the same.
529 if (Type == getCFIType())
530 return;
531
532 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
533 getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
534}
535
536void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
537 // Do nothing if old and new symbols are the same.
538 if (MMRAs == getMMRAMetadata())
539 return;
540
541 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
542 getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
543}
544
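// Copies the pre-/post-instruction symbols and the extra metadata (heap-alloc
// marker, PC sections, MMRA) from MI onto this instruction.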
545void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
546 const MachineInstr &MI) {
547 if (this == &MI)
548 // Nothing to do for a self-clone!
549 return;
550
551 assert(&MF == MI.getMF() &&
552 "Invalid machine functions when cloning instruction symbols!");
553
554 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
555 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
556 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
557 setPCSections(MF, MI.getPCSections());
558 setMMRAMetadata(MF, MI.getMMRAMetadata());
559}
560
561uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
562 // For now, just return the union of the flags. If the flags get more
563 // complicated over time, we might need more logic here.
564 return getFlags() | Other.getFlags();
565}
566
567uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
568 uint32_t MIFlags = 0;
569 // Copy the wrapping flags.
570 if (const OverflowingBinaryOperator *OB =
571 dyn_cast<OverflowingBinaryOperator>(&I)) {
572 if (OB->hasNoSignedWrap())
573 MIFlags |= MachineInstr::MIFlag::NoSWrap;
574 if (OB->hasNoUnsignedWrap())
575 MIFlags |= MachineInstr::MIFlag::NoUWrap;
576 } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
577 if (TI->hasNoSignedWrap())
578 MIFlags |= MachineInstr::MIFlag::NoSWrap;
579 if (TI->hasNoUnsignedWrap())
580 MIFlags |= MachineInstr::MIFlag::NoUWrap;
581 } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
582 if (GEP->hasNoUnsignedSignedWrap())
583 MIFlags |= MachineInstr::MIFlag::NoUSWrap;
584 if (GEP->hasNoUnsignedWrap())
585 MIFlags |= MachineInstr::MIFlag::NoUWrap;
586 }
587
588 // Copy the nonneg flag.
589 if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
590 if (PNI->hasNonNeg())
591 MIFlags |= MachineInstr::MIFlag::NonNeg;
592 // Copy the disjoint flag.
593 } else if (const PossiblyDisjointInst *PD =
594 dyn_cast<PossiblyDisjointInst>(&I)) {
595 if (PD->isDisjoint())
596 MIFlags |= MachineInstr::MIFlag::Disjoint;
597 }
598
599 // Copy the samesign flag.
600 if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
601 if (ICmp->hasSameSign())
602 MIFlags |= MachineInstr::MIFlag::SameSign;
603
604 // Copy the exact flag.
605 if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
606 if (PE->isExact())
607 MIFlags |= MachineInstr::MIFlag::IsExact;
608
609 // Copy the fast-math flags.
610 if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
611 const FastMathFlags Flags = FP->getFastMathFlags();
612 if (Flags.noNaNs())
613 MIFlags |= MachineInstr::MIFlag::FmNoNans;
614 if (Flags.noInfs())
615 MIFlags |= MachineInstr::MIFlag::FmNoInfs;
616 if (Flags.noSignedZeros())
617 MIFlags |= MachineInstr::MIFlag::FmNsz;
618 if (Flags.allowReciprocal())
619 MIFlags |= MachineInstr::MIFlag::FmArcp;
620 if (Flags.allowContract())
621 MIFlags |= MachineInstr::MIFlag::FmContract;
622 if (Flags.approxFunc())
623 MIFlags |= MachineInstr::MIFlag::FmAfn;
624 if (Flags.allowReassoc())
625 MIFlags |= MachineInstr::MIFlag::FmReassoc;
626 }
627
628 if (I.getMetadata(LLVMContext::MD_unpredictable))
629 MIFlags |= MachineInstr::MIFlag::Unpredictable;
630
631 return MIFlags;
632}
633
634void MachineInstr::copyIRFlags(const Instruction &I) {
635 Flags = copyFlagsFromInstruction(I);
636}
637
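// Walks every instruction of the bundle headed by this instruction and
// reports whether the MCID flags in Mask are set on any (AnyInBundle) or all
// (AllInBundle) of them.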
638bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
639 assert(!isBundledWithPred() && "Must be called on bundle header");
640 for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
641 if (MII->getDesc().getFlags() & Mask) {
642 if (Type == AnyInBundle)
643 return true;
644 } else {
645 if (Type == AllInBundle && !MII->isBundle())
646 return false;
647 }
648 // This was the last instruction in the bundle.
649 if (!MII->isBundledWithSucc())
650 return Type == AllInBundle;
651 }
652}
653
654bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
655 MICheckType Check) const {
656 // If opcodes or number of operands are not the same then the two
657 // instructions are obviously not identical.
658 if (Other.getOpcode() != getOpcode() ||
659 Other.getNumOperands() != getNumOperands())
660 return false;
661
662 if (isBundle()) {
663 // We have passed the test above that both instructions have the same
664 // opcode, so we know that both instructions are bundles here. Let's compare
665 // MIs inside the bundle.
666 assert(Other.isBundle() && "Expected that both instructions are bundles.");
667 MachineBasicBlock::const_instr_iterator I1 = getIterator();
668 MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
669 // Loop until we analyzed the last instruction inside at least one of the
670 // bundles.
671 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
672 ++I1;
673 ++I2;
674 if (!I1->isIdenticalTo(*I2, Check))
675 return false;
676 }
677 // If we've reached the end of just one of the two bundles, but not both,
678 // the instructions are not identical.
679 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
680 return false;
681 }
682
683 // Check operands to make sure they match.
684 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
685 const MachineOperand &MO = getOperand(i);
686 const MachineOperand &OMO = Other.getOperand(i);
687 if (!MO.isReg()) {
688 if (!MO.isIdenticalTo(OMO))
689 return false;
690 continue;
691 }
692
693 // Clients may or may not want to ignore defs when testing for equality.
694 // For example, machine CSE pass only cares about finding common
695 // subexpressions, so it's safe to ignore virtual register defs.
696 if (MO.isDef()) {
697 if (Check == IgnoreDefs)
698 continue;
699 else if (Check == IgnoreVRegDefs) {
700 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
701 if (!MO.isIdenticalTo(OMO))
702 return false;
703 } else {
704 if (!MO.isIdenticalTo(OMO))
705 return false;
706 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
707 return false;
708 }
709 } else {
710 if (!MO.isIdenticalTo(OMO))
711 return false;
712 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
713 return false;
714 }
715 }
716 // If DebugLoc does not match then two debug instructions are not identical.
717 if (isDebugInstr())
718 if (getDebugLoc() && Other.getDebugLoc() &&
719 getDebugLoc() != Other.getDebugLoc())
720 return false;
721 // If pre- or post-instruction symbols do not match then the two instructions
722 // are not identical.
723 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
724 getPostInstrSymbol() != Other.getPostInstrSymbol())
725 return false;
726 // Call instructions with different CFI types are not identical.
727 if (isCall() && getCFIType() != Other.getCFIType())
728 return false;
729
730 return true;
731}
732
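// Returns true if this and Other are DBG_VALUE-like instructions that
// describe the same variable with equivalent debug locations, debug operands
// and expressions.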
733bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
734 if (!isDebugValueLike() || !Other.isDebugValueLike())
735 return false;
736 if (getDebugLoc() != Other.getDebugLoc())
737 return false;
738 if (getDebugVariable() != Other.getDebugVariable())
739 return false;
740 if (getNumDebugOperands() != Other.getNumDebugOperands())
741 return false;
742 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
743 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
744 return false;
745 if (!DIExpression::isEqualExpression(
746 getDebugExpression(), isIndirectDebugValue(),
747 Other.getDebugExpression(), Other.isIndirectDebugValue()))
748 return false;
749 return true;
750}
751
752const MachineFunction *MachineInstr::getMF() const {
753 return getParent()->getParent();
754}
755
756MachineInstr *MachineInstr::removeFromParent() {
757 assert(getParent() && "Not embedded in a basic block!");
758 return getParent()->remove(this);
759}
760
761MachineInstr *MachineInstr::removeFromBundle() {
762 assert(getParent() && "Not embedded in a basic block!");
763 return getParent()->remove_instr(this);
764}
765
766void MachineInstr::eraseFromParent() {
767 assert(getParent() && "Not embedded in a basic block!");
768 getParent()->erase(this);
769}
770
771void MachineInstr::eraseFromBundle() {
772 assert(getParent() && "Not embedded in a basic block!");
773 getParent()->erase_instr(this);
774}
775
777 if (!isCall(Type))
778 return false;
779 switch (getOpcode()) {
780 case TargetOpcode::PATCHPOINT:
781 case TargetOpcode::STACKMAP:
782 case TargetOpcode::STATEPOINT:
783 case TargetOpcode::FENTRY_CALL:
784 return false;
785 }
786 return true;
787}
788
790 if (isBundle())
793}
794
795unsigned MachineInstr::getNumExplicitOperands() const {
796 unsigned NumOperands = MCID->getNumOperands();
797 if (!MCID->isVariadic())
798 return NumOperands;
799
800 for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
801 const MachineOperand &MO = getOperand(I);
802 // The operands must always be in the following order:
803 // - explicit reg defs,
804 // - other explicit operands (reg uses, immediates, etc.),
805 // - implicit reg defs
806 // - implicit reg uses
807 if (MO.isReg() && MO.isImplicit())
808 break;
809 ++NumOperands;
810 }
811 return NumOperands;
812}
813
814unsigned MachineInstr::getNumExplicitDefs() const {
815 unsigned NumDefs = MCID->getNumDefs();
816 if (!MCID->isVariadic())
817 return NumDefs;
818
819 for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
820 const MachineOperand &MO = getOperand(I);
821 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
822 break;
823 ++NumDefs;
824 }
825 return NumDefs;
826}
827
828void MachineInstr::bundleWithPred() {
829 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
830 setFlag(BundledPred);
831 MachineBasicBlock::instr_iterator Pred = getIterator();
832 --Pred;
833 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
834 Pred->setFlag(BundledSucc);
835}
836
837void MachineInstr::bundleWithSucc() {
838 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
839 setFlag(BundledSucc);
840 MachineBasicBlock::instr_iterator Succ = getIterator();
841 ++Succ;
842 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
843 Succ->setFlag(BundledPred);
844}
845
846void MachineInstr::unbundleFromPred() {
847 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
848 clearFlag(BundledPred);
849 MachineBasicBlock::instr_iterator Pred = getIterator();
850 --Pred;
851 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
852 Pred->clearFlag(BundledSucc);
853}
854
855void MachineInstr::unbundleFromSucc() {
856 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
857 clearFlag(BundledSucc);
858 MachineBasicBlock::instr_iterator Succ = getIterator();
859 ++Succ;
860 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
861 Succ->clearFlag(BundledPred);
862}
863
864bool MachineInstr::isStackAligningInlineAsm() const {
865 if (isInlineAsm()) {
866 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
867 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
868 return true;
869 }
870 return false;
871}
872
873InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
874 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
875 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
876 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
877}
878
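// Finds the inline-asm operand-group descriptor (flag immediate) that covers
// OpIdx. Returns its operand index, or -1 for the fixed leading operands and
// for trailing implicit operands; if GroupNo is non-null it receives the
// zero-based group number.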
879int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
880 unsigned *GroupNo) const {
881 assert(isInlineAsm() && "Expected an inline asm instruction");
882 assert(OpIdx < getNumOperands() && "OpIdx out of range");
883
884 // Ignore queries about the initial operands.
885 if (OpIdx < InlineAsm::MIOp_FirstOperand)
886 return -1;
887
888 unsigned Group = 0;
889 unsigned NumOps;
890 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
891 i += NumOps) {
892 const MachineOperand &FlagMO = getOperand(i);
893 // If we reach the implicit register operands, stop looking.
894 if (!FlagMO.isImm())
895 return -1;
896 const InlineAsm::Flag F(FlagMO.getImm());
897 NumOps = 1 + F.getNumOperandRegisters();
898 if (i + NumOps > OpIdx) {
899 if (GroupNo)
900 *GroupNo = Group;
901 return i;
902 }
903 ++Group;
904 }
905 return -1;
906}
907
908const DILabel *MachineInstr::getDebugLabel() const {
909 assert(isDebugLabel() && "not a DBG_LABEL");
910 return cast<DILabel>(getOperand(0).getMetadata());
911}
912
913const MachineOperand &MachineInstr::getDebugVariableOp() const {
914 assert((isDebugValueLike()) && "not a DBG_VALUE*");
915 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
916 return getOperand(VariableOp);
917}
918
919MachineOperand &MachineInstr::getDebugVariableOp() {
920 assert((isDebugValueLike()) && "not a DBG_VALUE*");
921 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
922 return getOperand(VariableOp);
923}
924
925const DILocalVariable *MachineInstr::getDebugVariable() const {
926 return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
927}
928
929const MachineOperand &MachineInstr::getDebugExpressionOp() const {
930 assert((isDebugValueLike()) && "not a DBG_VALUE*");
931 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
932 return getOperand(ExpressionOp);
933}
934
935MachineOperand &MachineInstr::getDebugExpressionOp() {
936 assert((isDebugValueLike()) && "not a DBG_VALUE*");
937 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
938 return getOperand(ExpressionOp);
939}
940
941const DIExpression *MachineInstr::getDebugExpression() const {
942 return cast<DIExpression>(getDebugExpressionOp().getMetadata());
943}
944
945bool MachineInstr::isDebugEntryValue() const {
946 return isDebugValue() && getDebugExpression()->isEntryValue();
947}
948
949const TargetRegisterClass *
950MachineInstr::getRegClassConstraint(unsigned OpIdx,
951 const TargetInstrInfo *TII,
952 const TargetRegisterInfo *TRI) const {
953 assert(getParent() && "Can't have an MBB reference here!");
954 assert(getMF() && "Can't have an MF reference here!");
955 const MachineFunction &MF = *getMF();
955 const MachineFunction &MF = *getMF();
956
957 // Most opcodes have fixed constraints in their MCInstrDesc.
958 if (!isInlineAsm())
959 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
960
961 if (!getOperand(OpIdx).isReg())
962 return nullptr;
963
964 // For tied uses on inline asm, get the constraint from the def.
965 unsigned DefIdx;
966 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
967 OpIdx = DefIdx;
968
969 // Inline asm stores register class constraints in the flag word.
970 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
971 if (FlagIdx < 0)
972 return nullptr;
973
974 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
975 unsigned RCID;
976 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
977 F.hasRegClassConstraint(RCID))
978 return TRI->getRegClass(RCID);
979
980 // Assume that all registers in a memory operand are pointers.
981 if (F.isMemKind())
982 return TRI->getPointerRegClass(MF);
983
984 return nullptr;
985}
986
987const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
988 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
989 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
990 // Check every operand inside the bundle if we have
991 // been asked to.
992 if (ExploreBundle)
993 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
994 ++OpndIt)
995 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
996 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
997 else
998 // Otherwise, just check the current operands.
999 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1000 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1001 return CurRC;
1002}
1003
1004const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1005 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1006 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1007 assert(CurRC && "Invalid initial register class");
1008 // Check if Reg is constrained by some of its use/def from MI.
1009 const MachineOperand &MO = getOperand(OpIdx);
1010 if (!MO.isReg() || MO.getReg() != Reg)
1011 return CurRC;
1012 // If yes, accumulate the constraints through the operand.
1013 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1014}
1015
1016const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
1017 unsigned OpIdx, const TargetRegisterClass *CurRC,
1018 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1019 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
1020 const MachineOperand &MO = getOperand(OpIdx);
1021 assert(MO.isReg() &&
1022 "Cannot get register constraints for non-register operand");
1023 assert(CurRC && "Invalid initial register class");
1024 if (unsigned SubIdx = MO.getSubReg()) {
1025 if (OpRC)
1026 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1027 else
1028 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1029 } else if (OpRC)
1030 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1031 return CurRC;
1032}
1033
1034/// Return the number of instructions inside the MI bundle, not counting the
1035/// header instruction.
1036unsigned MachineInstr::getBundleSize() const {
1037 MachineBasicBlock::const_instr_iterator I = getIterator();
1038 unsigned Size = 0;
1039 while (I->isBundledWithSucc()) {
1040 ++Size;
1041 ++I;
1042 }
1043 return Size;
1044}
1045
1046/// Returns true if the MachineInstr has an implicit-use operand of exactly
1047/// the given register (not considering sub/super-registers).
1048bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
1049 for (const MachineOperand &MO : implicit_operands()) {
1050 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1051 return true;
1052 }
1053 return false;
1054}
1055
1056/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
1057/// the specific register or -1 if it is not found. It further tightens
1058/// the search criteria to a use that kills the register if isKill is true.
1059int MachineInstr::findRegisterUseOperandIdx(Register Reg,
1060 const TargetRegisterInfo *TRI,
1061 bool isKill) const {
1062 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1063 const MachineOperand &MO = getOperand(i);
1064 if (!MO.isReg() || !MO.isUse())
1065 continue;
1066 Register MOReg = MO.getReg();
1067 if (!MOReg)
1068 continue;
1069 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1070 if (!isKill || MO.isKill())
1071 return i;
1072 }
1073 return -1;
1074}
1075
1076/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1077/// indicating if this instruction reads or writes Reg. This also considers
1078/// partial defines.
1079std::pair<bool,bool>
1080MachineInstr::readsWritesVirtualRegister(Register Reg,
1081 SmallVectorImpl<unsigned> *Ops) const {
1082 bool PartDef = false; // Partial redefine.
1083 bool FullDef = false; // Full define.
1084 bool Use = false;
1085
1086 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1087 const MachineOperand &MO = getOperand(i);
1088 if (!MO.isReg() || MO.getReg() != Reg)
1089 continue;
1090 if (Ops)
1091 Ops->push_back(i);
1092 if (MO.isUse())
1093 Use |= !MO.isUndef();
1094 else if (MO.getSubReg() && !MO.isUndef())
1095 // A partial def undef doesn't count as reading the register.
1096 PartDef = true;
1097 else
1098 FullDef = true;
1099 }
1100 // A partial redefine uses Reg unless there is also a full define.
1101 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1102}
1103
1104/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1105/// the specified register or -1 if it is not found. If isDead is true, defs
1106/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1107/// also checks if there is a def of a super-register.
1108int MachineInstr::findRegisterDefOperandIdx(Register Reg,
1109 const TargetRegisterInfo *TRI,
1110 bool isDead, bool Overlap) const {
1111 bool isPhys = Reg.isPhysical();
1112 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1113 const MachineOperand &MO = getOperand(i);
1114 // Accept regmask operands when Overlap is set.
1115 // Ignore them when looking for a specific def operand (Overlap == false).
1116 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1117 return i;
1118 if (!MO.isReg() || !MO.isDef())
1119 continue;
1120 Register MOReg = MO.getReg();
1121 bool Found = (MOReg == Reg);
1122 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1123 if (Overlap)
1124 Found = TRI->regsOverlap(MOReg, Reg);
1125 else
1126 Found = TRI->isSubRegister(MOReg, Reg);
1127 }
1128 if (Found && (!isDead || MO.isDead()))
1129 return i;
1130 }
1131 return -1;
1132}
1133
1134/// findFirstPredOperandIdx() - Find the index of the first operand in the
1135/// operand list that is used to represent the predicate. It returns -1 if
1136/// none is found.
1137int MachineInstr::findFirstPredOperandIdx() const {
1138 // Don't call MCID.findFirstPredOperandIdx() because this variant
1139 // is sometimes called on an instruction that's not yet complete, and
1140 // so the number of operands is less than the MCID indicates. In
1141 // particular, the PTX target does this.
1142 const MCInstrDesc &MCID = getDesc();
1143 if (MCID.isPredicable()) {
1144 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1145 if (MCID.operands()[i].isPredicate())
1146 return i;
1147 }
1148
1149 return -1;
1150}
1151
1152// MachineOperand::TiedTo is 4 bits wide.
1153const unsigned TiedMax = 15;
1154
1155/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1156///
1157/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1158/// field. TiedTo can have these values:
1159///
1160/// 0: Operand is not tied to anything.
1161/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1162/// TiedMax: Tied to an operand >= TiedMax-1.
1163///
1164/// The tied def must be one of the first TiedMax operands on a normal
1165/// instruction. INLINEASM instructions allow more tied defs.
1166///
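// Illustrative example (not part of the original source): for a two-address
// instruction whose def operand 0 is tied to use operand 1, tieOperands(0, 1)
// stores getOperand(1).TiedTo = 1 and getOperand(0).TiedTo = 2, i.e. each side
// records the other operand's index plus one.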
1167void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1168 MachineOperand &DefMO = getOperand(DefIdx);
1169 MachineOperand &UseMO = getOperand(UseIdx);
1170 assert(DefMO.isDef() && "DefIdx must be a def operand");
1171 assert(UseMO.isUse() && "UseIdx must be a use operand");
1172 assert(!DefMO.isTied() && "Def is already tied to another use");
1173 assert(!UseMO.isTied() && "Use is already tied to another def");
1174
1175 if (DefIdx < TiedMax)
1176 UseMO.TiedTo = DefIdx + 1;
1177 else {
1178 // Inline asm can use the group descriptors to find tied operands,
1179 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1180 // but on normal instruction, the tied def must be within the first TiedMax
1181 // operands.
1182 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1183 "DefIdx out of range");
1184 UseMO.TiedTo = TiedMax;
1185 }
1186
1187 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1188 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1189}
1190
1191/// Given the index of a tied register operand, find the operand it is tied to.
1192/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1193/// which must exist.
1194unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1195 const MachineOperand &MO = getOperand(OpIdx);
1196 assert(MO.isTied() && "Operand isn't tied");
1197
1198 // Normally TiedTo is in range.
1199 if (MO.TiedTo < TiedMax)
1200 return MO.TiedTo - 1;
1201
1202 // Uses on normal instructions can be out of range.
1203 if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
1204 // Normal tied defs must be in the 0..TiedMax-1 range.
1205 if (MO.isUse())
1206 return TiedMax - 1;
1207 // MO is a def. Search for the tied use.
1208 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1209 const MachineOperand &UseMO = getOperand(i);
1210 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1211 return i;
1212 }
1213 llvm_unreachable("Can't find tied use");
1214 }
1215
1216 if (getOpcode() == TargetOpcode::STATEPOINT) {
1217 // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
1218 // on registers.
1219 StatepointOpers SO(this);
1220 unsigned CurUseIdx = SO.getFirstGCPtrIdx();
1221 assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
1222 unsigned NumDefs = getNumDefs();
1223 for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
1224 while (!getOperand(CurUseIdx).isReg())
1225 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1226 if (OpIdx == CurDefIdx)
1227 return CurUseIdx;
1228 if (OpIdx == CurUseIdx)
1229 return CurDefIdx;
1230 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1231 }
1232 llvm_unreachable("Can't find tied use");
1233 }
1234
1235 // Now deal with inline asm by parsing the operand group descriptor flags.
1236 // Find the beginning of each operand group.
1237 SmallVector<unsigned, 8> GroupIdx;
1238 unsigned OpIdxGroup = ~0u;
1239 unsigned NumOps;
1240 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1241 i += NumOps) {
1242 const MachineOperand &FlagMO = getOperand(i);
1243 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1244 unsigned CurGroup = GroupIdx.size();
1245 GroupIdx.push_back(i);
1246 const InlineAsm::Flag F(FlagMO.getImm());
1247 NumOps = 1 + F.getNumOperandRegisters();
1248 // OpIdx belongs to this operand group.
1249 if (OpIdx > i && OpIdx < i + NumOps)
1250 OpIdxGroup = CurGroup;
1251 unsigned TiedGroup;
1252 if (!F.isUseOperandTiedToDef(TiedGroup))
1253 continue;
1254 // Operands in this group are tied to operands in TiedGroup which must be
1255 // earlier. Find the number of operands between the two groups.
1256 unsigned Delta = i - GroupIdx[TiedGroup];
1257
1258 // OpIdx is a use tied to TiedGroup.
1259 if (OpIdxGroup == CurGroup)
1260 return OpIdx - Delta;
1261
1262 // OpIdx is a def tied to this use group.
1263 if (OpIdxGroup == TiedGroup)
1264 return OpIdx + Delta;
1265 }
1266 llvm_unreachable("Invalid tied operand on inline asm");
1267}
1268
1269/// clearKillInfo - Clears kill flags on all operands.
1270///
1271void MachineInstr::clearKillInfo() {
1272 for (MachineOperand &MO : operands()) {
1273 if (MO.isReg() && MO.isUse())
1274 MO.setIsKill(false);
1275 }
1276}
1277
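// Rewrites every operand that refers to FromReg so that it refers to ToReg
// (optionally through sub-register index SubIdx), handling both physical and
// virtual destination registers.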
1278void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1279 unsigned SubIdx,
1280 const TargetRegisterInfo &RegInfo) {
1281 if (ToReg.isPhysical()) {
1282 if (SubIdx)
1283 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1284 for (MachineOperand &MO : operands()) {
1285 if (!MO.isReg() || MO.getReg() != FromReg)
1286 continue;
1287 MO.substPhysReg(ToReg, RegInfo);
1288 }
1289 } else {
1290 for (MachineOperand &MO : operands()) {
1291 if (!MO.isReg() || MO.getReg() != FromReg)
1292 continue;
1293 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1294 }
1295 }
1296}
1297
1298/// isSafeToMove - Return true if it is safe to move this instruction. If
1299/// SawStore is set to true, it means that there is a store (or call) between
1300/// the instruction's location and its intended destination.
1301bool MachineInstr::isSafeToMove(bool &SawStore) const {
1302 // Ignore stuff that we obviously can't move.
1303 //
1304 // Treat volatile loads as stores. This is not strictly necessary for
1305 // volatiles, but it is required for atomic loads. It is not allowed to move
1306 // a load across an atomic load with Ordering > Monotonic.
1307 if (mayStore() || isCall() || isPHI() ||
1308 (mayLoad() && hasOrderedMemoryRef())) {
1309 SawStore = true;
1310 return false;
1311 }
1312
1313 if (isPosition() || isDebugInstr() || isTerminator() ||
1314 mayRaiseFPException() || hasUnmodeledSideEffects() ||
1315 isJumpTableDebugInfo())
1316 return false;
1317
1318 // See if this instruction does a load. If so, we have to guarantee that the
1319 // loaded value doesn't change between the load and its intended
1320 // destination. The check for isInvariantLoad gives the target the chance to
1321 // classify the load as always returning a constant, e.g. a constant pool
1322 // load.
1323 if (mayLoad() && !isDereferenceableInvariantLoad())
1324 // Otherwise, this is a real load. If there is a store between the load and
1325 // end of block, we can't move it.
1326 return !SawStore;
1327
1328 return true;
1329}
1330
1331bool MachineInstr::wouldBeTriviallyDead() const {
1332 // Don't delete frame allocation labels.
1333 // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
1334 if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
1335 return false;
1336
1337 // Don't delete FAKE_USE.
1338 // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
1339 if (isFakeUse())
1340 return false;
1341
1342 // LIFETIME markers should be preserved.
1343 // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
1344 if (isLifetimeMarker())
1345 return false;
1346
1347 // If we can move an instruction, we can remove it. Otherwise, it has
1348 // a side-effect of some sort.
1349 bool SawStore = false;
1350 return isPHI() || isSafeToMove(SawStore);
1351}
1352
1353static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
1354 bool UseTBAA, const MachineMemOperand *MMOa,
1355 const MachineMemOperand *MMOb) {
1356 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1357 // operates with MachineMemOperand offset with some important assumptions:
1358 // - LLVM fundamentally assumes flat address spaces.
1359 // - MachineOperand offset can *only* result from legalization and cannot
1360 // affect queries other than the trivial case of overlap checking.
1361 // - These offsets never wrap and never step outside of allocated objects.
1362 // - There should never be any negative offsets here.
1363 //
1364 // FIXME: Modify API to hide this math from "user"
1365 // Even before we go to AA we can reason locally about some memory objects. It
1366 // can save compile time, and possibly catch some corner cases not currently
1367 // covered.
1368
1369 int64_t OffsetA = MMOa->getOffset();
1370 int64_t OffsetB = MMOb->getOffset();
1371 int64_t MinOffset = std::min(OffsetA, OffsetB);
1372
1373 LocationSize WidthA = MMOa->getSize();
1374 LocationSize WidthB = MMOb->getSize();
1375 bool KnownWidthA = WidthA.hasValue();
1376 bool KnownWidthB = WidthB.hasValue();
1377 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1378
1379 const Value *ValA = MMOa->getValue();
1380 const Value *ValB = MMOb->getValue();
1381 bool SameVal = (ValA && ValB && (ValA == ValB));
1382 if (!SameVal) {
1383 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1384 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1385 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1386 return false;
1387 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1388 return false;
1389 if (PSVa && PSVb && (PSVa == PSVb))
1390 SameVal = true;
1391 }
1392
1393 if (SameVal && BothMMONonScalable) {
1394 if (!KnownWidthA || !KnownWidthB)
1395 return true;
1396 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1397 int64_t LowWidth = (MinOffset == OffsetA)
1398 ? WidthA.getValue().getKnownMinValue()
1399 : WidthB.getValue().getKnownMinValue();
1400 return (MinOffset + LowWidth > MaxOffset);
1401 }
1402
1403 if (!AA)
1404 return true;
1405
1406 if (!ValA || !ValB)
1407 return true;
1408
1409 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1410 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1411
1412 // If Scalable Location Size has non-zero offset, Width + Offset does not work
1413 // at the moment
1414 if ((WidthA.isScalable() && OffsetA > 0) ||
1415 (WidthB.isScalable() && OffsetB > 0))
1416 return true;
1417
1418 int64_t OverlapA =
1419 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1420 : MemoryLocation::UnknownSize;
1421 int64_t OverlapB =
1422 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1423 : MemoryLocation::UnknownSize;
1424
1425 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1426 ? WidthA
1427 : LocationSize::precise(OverlapA);
1428 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1429 ? WidthB
1430 : LocationSize::precise(OverlapB);
1431
1432 return !AA->isNoAlias(
1433 MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1434 MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1435}
1436
1437bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1438 bool UseTBAA) const {
1439 const MachineFunction *MF = getMF();
1440 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1441 const MachineFrameInfo &MFI = MF->getFrameInfo();
1442
1443 // Exclude call instruction which may alter the memory but can not be handled
1444 // by this function.
1445 if (isCall() || Other.isCall())
1446 return true;
1447
1448 // If neither instruction stores to memory, they can't alias in any
1449 // meaningful way, even if they read from the same address.
1450 if (!mayStore() && !Other.mayStore())
1451 return false;
1452
1453 // Both instructions must be memory operations to be able to alias.
1454 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1455 return false;
1456
1457 // Let the target decide if memory accesses cannot possibly overlap.
1458 if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
1459 return false;
1460
1461 // Memory operations without memory operands may access anything. Be
1462 // conservative and assume `MayAlias`.
1463 if (memoperands_empty() || Other.memoperands_empty())
1464 return true;
1465
1466 // Skip if there are too many memory operands.
1467 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1468 if (NumChecks > TII->getMemOperandAACheckLimit())
1469 return true;
1470
1471 // Check each pair of memory operands from both instructions, which can't
1472 // alias only if all pairs won't alias.
1473 for (auto *MMOa : memoperands())
1474 for (auto *MMOb : Other.memoperands())
1475 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1476 return true;
1477
1478 return false;
1479}
1480
1481/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1482/// or volatile memory reference, or if the information describing the memory
1483/// reference is not available. Return false if it is known to have no ordered
1484/// memory references.
1485bool MachineInstr::hasOrderedMemoryRef() const {
1486 // An instruction known never to access memory won't have a volatile access.
1487 if (!mayStore() &&
1488 !mayLoad() &&
1489 !isCall() &&
1490 !hasUnmodeledSideEffects())
1491 return false;
1492
1493 // Otherwise, if the instruction has no memory reference information,
1494 // conservatively assume it wasn't preserved.
1495 if (memoperands_empty())
1496 return true;
1497
1498 // Check if any of our memory operands are ordered.
1499 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1500 return !MMO->isUnordered();
1501 });
1502}
1503
1504/// isDereferenceableInvariantLoad - Return true if this instruction will never
1505/// trap and is loading from a location whose value is invariant across a run of
1506/// this function.
1507bool MachineInstr::isDereferenceableInvariantLoad() const {
1508 // If the instruction doesn't load at all, it isn't an invariant load.
1509 if (!mayLoad())
1510 return false;
1511
1512 // If the instruction has lost its memoperands, conservatively assume that
1513 // it may not be an invariant load.
1514 if (memoperands_empty())
1515 return false;
1516
1517 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1518
1519 for (MachineMemOperand *MMO : memoperands()) {
1520 if (!MMO->isUnordered())
1521 // If the memory operand has ordering side effects, we can't move the
1522 // instruction. Such an instruction is technically an invariant load,
1523 // but the caller code would need updated to expect that.
1524 return false;
1525 if (MMO->isStore()) return false;
1526 if (MMO->isInvariant() && MMO->isDereferenceable())
1527 continue;
1528
1529 // A load from a constant PseudoSourceValue is invariant.
1530 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1531 if (PSV->isConstant(&MFI))
1532 continue;
1533 }
1534
1535 // Otherwise assume conservatively.
1536 return false;
1537 }
1538
1539 // Everything checks out.
1540 return true;
1541}
1542
1543Register MachineInstr::isConstantValuePHI() const {
1544 if (!isPHI())
1545 return {};
1546 assert(getNumOperands() >= 3 &&
1547 "It's illegal to have a PHI without source operands");
1548
1549 Register Reg = getOperand(1).getReg();
1550 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1551 if (getOperand(i).getReg() != Reg)
1552 return {};
1553 return Reg;
1554}
1555
1556bool MachineInstr::hasUnmodeledSideEffects() const {
1557 if (hasProperty(MCID::UnmodeledSideEffects))
1558 return true;
1559 if (isInlineAsm()) {
1560 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1561 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1562 return true;
1563 }
1564
1565 return false;
1566}
1567
1568bool MachineInstr::isLoadFoldBarrier() const {
1569 return mayStore() || isCall() ||
1570 hasUnmodeledSideEffects();
1571}
1572
1573/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1574///
1575bool MachineInstr::allDefsAreDead() const {
1576 for (const MachineOperand &MO : operands()) {
1577 if (!MO.isReg() || MO.isUse())
1578 continue;
1579 if (!MO.isDead())
1580 return false;
1581 }
1582 return true;
1583}
1584
1585bool MachineInstr::allImplicitDefsAreDead() const {
1586 for (const MachineOperand &MO : implicit_operands()) {
1587 if (!MO.isReg() || MO.isUse())
1588 continue;
1589 if (!MO.isDead())
1590 return false;
1591 }
1592 return true;
1593}
1594
1595/// copyImplicitOps - Copy implicit register operands from specified
1596/// instruction to this instruction.
1597void MachineInstr::copyImplicitOps(MachineFunction &MF,
1598 const MachineInstr &MI) {
1599 for (const MachineOperand &MO :
1600 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1601 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1602 addOperand(MF, MO);
1603}
1604
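// Returns true when the instruction's actual tied-operand pairing no longer
// matches the TIED_TO constraints in its MCInstrDesc, in which case print()
// must emit the ties explicitly.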
1605bool MachineInstr::hasComplexRegisterTies() const {
1606 const MCInstrDesc &MCID = getDesc();
1607 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1608 return true;
1609 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1610 const auto &Operand = getOperand(I);
1611 if (!Operand.isReg() || Operand.isDef())
1612 // Ignore the defined registers as MCID marks only the uses as tied.
1613 continue;
1614 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1615 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1616 if (ExpectedTiedIdx != TiedIdx)
1617 return true;
1618 }
1619 return false;
1620}
1621
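// Computes the LLT to print for operand OpIdx; returns an invalid LLT when
// the operand is not a register or when its generic type index has already
// been printed for another operand (tracked in PrintedTypes).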
1622LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1623 const MachineRegisterInfo &MRI) const {
1624 const MachineOperand &Op = getOperand(OpIdx);
1625 if (!Op.isReg())
1626 return LLT{};
1627
1628 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1629 return MRI.getType(Op.getReg());
1630
1631 auto &OpInfo = getDesc().operands()[OpIdx];
1632 if (!OpInfo.isGenericType())
1633 return MRI.getType(Op.getReg());
1634
1635 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1636 return LLT{};
1637
1638 LLT TypeToPrint = MRI.getType(Op.getReg());
1639 // Don't mark the type index printed if it wasn't actually printed: maybe
1640 // another operand with the same type index has an actual type attached:
1641 if (TypeToPrint.isValid())
1642 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1643 return TypeToPrint;
1644}
1645
1646#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1647LLVM_DUMP_METHOD void MachineInstr::dump() const {
1648 dbgs() << " ";
1649 print(dbgs());
1650}
1651
1652LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1653 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1654 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1655 if (Depth >= MaxDepth)
1656 return;
1657 if (!AlreadySeenInstrs.insert(this).second)
1658 return;
1659 // PadToColumn always inserts at least one space.
1660 // Don't mess up the alignment if we don't want any space.
1661 if (Depth)
1662 fdbgs().PadToColumn(Depth * 2);
1663 print(fdbgs());
1664 for (const MachineOperand &MO : operands()) {
1665 if (!MO.isReg() || MO.isDef())
1666 continue;
1667 Register Reg = MO.getReg();
1668 if (Reg.isPhysical())
1669 continue;
1670 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1671 if (NewMI == nullptr)
1672 continue;
1673 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1674 }
1675}
1676
1677LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1678 unsigned MaxDepth) const {
1679 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1680 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1681}
1682#endif
1683
1684void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1685 bool SkipDebugLoc, bool AddNewLine,
1686 const TargetInstrInfo *TII) const {
1687 const Module *M = nullptr;
1688 const Function *F = nullptr;
1689 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1690 F = &MF->getFunction();
1691 M = F->getParent();
1692 if (!TII)
1693 TII = MF->getSubtarget().getInstrInfo();
1694 }
1695
1696 ModuleSlotTracker MST(M);
1697 if (F)
1698 MST.incorporateFunction(*F);
1699 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1700}
1701
1702void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1703 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1704 bool AddNewLine, const TargetInstrInfo *TII) const {
1705 // We can be a bit tidier if we know the MachineFunction.
1706 const TargetRegisterInfo *TRI = nullptr;
1707 const MachineRegisterInfo *MRI = nullptr;
1708 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1709 tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1710
1711 if (isCFIInstruction())
1712 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1713
1714 SmallBitVector PrintedTypes(8);
1715 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1716 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1717 if (!ShouldPrintRegisterTies)
1718 return 0U;
1719 const MachineOperand &MO = getOperand(OpIdx);
1720 if (MO.isReg() && MO.isTied() && !MO.isDef())
1721 return findTiedOperandIdx(OpIdx);
1722 return 0U;
1723 };
1724 unsigned StartOp = 0;
1725 unsigned e = getNumOperands();
1726
1727 // Print explicitly defined operands on the left of an assignment syntax.
1728 while (StartOp < e) {
1729 const MachineOperand &MO = getOperand(StartOp);
1730 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1731 break;
1732
1733 if (StartOp != 0)
1734 OS << ", ";
1735
1736 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1737 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1738 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1739 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1740 ++StartOp;
1741 }
1742
1743 if (StartOp != 0)
1744 OS << " = ";
1745
1746 if (getFlag(MachineInstr::FrameSetup))
1747 OS << "frame-setup ";
1748 if (getFlag(MachineInstr::FrameDestroy))
1749 OS << "frame-destroy ";
1750 if (getFlag(MachineInstr::FmNoNans))
1751 OS << "nnan ";
1752 if (getFlag(MachineInstr::FmNoInfs))
1753 OS << "ninf ";
1754 if (getFlag(MachineInstr::FmNsz))
1755 OS << "nsz ";
1756 if (getFlag(MachineInstr::FmArcp))
1757 OS << "arcp ";
1758 if (getFlag(MachineInstr::FmContract))
1759 OS << "contract ";
1760 if (getFlag(MachineInstr::FmAfn))
1761 OS << "afn ";
1762 if (getFlag(MachineInstr::FmReassoc))
1763 OS << "reassoc ";
1764 if (getFlag(MachineInstr::NoUWrap))
1765 OS << "nuw ";
1766 if (getFlag(MachineInstr::NoSWrap))
1767 OS << "nsw ";
1768 if (getFlag(MachineInstr::IsExact))
1769 OS << "exact ";
1770 if (getFlag(MachineInstr::NoFPExcept))
1771 OS << "nofpexcept ";
1772 if (getFlag(MachineInstr::NoMerge))
1773 OS << "nomerge ";
1774 if (getFlag(MachineInstr::NonNeg))
1775 OS << "nneg ";
1776 if (getFlag(MachineInstr::Disjoint))
1777 OS << "disjoint ";
1778 if (getFlag(MachineInstr::SameSign))
1779 OS << "samesign ";
1780
1781 // Print the opcode name.
1782 if (TII)
1783 OS << TII->getName(getOpcode());
1784 else
1785 OS << "UNKNOWN";
1786
1787 if (SkipOpers)
1788 return;
1789
1790 // Print the rest of the operands.
1791 bool FirstOp = true;
1792 unsigned AsmDescOp = ~0u;
1793 unsigned AsmOpCount = 0;
1794
1795 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1796 // Print asm string.
1797 OS << " ";
1798 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1799 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1800 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1801 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1802 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1803 IntrinsicInfo);
1804
1805 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1806 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1807 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1808 OS << " [sideeffect]";
1809 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1810 OS << " [mayload]";
1811 if (ExtraInfo & InlineAsm::Extra_MayStore)
1812 OS << " [maystore]";
1813 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1814 OS << " [isconvergent]";
1815 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1816 OS << " [alignstack]";
1817 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1818 OS << " [attdialect]";
1819 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1820 OS << " [inteldialect]";
1821
1822 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1823 FirstOp = false;
1824 }
1825
1826 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1827 const MachineOperand &MO = getOperand(i);
1828
1829 if (FirstOp) FirstOp = false; else OS << ",";
1830 OS << " ";
1831
1832 if (isDebugValueLike() && MO.isMetadata()) {
1833 // Pretty print DBG_VALUE* instructions.
1834 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1835 if (DIV && !DIV->getName().empty())
1836 OS << "!\"" << DIV->getName() << '\"';
1837 else {
1838 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1839 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1840 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1841 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1842 }
1843 } else if (isDebugLabel() && MO.isMetadata()) {
1844 // Pretty print DBG_LABEL instructions.
1845 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1846 if (DIL && !DIL->getName().empty())
1847 OS << "\"" << DIL->getName() << '\"';
1848 else {
1849 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1850 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1851 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1852 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1853 }
1854 } else if (i == AsmDescOp && MO.isImm()) {
1855 // Pretty print the inline asm operand descriptor.
1856 OS << '$' << AsmOpCount++;
1857 unsigned Flag = MO.getImm();
1858 const InlineAsm::Flag F(Flag);
1859 OS << ":[";
1860 OS << F.getKindName();
1861
1862 unsigned RCID;
1863 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1864 if (TRI) {
1865 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1866 } else
1867 OS << ":RC" << RCID;
1868 }
1869
1870 if (F.isMemKind()) {
1871 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1872 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1873 }
1874
1875 unsigned TiedTo;
1876 if (F.isUseOperandTiedToDef(TiedTo))
1877 OS << " tiedto:$" << TiedTo;
1878
1879 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1880 F.isRegUseKind()) &&
1881 F.getRegMayBeFolded()) {
1882 OS << " foldable";
1883 }
1884
1885 OS << ']';
1886
1887 // Compute the index of the next operand descriptor.
1888 AsmDescOp += 1 + F.getNumOperandRegisters();
1889 } else {
1890 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1891 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1892 if (MO.isImm() && isOperandSubregIdx(i))
1893 MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1894 else
1895 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1896 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1897 }
1898 }
1899
1900 // Print any optional symbols attached to this instruction as-if they were
1901 // operands.
1902 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1903 if (!FirstOp) {
1904 FirstOp = false;
1905 OS << ',';
1906 }
1907 OS << " pre-instr-symbol ";
1908 MachineOperand::printSymbol(OS, *PreInstrSymbol);
1909 }
1910 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1911 if (!FirstOp) {
1912 FirstOp = false;
1913 OS << ',';
1914 }
1915 OS << " post-instr-symbol ";
1916 MachineOperand::printSymbol(OS, *PostInstrSymbol);
1917 }
1918 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1919 if (!FirstOp) {
1920 FirstOp = false;
1921 OS << ',';
1922 }
1923 OS << " heap-alloc-marker ";
1924 HeapAllocMarker->printAsOperand(OS, MST);
1925 }
1926 if (MDNode *PCSections = getPCSections()) {
1927 if (!FirstOp) {
1928 FirstOp = false;
1929 OS << ',';
1930 }
1931 OS << " pcsections ";
1932 PCSections->printAsOperand(OS, MST);
1933 }
1934 if (MDNode *MMRA = getMMRAMetadata()) {
1935 if (!FirstOp) {
1936 FirstOp = false;
1937 OS << ',';
1938 }
1939 OS << " mmra ";
1940 MMRA->printAsOperand(OS, MST);
1941 }
1942 if (uint32_t CFIType = getCFIType()) {
1943 if (!FirstOp)
1944 OS << ',';
1945 OS << " cfi-type " << CFIType;
1946 }
1947
1948 if (DebugInstrNum) {
1949 if (!FirstOp)
1950 OS << ",";
1951 OS << " debug-instr-number " << DebugInstrNum;
1952 }
1953
1954 if (!SkipDebugLoc) {
1955 if (const DebugLoc &DL = getDebugLoc()) {
1956 if (!FirstOp)
1957 OS << ',';
1958 OS << " debug-location ";
1959 DL->printAsOperand(OS, MST);
1960 }
1961 }
1962
1963 if (!memoperands_empty()) {
1964 SmallVector<StringRef, 0> SSNs;
1965 const LLVMContext *Context = nullptr;
1966 std::unique_ptr<LLVMContext> CtxPtr;
1967 const MachineFrameInfo *MFI = nullptr;
1968 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1969 MFI = &MF->getFrameInfo();
1970 Context = &MF->getFunction().getContext();
1971 } else {
1972 CtxPtr = std::make_unique<LLVMContext>();
1973 Context = CtxPtr.get();
1974 }
1975
1976 OS << " :: ";
1977 bool NeedComma = false;
1978 for (const MachineMemOperand *Op : memoperands()) {
1979 if (NeedComma)
1980 OS << ", ";
1981 Op->print(OS, MST, SSNs, *Context, MFI, TII);
1982 NeedComma = true;
1983 }
1984 }
1985
1986 if (SkipDebugLoc)
1987 return;
1988
1989 bool HaveSemi = false;
1990
1991 // Print debug location information.
1992 if (const DebugLoc &DL = getDebugLoc()) {
1993 if (!HaveSemi) {
1994 OS << ';';
1995 HaveSemi = true;
1996 }
1997 OS << ' ';
1998 DL.print(OS);
1999 }
2000
2001 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2002 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2003 (isDebugValueList() && getNumOperands() >= 2) ||
2004 (isDebugRef() && getNumOperands() >= 3)) {
2005 if (getDebugVariableOp().isMetadata()) {
2006 if (!HaveSemi) {
2007 OS << ";";
2008 HaveSemi = true;
2009 }
2010 auto *DV = getDebugVariable();
2011 OS << " line no:" << DV->getLine();
2012 if (isIndirectDebugValue())
2013 OS << " indirect";
2014 }
2015 }
2016 // TODO: DBG_LABEL
2017
2018 if (AddNewLine)
2019 OS << '\n';
2020}
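// Illustrative usage sketch (not part of MachineInstr.cpp): a debugging helper
// a pass might write around the print() method above. The helper name and the
// choice of errs() are assumptions; the argument list follows the
// print(raw_ostream&, bool, bool, bool, bool, const TargetInstrInfo*) overload.
static void printStandalone(const MachineInstr &MI, const TargetInstrInfo *TII) {
  // IsStandalone=true re-derives tied-operand and type information so the
  // output is self-describing even without a surrounding MIR function dump.
  MI.print(errs(), /*IsStandalone=*/true, /*SkipOpers=*/false,
           /*SkipDebugLoc=*/false, /*AddNewLine=*/true, TII);
}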
2021
2022 bool MachineInstr::addRegisterKilled(Register IncomingReg,
2023 const TargetRegisterInfo *RegInfo,
2024 bool AddIfNotFound) {
2025 bool isPhysReg = IncomingReg.isPhysical();
2026 bool hasAliases = isPhysReg &&
2027 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2028 bool Found = false;
2029 SmallVector<unsigned, 4> DeadOps;
2030 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2031 MachineOperand &MO = getOperand(i);
2032 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2033 continue;
2034
2035 // DEBUG_VALUE nodes do not contribute to code generation and should
2036 // always be ignored. Failure to do so may result in trying to modify
2037 // KILL flags on DEBUG_VALUE nodes.
2038 if (MO.isDebug())
2039 continue;
2040
2041 Register Reg = MO.getReg();
2042 if (!Reg)
2043 continue;
2044
2045 if (Reg == IncomingReg) {
2046 if (!Found) {
2047 if (MO.isKill())
2048 // The register is already marked kill.
2049 return true;
2050 if (isPhysReg && isRegTiedToDefOperand(i))
2051 // Two-address uses of physregs must not be marked kill.
2052 return true;
2053 MO.setIsKill();
2054 Found = true;
2055 }
2056 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2057 // A super-register kill already exists.
2058 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2059 return true;
2060 if (RegInfo->isSubRegister(IncomingReg, Reg))
2061 DeadOps.push_back(i);
2062 }
2063 }
2064
2065 // Trim unneeded kill operands.
2066 while (!DeadOps.empty()) {
2067 unsigned OpIdx = DeadOps.back();
2068 if (getOperand(OpIdx).isImplicit() &&
2069 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2070 removeOperand(OpIdx);
2071 else
2072 getOperand(OpIdx).setIsKill(false);
2073 DeadOps.pop_back();
2074 }
2075
2076 // If not found, this means an alias of one of the operands is killed. Add a
2077 // new implicit operand if required.
2078 if (!Found && AddIfNotFound) {
2079 addOperand(MachineOperand::CreateReg(IncomingReg,
2080 false /*IsDef*/,
2081 true /*IsImp*/,
2082 true /*IsKill*/));
2083 return true;
2084 }
2085 return Found;
2086}
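// Illustrative usage sketch (assumed call site, not part of the original
// source): how a liveness-updating pass typically invokes addRegisterKilled()
// once it has proven that MI contains the last use of Reg.
static void markLastUse(MachineInstr &MI, Register Reg,
                        const TargetRegisterInfo *TRI) {
  // With AddIfNotFound=true an implicit kill operand is appended when no
  // explicit use of Reg (or one of its aliases) is already present on MI.
  MI.addRegisterKilled(Reg, TRI, /*AddIfNotFound=*/true);
}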
2087
2088 void MachineInstr::clearRegisterKills(Register Reg,
2089 const TargetRegisterInfo *RegInfo) {
2090 if (!Reg.isPhysical())
2091 RegInfo = nullptr;
2092 for (MachineOperand &MO : operands()) {
2093 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2094 continue;
2095 Register OpReg = MO.getReg();
2096 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2097 MO.setIsKill(false);
2098 }
2099}
2100
2101 bool MachineInstr::addRegisterDead(Register Reg,
2102 const TargetRegisterInfo *RegInfo,
2103 bool AddIfNotFound) {
2104 bool isPhysReg = Reg.isPhysical();
2105 bool hasAliases = isPhysReg &&
2106 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2107 bool Found = false;
2108 SmallVector<unsigned, 4> DeadOps;
2109 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2110 MachineOperand &MO = getOperand(i);
2111 if (!MO.isReg() || !MO.isDef())
2112 continue;
2113 Register MOReg = MO.getReg();
2114 if (!MOReg)
2115 continue;
2116
2117 if (MOReg == Reg) {
2118 MO.setIsDead();
2119 Found = true;
2120 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2121 // There exists a super-register that's marked dead.
2122 if (RegInfo->isSuperRegister(Reg, MOReg))
2123 return true;
2124 if (RegInfo->isSubRegister(Reg, MOReg))
2125 DeadOps.push_back(i);
2126 }
2127 }
2128
2129 // Trim unneeded dead operands.
2130 while (!DeadOps.empty()) {
2131 unsigned OpIdx = DeadOps.back();
2132 if (getOperand(OpIdx).isImplicit() &&
2133 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2134 removeOperand(OpIdx);
2135 else
2136 getOperand(OpIdx).setIsDead(false);
2137 DeadOps.pop_back();
2138 }
2139
2140 // If not found, this means an alias of one of the operands is dead. Add a
2141 // new implicit operand if required.
2142 if (Found || !AddIfNotFound)
2143 return Found;
2144
2145 addOperand(MachineOperand::CreateReg(Reg,
2146 true /*IsDef*/,
2147 true /*IsImp*/,
2148 false /*IsKill*/,
2149 true /*IsDead*/));
2150 return true;
2151}
2152
2153 void MachineInstr::clearRegisterDeads(Register Reg) {
2154 for (MachineOperand &MO : all_defs())
2155 if (MO.getReg() == Reg)
2156 MO.setIsDead(false);
2157}
2158
2159 void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2160 for (MachineOperand &MO : all_defs())
2161 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2162 MO.setIsUndef(IsUndef);
2163}
2164
2165 void MachineInstr::addRegisterDefined(Register Reg,
2166 const TargetRegisterInfo *RegInfo) {
2167 if (Reg.isPhysical()) {
2168 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2169 if (MO)
2170 return;
2171 } else {
2172 for (const MachineOperand &MO : all_defs()) {
2173 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2174 return;
2175 }
2176 }
2177 addOperand(MachineOperand::CreateReg(Reg,
2178 true /*IsDef*/,
2179 true /*IsImp*/));
2180}
2181
2182 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2183 const TargetRegisterInfo &TRI) {
2184 bool HasRegMask = false;
2185 for (MachineOperand &MO : operands()) {
2186 if (MO.isRegMask()) {
2187 HasRegMask = true;
2188 continue;
2189 }
2190 if (!MO.isReg() || !MO.isDef()) continue;
2191 Register Reg = MO.getReg();
2192 if (!Reg.isPhysical())
2193 continue;
2194 // If there are no uses, including partial uses, the def is dead.
2195 if (llvm::none_of(UsedRegs,
2196 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2197 MO.setIsDead();
2198 }
2199
2200 // This is a call with a register mask operand.
2201 // Mask clobbers are always dead, so add defs for the non-dead defines.
2202 if (HasRegMask)
2203 for (const Register &UsedReg : UsedRegs)
2204 addRegisterDefined(UsedReg, &TRI);
2205}
2206
2207unsigned
2208 MachineInstrExpressionTrait::getHashValue(const MachineInstr *const &MI) {
2209 // Build up a buffer of hash code components.
2210 SmallVector<size_t, 16> HashComponents;
2211 HashComponents.reserve(MI->getNumOperands() + 1);
2212 HashComponents.push_back(MI->getOpcode());
2213 for (const MachineOperand &MO : MI->operands()) {
2214 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2215 continue; // Skip virtual register defs.
2216
2217 HashComponents.push_back(hash_value(MO));
2218 }
2219 return hash_combine_range(HashComponents.begin(), HashComponents.end());
2220}
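// Illustrative sketch (assumption about typical use, not part of this file):
// the trait above lets value-numbering style passes key hash tables by
// instruction "expression", ignoring virtual register defs. The helper name
// and map layout are made up for the example.
static bool isDuplicateExpression(
    DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> &Seen,
    MachineInstr *MI, unsigned VN) {
  // try_emplace hashes MI via MachineInstrExpressionTrait::getHashValue().
  return !Seen.try_emplace(MI, VN).second;
}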
2221
2222 const MDNode *MachineInstr::getLocCookieMD() const {
2223 // Find the source location cookie.
2224 const MDNode *LocMD = nullptr;
2225 for (unsigned i = getNumOperands(); i != 0; --i) {
2226 if (getOperand(i-1).isMetadata() &&
2227 (LocMD = getOperand(i-1).getMetadata()) &&
2228 LocMD->getNumOperands() != 0) {
2229 if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2230 return LocMD;
2231 }
2232 }
2233
2234 return nullptr;
2235}
2236
2237 void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2238 assert(isInlineAsm() && "Expected an inline asm instruction");
2239 const MDNode *LocMD = getLocCookieMD();
2240 uint64_t LocCookie =
2241 LocMD
2242 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2243 : 0;
2244 LLVMContext &Ctx = getMF()->getFunction().getContext();
2245 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2246}
2247
2248 void MachineInstr::emitGenericError(const Twine &Msg) const {
2249 const Function &Fn = getMF()->getFunction();
2250 Fn.getContext().diagnose(
2251 DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2252}
2253
2254 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2255 const MCInstrDesc &MCID, bool IsIndirect,
2256 Register Reg, const MDNode *Variable,
2257 const MDNode *Expr) {
2258 assert(isa<DILocalVariable>(Variable) && "not a variable");
2259 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2260 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2261 "Expected inlined-at fields to agree");
2262 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2263 if (IsIndirect)
2264 MIB.addImm(0U);
2265 else
2266 MIB.addReg(0U);
2267 return MIB.addMetadata(Variable).addMetadata(Expr);
2268}
2269
2270 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2271 const MCInstrDesc &MCID, bool IsIndirect,
2272 ArrayRef<MachineOperand> DebugOps,
2273 const MDNode *Variable, const MDNode *Expr) {
2274 assert(isa<DILocalVariable>(Variable) && "not a variable");
2275 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2276 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2277 "Expected inlined-at fields to agree");
2278 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2279 assert(DebugOps.size() == 1 &&
2280 "DBG_VALUE must contain exactly one debug operand");
2281 MachineOperand DebugOp = DebugOps[0];
2282 if (DebugOp.isReg())
2283 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2284 Expr);
2285
2286 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2287 if (IsIndirect)
2288 MIB.addImm(0U);
2289 else
2290 MIB.addReg(0U);
2291 return MIB.addMetadata(Variable).addMetadata(Expr);
2292 }
2293
2294 auto MIB = BuildMI(MF, DL, MCID);
2295 MIB.addMetadata(Variable).addMetadata(Expr);
2296 for (const MachineOperand &DebugOp : DebugOps)
2297 if (DebugOp.isReg())
2298 MIB.addReg(DebugOp.getReg());
2299 else
2300 MIB.add(DebugOp);
2301 return MIB;
2302}
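// Illustrative sketch (assumed helper, not part of the original file): emitting
// a register-located DBG_VALUE through the BuildMI overloads above. Var and
// Expr are assumed to be the DILocalVariable and DIExpression describing the
// source variable.
static MachineInstrBuilder emitDbgValue(MachineFunction &MF, const DebugLoc &DL,
                                        const TargetInstrInfo &TII, Register VReg,
                                        const MDNode *Var, const MDNode *Expr) {
  // IsIndirect=false: VReg itself holds the variable's value.
  return BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE),
                 /*IsIndirect=*/false, VReg, Var, Expr);
}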
2303
2304 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2305 MachineBasicBlock::iterator I,
2306 const DebugLoc &DL, const MCInstrDesc &MCID,
2307 bool IsIndirect, Register Reg,
2308 const MDNode *Variable, const MDNode *Expr) {
2309 MachineFunction &MF = *BB.getParent();
2310 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2311 BB.insert(I, MI);
2312 return MachineInstrBuilder(MF, MI);
2313}
2314
2315 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2316 MachineBasicBlock::iterator I,
2317 const DebugLoc &DL, const MCInstrDesc &MCID,
2318 bool IsIndirect,
2319 ArrayRef<MachineOperand> DebugOps,
2320 const MDNode *Variable, const MDNode *Expr) {
2321 MachineFunction &MF = *BB.getParent();
2322 MachineInstr *MI =
2323 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2324 BB.insert(I, MI);
2325 return MachineInstrBuilder(MF, *MI);
2326}
2327
2328/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2329/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2330 static const DIExpression *computeExprForSpill(
2331 const MachineInstr &MI,
2332 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2333 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2334 "Expected inlined-at fields to agree");
2335
2336 const DIExpression *Expr = MI.getDebugExpression();
2337 if (MI.isIndirectDebugValue()) {
2338 assert(MI.getDebugOffset().getImm() == 0 &&
2339 "DBG_VALUE with nonzero offset");
2340 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2341 } else if (MI.isDebugValueList()) {
2342 // We will replace the spilled register with a frame index, so
2343 // immediately deref all references to the spilled register.
2344 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2345 for (const MachineOperand *Op : SpilledOperands) {
2346 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2347 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2348 }
2349 }
2350 return Expr;
2351}
2352 static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2353 Register SpillReg) {
2354 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2355 SmallVector<const MachineOperand *, 1> SpillOperands;
2356 for (const MachineOperand &Op : MI.getDebugOperandsForReg(SpillReg))
2357 SpillOperands.push_back(&Op);
2358 return computeExprForSpill(MI, SpillOperands);
2359}
2360
2361 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2362 MachineBasicBlock::iterator I,
2363 const MachineInstr &Orig,
2364 int FrameIndex, Register SpillReg) {
2365 assert(!Orig.isDebugRef() &&
2366 "DBG_INSTR_REF should not reference a virtual register.");
2367 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2368 MachineInstrBuilder NewMI =
2369 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2370 // Non-Variadic Operands: Location, Offset, Variable, Expression
2371 // Variadic Operands: Variable, Expression, Locations...
2372 if (Orig.isNonListDebugValue())
2373 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2374 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2375 if (Orig.isDebugValueList()) {
2376 for (const MachineOperand &Op : Orig.debug_operands())
2377 if (Op.isReg() && Op.getReg() == SpillReg)
2378 NewMI.addFrameIndex(FrameIndex);
2379 else
2380 NewMI.add(MachineOperand(Op));
2381 }
2382 return NewMI;
2383}
2384 MachineInstr *llvm::buildDbgValueForSpill(
2385 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2386 const MachineInstr &Orig, int FrameIndex,
2387 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2388 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2389 MachineInstrBuilder NewMI =
2390 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2391 // Non-Variadic Operands: Location, Offset, Variable, Expression
2392 // Variadic Operands: Variable, Expression, Locations...
2393 if (Orig.isNonListDebugValue())
2394 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2395 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2396 if (Orig.isDebugValueList()) {
2397 for (const MachineOperand &Op : Orig.debug_operands())
2398 if (is_contained(SpilledOperands, &Op))
2399 NewMI.addFrameIndex(FrameIndex);
2400 else
2401 NewMI.add(MachineOperand(Op));
2402 }
2403 return NewMI;
2404}
2405
2406 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2407 Register Reg) {
2408 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2409 if (Orig.isNonListDebugValue())
2410 Orig.getDebugOffset().ChangeToImmediate(0U);
2411 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2412 Op.ChangeToFrameIndex(FrameIndex);
2413 Orig.getDebugExpressionOp().setMetadata(Expr);
2414}
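// Illustrative sketch (assumed call site): after assigning VReg to stack slot
// FI, a spiller can rewrite the users' debug values in place with the helper
// above. DbgUsers is assumed to hold the DBG_VALUEs that reference VReg.
static void retargetDbgValuesToSlot(ArrayRef<MachineInstr *> DbgUsers, int FI,
                                    Register VReg) {
  for (MachineInstr *DbgMI : DbgUsers)
    llvm::updateDbgValueForSpill(*DbgMI, FI, VReg);
}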
2415
2416 void MachineInstr::collectDebugValues(
2417 SmallVectorImpl<MachineInstr *> &DbgValues) {
2418 MachineInstr &MI = *this;
2419 if (!MI.getOperand(0).isReg())
2420 return;
2421
2422 MachineBasicBlock::iterator DI = MI; ++DI;
2423 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2424 DI != DE; ++DI) {
2425 if (!DI->isDebugValue())
2426 return;
2427 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2428 DbgValues.push_back(&*DI);
2429 }
2430}
2431
2432 void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2433 // Collect matching debug values.
2434 SmallVector<MachineInstr *, 2> DbgValues;
2435
2436 if (!getOperand(0).isReg())
2437 return;
2438
2439 Register DefReg = getOperand(0).getReg();
2440 auto *MRI = getRegInfo();
2441 for (auto &MO : MRI->use_operands(DefReg)) {
2442 auto *DI = MO.getParent();
2443 if (!DI->isDebugValue())
2444 continue;
2445 if (DI->hasDebugOperandForReg(DefReg)) {
2446 DbgValues.push_back(DI);
2447 }
2448 }
2449
2450 // Propagate Reg to debug value instructions.
2451 for (auto *DBI : DbgValues)
2452 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2453 Op.setReg(Reg);
2454}
2455
2456 using MMOList = SmallVector<const MachineMemOperand *, 2>;
2457
2458 static LocationSize getSpillSlotSize(const MMOList &Accesses,
2459 const MachineFrameInfo &MFI) {
2460 uint64_t Size = 0;
2461 for (const auto *A : Accesses) {
2462 if (MFI.isSpillSlotObjectIndex(
2463 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2464 ->getFrameIndex())) {
2465 LocationSize S = A->getSize();
2466 if (!S.hasValue())
2467 return LocationSize::beforeOrAfterPointer();
2468 Size += S.getValue();
2469 }
2470 }
2471 return Size;
2472}
2473
2474std::optional<LocationSize>
2475 MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2476 int FI;
2477 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2478 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2479 if (MFI.isSpillSlotObjectIndex(FI))
2480 return (*memoperands_begin())->getSize();
2481 }
2482 return std::nullopt;
2483}
2484
2485std::optional<LocationSize>
2486 MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2487 MMOList Accesses;
2488 if (TII->hasStoreToStackSlot(*this, Accesses))
2489 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2490 return std::nullopt;
2491}
2492
2493std::optional<LocationSize>
2494 MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2495 int FI;
2496 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2497 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2498 if (MFI.isSpillSlotObjectIndex(FI))
2499 return (*memoperands_begin())->getSize();
2500 }
2501 return std::nullopt;
2502}
2503
2504std::optional<LocationSize>
2505 MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2506 MMOList Accesses;
2507 if (TII->hasLoadFromStackSlot(*this, Accesses))
2508 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2509 return std::nullopt;
2510}
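// Illustrative sketch (assumed statistics/remark code, not part of this file):
// using the spill-size queries above to accumulate spilled bytes. The helper
// name is made up for the example.
static uint64_t spilledBytes(const MachineInstr &MI, const TargetInstrInfo *TII) {
  // Returns 0 when MI is not a recognized spill or the size is unknown;
  // scalable sizes are counted by their known minimum size in bytes.
  if (std::optional<LocationSize> Sz = MI.getSpillSize(TII))
    if (Sz->hasValue())
      return Sz->getValue().getKnownMinValue();
  return 0;
}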
2511
2512 unsigned MachineInstr::getDebugInstrNum() {
2513 if (DebugInstrNum == 0)
2514 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2515 return DebugInstrNum;
2516}
2517
2518 unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2519 if (DebugInstrNum == 0)
2520 DebugInstrNum = MF.getNewDebugInstrNum();
2521 return DebugInstrNum;
2522}
2523
2524std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2525 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2526 getRegInfo()->getType(getOperand(1).getReg()));
2527}
2528
2529std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2530 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2531 getRegInfo()->getType(getOperand(1).getReg()),
2532 getRegInfo()->getType(getOperand(2).getReg()));
2533}
2534
2535std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2536 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2537 getRegInfo()->getType(getOperand(1).getReg()),
2538 getRegInfo()->getType(getOperand(2).getReg()),
2539 getRegInfo()->getType(getOperand(3).getReg()));
2540}
2541
2542std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2543 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2544 getRegInfo()->getType(getOperand(1).getReg()),
2545 getRegInfo()->getType(getOperand(2).getReg()),
2546 getRegInfo()->getType(getOperand(3).getReg()),
2547 getRegInfo()->getType(getOperand(4).getReg()));
2548}
2549
2550std::tuple<Register, LLT, Register, LLT>
2551 MachineInstr::getFirst2RegLLTs() const {
2552 Register Reg0 = getOperand(0).getReg();
2553 Register Reg1 = getOperand(1).getReg();
2554 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2555 getRegInfo()->getType(Reg1));
2556}
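// Illustrative sketch (assumed GlobalISel-style use): the tuple accessors
// above are meant to be unpacked with structured bindings inside combine or
// legalization code. The predicate below is a made-up example.
static bool hasMatchingDstAndSrcTypes(const MachineInstr &MI) {
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  (void)DstReg;
  (void)SrcReg;
  return DstTy == SrcTy;
}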
2557
2558std::tuple<Register, LLT, Register, LLT, Register, LLT>
2559 MachineInstr::getFirst3RegLLTs() const {
2560 Register Reg0 = getOperand(0).getReg();
2561 Register Reg1 = getOperand(1).getReg();
2562 Register Reg2 = getOperand(2).getReg();
2563 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2564 getRegInfo()->getType(Reg1), Reg2,
2565 getRegInfo()->getType(Reg2));
2566}
2567
2568std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2569 MachineInstr::getFirst4RegLLTs() const {
2570 Register Reg0 = getOperand(0).getReg();
2571 Register Reg1 = getOperand(1).getReg();
2572 Register Reg2 = getOperand(2).getReg();
2573 Register Reg3 = getOperand(3).getReg();
2574 return std::tuple(
2575 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2576 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2577}
2578
2579 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2580 LLT>
2581 MachineInstr::getFirst5RegLLTs() const {
2582 Register Reg0 = getOperand(0).getReg();
2583 Register Reg1 = getOperand(1).getReg();
2584 Register Reg2 = getOperand(2).getReg();
2585 Register Reg3 = getOperand(3).getReg();
2586 Register Reg4 = getOperand(4).getReg();
2587 return std::tuple(
2588 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2589 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2590 Reg4, getRegInfo()->getType(Reg4));
2591}
2592
2593 void MachineInstr::insert(mop_iterator InsertBefore,
2594 ArrayRef<MachineOperand> Ops) {
2595 assert(InsertBefore != nullptr && "invalid iterator");
2596 assert(InsertBefore->getParent() == this &&
2597 "iterator points to operand of other inst");
2598 if (Ops.empty())
2599 return;
2600
2601 // Do one pass to untie operands.
2602 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2603 for (const MachineOperand &MO : operands()) {
2604 if (MO.isReg() && MO.isTied()) {
2605 unsigned OpNo = getOperandNo(&MO);
2606 unsigned TiedTo = findTiedOperandIdx(OpNo);
2607 TiedOpIndices[OpNo] = TiedTo;
2608 untieRegOperand(OpNo);
2609 }
2610 }
2611
2612 unsigned OpIdx = getOperandNo(InsertBefore);
2613 unsigned NumOperands = getNumOperands();
2614 unsigned OpsToMove = NumOperands - OpIdx;
2615
2616 SmallVector<MachineOperand> MovingOps;
2617 MovingOps.reserve(OpsToMove);
2618
2619 for (unsigned I = 0; I < OpsToMove; ++I) {
2620 MovingOps.emplace_back(getOperand(OpIdx));
2621 removeOperand(OpIdx);
2622 }
2623 for (const MachineOperand &MO : Ops)
2624 addOperand(MO);
2625 for (const MachineOperand &OpMoved : MovingOps)
2626 addOperand(OpMoved);
2627
2628 // Re-tie operands.
2629 for (auto [Tie1, Tie2] : TiedOpIndices) {
2630 if (Tie1 >= OpIdx)
2631 Tie1 += Ops.size();
2632 if (Tie2 >= OpIdx)
2633 Tie2 += Ops.size();
2634 tieOperands(Tie1, Tie2);
2635 }
2636}
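// Illustrative sketch (assumed call site): adding an operand in the middle of
// an instruction via insert() above, which unties and re-ties tied operands
// around the insertion point. Idx is an assumed operand position.
static void insertImplicitUseAt(MachineInstr &MI, Register Reg, unsigned Idx) {
  MachineOperand Use =
      MachineOperand::CreateReg(Reg, /*isDef=*/false, /*isImp=*/true);
  MI.insert(MI.operands_begin() + Idx, {Use});
}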
2637
2638bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2639 assert(OpId && "expected non-zero operand id");
2640 assert(isInlineAsm() && "should only be used on inline asm");
2641
2642 if (!getOperand(OpId).isReg())
2643 return false;
2644
2645 const MachineOperand &MD = getOperand(OpId - 1);
2646 if (!MD.isImm())
2647 return false;
2648
2649 InlineAsm::Flag F(MD.getImm());
2650 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2651 return F.getRegMayBeFolded();
2652 return false;
2653}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:622
This file contains the declarations for the subclasses of Constant, which represent the different fla...
uint64_t Size
#define Check(C,...)
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
static const unsigned MaxDepth
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetIntrinsicInfo *&IntrinsicInfo, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:198
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
This class represents an Operation in the Expression.
bool print(raw_ostream &OS, DIDumpOptions DumpOpts, const DWARFExpression *Expr, DWARFUnit *U) const
A debug info location.
Definition: DebugLoc.h:33
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition: DebugLoc.h:69
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:205
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has store to stack slots.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots.
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition: InlineAsm.h:467
constexpr bool isValid() const
Definition: LowLevelType.h:145
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:579
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:338
unsigned short Opcode
Definition: MCInstrDesc.h:205
bool isVariadic() const
Return true if this instruction can have a variable number of operands.
Definition: MCInstrDesc.h:261
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
Definition: MCInstrDesc.h:565
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isSubRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA.
bool isSuperRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a super-register of RegA.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:1069
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1430
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
bool isValid() const
isValid - Returns true until all the operands have been visited.
MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
void printAsOperand(raw_ostream &OS, bool PrintType=true) const
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:575
void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForReg(Instruction *MI, Register Reg)
Returns a range of all of the operands that correspond to a debug use of Reg.
Definition: MachineInstr.h:616
bool isDebugValueList() const
void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:980
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
iterator_range< mop_iterator > debug_operands()
Returns a range over all operands that are used to determine the variable location for this DBG_VALUE...
Definition: MachineInstr.h:713
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:347
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
Definition: MachineInstr.h:871
void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
Definition: MachineInstr.h:880
bool isDebugLabel() const
void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
Definition: MachineInstr.h:903
bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
Definition: MachineInstr.h:410
QueryType
API for querying MachineInstr properties.
Definition: MachineInstr.h:892
void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:956
std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:397
uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
Definition: MachineInstr.h:581
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:578
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
Definition: MachineInstr.h:818
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:813
bool isDebugRef() const
void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:781
bool mayAlias(AAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
Definition: MachineInstr.h:478
std::tuple< LLT, LLT > getFirst2LLTs() const
std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
void unbundleFromPred()
Break bundle above this instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isStackAligningInlineAsm() const
void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
bool shouldUpdateCallSiteInfo() const
Return true if copying, moving, or erasing this instruction requires updating Call Site Info (see cop...
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
Definition: MachineInstr.h:861
bool isCFIInstruction() const
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:572
unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
Definition: MachineInstr.h:824
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
Definition: MachineInstr.h:419
std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
Definition: MachineInstr.h:662
InlineAsm::AsmDialect getInlineAsmDialect() const
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE It. Can untie/retie tied operands.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:691
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging supportDetermine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:806
void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
void emitGenericError(const Twine &ErrMsg) const
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:788
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:404
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:499
std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
bool isCandidateForCallSiteEntry(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an associated call site entry in the debug in...
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
Definition: MachineInstr.h:827
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
Definition: MachineInstr.h:504
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:594
std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
iterator_range< mop_iterator > implicit_operands()
Definition: MachineInstr.h:705
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
Definition: MachineInstr.h:482
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
Definition: MachineInstr.h:851
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
bool isPHI() const
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:585
void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:392
bool isPseudoProbe() const
bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
Definition: MachineInstr.h:839
void unbundleFromSucc()
Break bundle below this instruction.
iterator_range< filtered_mop_iterator > all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
Definition: MachineInstr.h:762
void clearKillInfo()
Clears kill flags on all operands.
bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
Definition: MachineInstr.h:644
void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
Definition: MachineInstr.h:924
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isImplicit() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
static void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
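A short sketch of appending an implicit physical-register use to an existing instruction (MF, MI, and PhysReg assumed):
  MI.addOperand(MF, MachineOperand::CreateReg(PhysReg, /*isDef=*/false,
                                              /*isImp=*/true, /*isKill=*/true));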
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Representation for a specific memory location.
void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Definition: AsmWriter.cpp:5250
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
Definition: AsmWriter.cpp:904
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in the same bit position.
Definition: InstrTypes.h:400
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Definition: Operator.h:155
Instruction that can have a nneg flag (zext/uitofp).
Definition: InstrTypes.h:636
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
Definition: SmallPtrSet.h:363
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
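A minimal sketch that uses the bool returned by insert() to visit each node only once; Nodes and visitNode() are hypothetical names:
  SmallPtrSet<const MDNode *, 8> Seen;
  for (const MDNode *MD : Nodes)
    if (Seen.insert(MD).second) // second is true only on first insertion
      visitNode(MD);            // hypothetical per-node action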
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element size.
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Definition: StackMaps.cpp:170
MI-level Statepoint operands.
Definition: StackMaps.h:158
int getFirstGCPtrIdx()
Get index of first GC pointer operand, or -1 if there are none.
Definition: StackMaps.cpp:124
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target-specific intrinsics.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register Idx for physical register Reg.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
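For instance (TRI, RegA, and RegB assumed to be in scope):
  // Aliasing sub/super-registers count as overlapping, not just exact equality.
  bool Redefines = TRI->regsOverlap(RegA, RegB);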
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
This class implements an extremely fast bulk output stream that can only output to a stream; it does not support seeking.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
Definition: MCInstrDesc.h:173
constexpr double e
Definition: MathExtras.h:47
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
hash_code hash_value(const FixedPointSemantics &Val)
Definition: APFixedPoint.h:136
formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
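A hedged sketch that emits a COPY before an insertion point; MBB, InsertPt, DL, TII, DstReg, and SrcReg are all assumed to be in scope:
  BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg, RegState::Kill);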
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
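For example, a sketch that checks whether MI has any dead register def (MI assumed):
  bool HasDeadDef = llvm::any_of(MI.operands(), [](const MachineOperand &MO) {
    return MO.isReg() && MO.isDef() && MO.isDead();
  });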
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition: iterator.h:336
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
@ Other
Any other memory.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:468
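A small sketch hashing a sequence of values; Regs is a hypothetical container of register numbers:
  SmallVector<unsigned, 8> Regs = {1, 2, 3};
  hash_code H = hash_combine_range(Regs.begin(), Regs.end());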
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:760
static unsigned getHashValue(const MachineInstr *const &MI)