LLVM 20.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseSet.h"
29#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/StringRef.h"
34#include "llvm/ADT/Twine.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
67#include "llvm/IR/Function.h"
68#include "llvm/IR/InlineAsm.h"
71#include "llvm/MC/LaneBitmask.h"
72#include "llvm/MC/MCAsmInfo.h"
73#include "llvm/MC/MCDwarf.h"
74#include "llvm/MC/MCInstrDesc.h"
77#include "llvm/Pass.h"
82#include "llvm/Support/ModRef.h"
83#include "llvm/Support/Mutex.h"
86#include <algorithm>
87#include <cassert>
88#include <cstddef>
89#include <cstdint>
90#include <iterator>
91#include <string>
92#include <utility>
93
94using namespace llvm;
95
96namespace {
97
98/// Used the by the ReportedErrors class to guarantee only one error is reported
99/// at one time.
100static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;
101
// Core verifier state: one instance checks a single MachineFunction and
// accumulates diagnostics through ReportedErrs. Not reusable across threads.
102struct MachineVerifier {
  // New-pass-manager entry point: analyses are fetched lazily from MFAM in
  // verify().
103 MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
104 raw_ostream *OS, bool AbortOnError = true)
105 : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
106 ReportedErrs(AbortOnError) {}
107
  // Legacy-pass-manager entry point: analyses come from wrapper passes via
  // PASS->getAnalysisIfAvailable in verify().
108 MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
109 bool AbortOnError = true)
110 : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
111 ReportedErrs(AbortOnError) {}
112
  // Standalone entry point: the caller supplies whichever analyses it has;
  // any of them may be null.
113 MachineVerifier(const char *b, LiveVariables *LiveVars,
114 LiveIntervals *LiveInts, LiveStacks *LiveStks,
115 SlotIndexes *Indexes, raw_ostream *OS,
116 bool AbortOnError = true)
117 : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
118 LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
119 ReportedErrs(AbortOnError) {}
120
121 /// \returns true if no problems were found.
122 bool verify(const MachineFunction &MF);
123
  // At most one of MFAM/PASS is non-null, depending on which pass manager
  // constructed us; both are null for the standalone constructor.
124 MachineFunctionAnalysisManager *MFAM = nullptr;
125 Pass *const PASS = nullptr;
127 const char *Banner;
  // Target/function context, populated at the start of verify().
128 const MachineFunction *MF = nullptr;
129 const TargetMachine *TM = nullptr;
130 const TargetInstrInfo *TII = nullptr;
131 const TargetRegisterInfo *TRI = nullptr;
132 const MachineRegisterInfo *MRI = nullptr;
133 const RegisterBankInfo *RBI = nullptr;
134
135 // Avoid querying the MachineFunctionProperties for each operand.
136 bool isFunctionRegBankSelected = false;
137 bool isFunctionSelected = false;
138 bool isFunctionTracksDebugUserValues = false;
139
140 using RegVector = SmallVector<Register, 16>;
141 using RegMaskVector = SmallVector<const uint32_t *, 4>;
142 using RegSet = DenseSet<Register>;
  // NOTE(review): the RegMap/BlockSet alias lines are missing from this copy
  // of the file — confirm against upstream before relying on their types.
145
  // Per-block scan state, reset in visitMachineBasicBlockBefore().
146 const MachineInstr *FirstNonPHI = nullptr;
147 const MachineInstr *FirstTerminator = nullptr;
148 BlockSet FunctionBlocks;
149
  // Liveness bookkeeping while walking a block's instructions.
150 BitVector regsReserved;
151 RegSet regsLive;
152 RegVector regsDefined, regsDead, regsKilled;
153 RegMaskVector regMasks;
154
  // Slot index of the most recently visited instruction; used to check that
  // indexes increase monotonically.
155 SlotIndex lastIndex;
156
157 // Add Reg and any sub-registers to RV
158 void addRegWithSubRegs(RegVector &RV, Register Reg) {
159 RV.push_back(Reg);
160 if (Reg.isPhysical())
161 append_range(RV, TRI->subregs(Reg.asMCReg()));
162 }
163
  // Per-basic-block dataflow facts gathered and refined across the passes in
  // verify()/calcRegsPassed()/calcRegsRequired().
164 struct BBInfo {
165 // Is this MBB reachable from the MF entry point?
166 bool reachable = false;
167
168 // Vregs that must be live in because they are used without being
169 // defined. Map value is the user. vregsLiveIn doesn't include regs
170 // that only are used by PHI nodes.
171 RegMap vregsLiveIn;
172
173 // Regs killed in MBB. They may be defined again, and will then be in both
174 // regsKilled and regsLiveOut.
175 RegSet regsKilled;
176
177 // Regs defined in MBB and live out. Note that vregs passing through may
178 // be live out without being mentioned here.
179 RegSet regsLiveOut;
180
181 // Vregs that pass through MBB untouched. This set is disjoint from
182 // regsKilled and regsLiveOut.
183 RegSet vregsPassed;
184
185 // Vregs that must pass through MBB because they are needed by a successor
186 // block. This set is disjoint from regsLiveOut.
187 RegSet vregsRequired;
188
189 // Set versions of block's predecessor and successor lists.
190 BlockSet Preds, Succs;
191
192 BBInfo() = default;
193
194 // Add register to vregsRequired if it belongs there. Return true if
195 // anything changed.
196 bool addRequired(Register Reg) {
197 if (!Reg.isVirtual())
198 return false;
199 if (regsLiveOut.count(Reg))
200 return false;
201 return vregsRequired.insert(Reg).second;
202 }
203
204 // Same for a full set.
205 bool addRequired(const RegSet &RS) {
206 bool Changed = false;
207 for (Register Reg : RS)
208 Changed |= addRequired(Reg);
209 return Changed;
210 }
211
212 // Same for a full map.
213 bool addRequired(const RegMap &RM) {
214 bool Changed = false;
215 for (const auto &I : RM)
216 Changed |= addRequired(I.first);
217 return Changed;
218 }
219
220 // Live-out registers are either in regsLiveOut or vregsPassed.
221 bool isLiveOut(Register Reg) const {
222 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
223 }
224 };
225
226 // Extra register info per MBB.
  // NOTE(review): the MBBInfoMap member declaration is missing from this copy
  // of the file — confirm its exact type against upstream.
228
  // A register is "reserved" if the frozen reserved-register set says so;
  // out-of-range ids are treated as not reserved.
229 bool isReserved(Register Reg) {
230 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
231 }
232
  // Allocatable = physical, in an allocatable class, and not reserved.
233 bool isAllocatable(Register Reg) const {
234 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
235 !regsReserved.test(Reg.id());
236 }
237
238 // Analysis information if available
239 LiveVariables *LiveVars = nullptr;
240 LiveIntervals *LiveInts = nullptr;
241 LiveStacks *LiveStks = nullptr;
242 SlotIndexes *Indexes = nullptr;
243
244 /// A class to track the number of reported error and to guarantee that only
245 /// one error is reported at one time.
246 class ReportedErrors {
247 unsigned NumReported = 0;
248 bool AbortOnError;
249
250 public:
251 /// \param AbortOnError -- If set, abort after printing the first error.
252 ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}
253
    // On destruction: abort if requested, otherwise release the global error
    // lock that increment() acquired on the first error.
254 ~ReportedErrors() {
255 if (!hasError())
256 return;
257 if (AbortOnError)
258 report_fatal_error("Found " + Twine(NumReported) +
259 " machine code errors.");
260 // Since we haven't aborted, release the lock to allow other threads to
261 // report errors.
262 ReportedErrorsLock->unlock();
263 }
264
265 /// Increment the number of reported errors.
266 /// \returns true if this is the first reported error.
267 bool increment() {
268 // If this is the first error this thread has encountered, grab the lock
269 // to prevent other threads from reporting errors at the same time.
270 // Otherwise we assume we already have the lock.
271 if (!hasError())
272 ReportedErrorsLock->lock();
273 ++NumReported;
274 return NumReported == 1;
275 }
276
277 /// \returns true if an error was reported.
278 bool hasError() { return NumReported; }
279 };
280 ReportedErrors ReportedErrs;
281
282 // This is calculated only when trying to verify convergence control tokens.
283 // Similar to the LLVM IR verifier, we calculate this locally instead of
284 // relying on the pass manager.
  // NOTE(review): the member declared by this comment is missing from this
  // copy of the file — confirm against upstream.
286
  // Traversal hooks, called in file order by verify().
287 void visitMachineFunctionBefore();
288 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
289 void visitMachineBundleBefore(const MachineInstr *MI);
290
291 /// Verify that all of \p MI's virtual register operands are scalars.
292 /// \returns True if all virtual register operands are scalar. False
293 /// otherwise.
294 bool verifyAllRegOpsScalar(const MachineInstr &MI,
295 const MachineRegisterInfo &MRI);
296 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
297
298 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
299 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
300 void verifyPreISelGenericInstruction(const MachineInstr *MI);
301
302 void visitMachineInstrBefore(const MachineInstr *MI);
303 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
304 void visitMachineBundleAfter(const MachineInstr *MI);
305 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
306 void visitMachineFunctionAfter();
307
  // Diagnostic emission at various granularities; each prints context for
  // the entity it is given and increments ReportedErrs.
308 void report(const char *msg, const MachineFunction *MF);
309 void report(const char *msg, const MachineBasicBlock *MBB);
310 void report(const char *msg, const MachineInstr *MI);
311 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
312 LLT MOVRegType = LLT{});
313 void report(const Twine &Msg, const MachineInstr *MI);
314
  // Extra "- foo: ..." context lines printed after a report(...) call.
315 void report_context(const LiveInterval &LI) const;
316 void report_context(const LiveRange &LR, Register VRegUnit,
317 LaneBitmask LaneMask) const;
318 void report_context(const LiveRange::Segment &S) const;
319 void report_context(const VNInfo &VNI) const;
320 void report_context(SlotIndex Pos) const;
321 void report_context(MCPhysReg PhysReg) const;
322 void report_context_liverange(const LiveRange &LR) const;
323 void report_context_lanemask(LaneBitmask LaneMask) const;
324 void report_context_vreg(Register VReg) const;
325 void report_context_vreg_regunit(Register VRegOrUnit) const;
326
327 void verifyInlineAsm(const MachineInstr *MI);
328
329 void checkLiveness(const MachineOperand *MO, unsigned MONum);
330 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
331 SlotIndex UseIdx, const LiveRange &LR,
332 Register VRegOrUnit,
333 LaneBitmask LaneMask = LaneBitmask::getNone());
334 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
335 SlotIndex DefIdx, const LiveRange &LR,
336 Register VRegOrUnit, bool SubRangeCheck = false,
337 LaneBitmask LaneMask = LaneBitmask::getNone());
338
339 void markReachable(const MachineBasicBlock *MBB);
340 void calcRegsPassed();
341 void checkPHIOps(const MachineBasicBlock &MBB);
342
343 void calcRegsRequired();
344 void verifyLiveVariables();
345 void verifyLiveIntervals();
346 void verifyLiveInterval(const LiveInterval &);
  // NOTE(review): the trailing parameter lines of the next two declarations
  // are missing from this copy of the file — confirm against upstream.
347 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
349 void verifyLiveRangeSegment(const LiveRange &,
352 void verifyLiveRange(const LiveRange &, Register,
353 LaneBitmask LaneMask = LaneBitmask::getNone());
354
355 void verifyStackFrame();
356
357 void verifySlotIndexes() const;
358 void verifyProperties(const MachineFunction &MF);
359};
360
// Legacy-pass-manager wrapper around MachineVerifier. Runs the verifier on
// every machine function and never modifies the IR.
361struct MachineVerifierLegacyPass : public MachineFunctionPass {
362 static char ID; // Pass ID, replacement for typeid
363
  // Header line printed before any diagnostics from this pass instance.
364 const std::string Banner;
365
366 MachineVerifierLegacyPass(std::string banner = std::string())
367 : MachineFunctionPass(ID), Banner(std::move(banner)) {
369 }
370
  // Pure analysis: preserves everything it runs on.
371 void getAnalysisUsage(AnalysisUsage &AU) const override {
376 AU.setPreservesAll();
378 }
379
380 bool runOnMachineFunction(MachineFunction &MF) override {
381 // Skip functions that have known verification problems.
382 // FIXME: Remove this mechanism when all problematic passes have been
383 // fixed.
384 if (MF.getProperties().hasProperty(
385 MachineFunctionProperties::Property::FailsVerification))
386 return false;
387
    // Verifier aborts on the first error by default; the function itself is
    // never changed, so always return false.
388 MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
389 return false;
390 }
391};
392
393} // end anonymous namespace
394
398 // Skip functions that have known verification problems.
399 // FIXME: Remove this mechanism when all problematic passes have been
400 // fixed.
401 if (MF.getProperties().hasProperty(
403 return PreservedAnalyses::all();
404 MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
405 return PreservedAnalyses::all();
406}
407
408char MachineVerifierLegacyPass::ID = 0;
409
410INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
411 "Verify generated machine code", false, false)
412
414 return new MachineVerifierLegacyPass(Banner);
415}
416
417void llvm::verifyMachineFunction(const std::string &Banner,
418 const MachineFunction &MF) {
419 // TODO: Use MFAM after porting below analyses.
420 // LiveVariables *LiveVars;
421 // LiveIntervals *LiveInts;
422 // LiveStacks *LiveStks;
423 // SlotIndexes *Indexes;
424 MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
425}
426
427bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
428 bool AbortOnError) const {
429 return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
430}
431
433 const char *Banner, raw_ostream *OS,
434 bool AbortOnError) const {
435 return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
436 /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
437 .verify(*this);
438}
439
// Sanity-check the SlotIndexes analysis, if present: the per-MBB index list
// must be sorted in strictly increasing slot order.
440void MachineVerifier::verifySlotIndexes() const {
441 if (Indexes == nullptr)
442 return;
443
444 // Ensure the IdxMBB list is sorted by slot indexes.
// NOTE(review): the loop-header line declaring 'Last' and the iterator 'I'
// is missing from this copy of the file — confirm against upstream.
447 E = Indexes->MBBIndexEnd(); I != E; ++I) {
448 assert(!Last.isValid() || I->first > Last);
449 Last = I->first;
450 }
451}
452
// Cross-check MachineFunctionProperties against actual function state.
453void MachineVerifier::verifyProperties(const MachineFunction &MF) {
454 // If a pass has introduced virtual registers without clearing the
455 // NoVRegs property (or set it without allocating the vregs)
456 // then report an error.
// NOTE(review): the property-enum line of this condition is missing from
// this copy of the file — presumably Property::NoVRegs; confirm upstream.
457 if (MF.getProperties().hasProperty(
459 MRI->getNumVirtRegs())
460 report("Function has NoVRegs property but there are VReg operands", &MF);
461}
462
// Main entry point: cache target/function context, pick up whatever
// analyses are available (legacy PM, new PM, or none), then walk every
// block/bundle/instruction/operand invoking the visit* hooks.
// \returns true if no problems were found.
463bool MachineVerifier::verify(const MachineFunction &MF) {
464 this->MF = &MF;
465 TM = &MF.getTarget();
// NOTE(review): the lines initializing TII/TRI are missing from this copy
// of the file — confirm against upstream.
468 RBI = MF.getSubtarget().getRegBankInfo();
469 MRI = &MF.getRegInfo();
470
// NOTE(review): the property-enum line (presumably Property::FailedISel) is
// missing from this copy of the file.
471 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
473
474 // If we're mid-GlobalISel and we already triggered the fallback path then
475 // it's expected that the MIR is somewhat broken but that's ok since we'll
476 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
477 if (isFunctionFailedISel)
478 return true;
479
  // Cache properties so per-operand checks don't re-query them.
480 isFunctionRegBankSelected = MF.getProperties().hasProperty(
482 isFunctionSelected = MF.getProperties().hasProperty(
484 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
486
  // Legacy PM: pull analyses from wrapper passes if they were computed.
487 if (PASS) {
488 auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
489 LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
490 // We don't want to verify LiveVariables if LiveIntervals is available.
491 auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
492 if (!LiveInts)
493 LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
494 auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
495 LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
496 auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
497 Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
498 }
  // New PM: use cached results only; never force analysis computation here.
499 if (MFAM) {
500 MachineFunction &Func = const_cast<MachineFunction &>(MF);
501 LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
502 if (!LiveInts)
503 LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
504 // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
505 Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
506 }
507
508 verifySlotIndexes();
509
510 verifyProperties(MF);
511
512 visitMachineFunctionBefore();
513 for (const MachineBasicBlock &MBB : MF) {
514 visitMachineBasicBlockBefore(&MBB);
515 // Keep track of the current bundle header.
516 const MachineInstr *CurBundle = nullptr;
517 // Do we expect the next instruction to be part of the same bundle?
518 bool InBundle = false;
519
520 for (const MachineInstr &MI : MBB.instrs()) {
521 if (MI.getParent() != &MBB) {
522 report("Bad instruction parent pointer", &MBB);
523 OS << "Instruction: " << MI;
524 continue;
525 }
526
527 // Check for consistent bundle flags.
528 if (InBundle && !MI.isBundledWithPred())
529 report("Missing BundledPred flag, "
530 "BundledSucc was set on predecessor",
531 &MI);
532 if (!InBundle && MI.isBundledWithPred())
533 report("BundledPred flag is set, "
534 "but BundledSucc not set on predecessor",
535 &MI);
536
537 // Is this a bundle header?
538 if (!MI.isInsideBundle()) {
539 if (CurBundle)
540 visitMachineBundleAfter(CurBundle);
541 CurBundle = &MI;
542 visitMachineBundleBefore(CurBundle);
543 } else if (!CurBundle)
544 report("No bundle header", &MI);
545 visitMachineInstrBefore(&MI);
546 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
547 const MachineOperand &Op = MI.getOperand(I);
548 if (Op.getParent() != &MI) {
549 // Make sure to use correct addOperand / removeOperand / ChangeTo
550 // functions when replacing operands of a MachineInstr.
551 report("Instruction has operand with wrong parent set", &MI);
552 }
553
554 visitMachineOperand(&Op, I);
555 }
556
557 // Was this the last bundled instruction?
558 InBundle = MI.isBundledWithSucc();
559 }
560 if (CurBundle)
561 visitMachineBundleAfter(CurBundle);
562 if (InBundle)
563 report("BundledSucc flag set on last instruction in block", &MBB.back());
564 visitMachineBasicBlockAfter(&MBB);
565 }
566 visitMachineFunctionAfter();
567
568 // Clean up.
569 regsLive.clear();
570 regsDefined.clear();
571 regsDead.clear();
572 regsKilled.clear();
573 regMasks.clear();
574 MBBInfoMap.clear();
575
576 return !ReportedErrs.hasError();
577}
578
579void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
580 assert(MF);
581 OS << '\n';
582 if (ReportedErrs.increment()) {
583 if (Banner)
584 OS << "# " << Banner << '\n';
585
586 if (LiveInts != nullptr)
587 LiveInts->print(OS);
588 else
589 MF->print(OS, Indexes);
590 }
591
592 OS << "*** Bad machine code: " << msg << " ***\n"
593 << "- function: " << MF->getName() << '\n';
594}
595
596void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
597 assert(MBB);
598 report(msg, MBB->getParent());
599 OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
600 << " (" << (const void *)MBB << ')';
601 if (Indexes)
602 OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
603 << Indexes->getMBBEndIdx(MBB) << ')';
604 OS << '\n';
605}
606
607void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
608 assert(MI);
609 report(msg, MI->getParent());
610 OS << "- instruction: ";
611 if (Indexes && Indexes->hasIndex(*MI))
612 OS << Indexes->getInstructionIndex(*MI) << '\t';
613 MI->print(OS, /*IsStandalone=*/true);
614}
615
616void MachineVerifier::report(const char *msg, const MachineOperand *MO,
617 unsigned MONum, LLT MOVRegType) {
618 assert(MO);
619 report(msg, MO->getParent());
620 OS << "- operand " << MONum << ": ";
621 MO->print(OS, MOVRegType, TRI);
622 OS << '\n';
623}
624
625void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
626 report(Msg.str().c_str(), MI);
627}
628
// Context line: the slot index at which the problem was detected.
629void MachineVerifier::report_context(SlotIndex Pos) const {
630 OS << "- at: " << Pos << '\n';
631}
632
// Context line: the live interval being verified.
633void MachineVerifier::report_context(const LiveInterval &LI) const {
634 OS << "- interval: " << LI << '\n';
635}
636
637void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
638 LaneBitmask LaneMask) const {
639 report_context_liverange(LR);
640 report_context_vreg_regunit(VRegUnit);
641 if (LaneMask.any())
642 report_context_lanemask(LaneMask);
643}
644
// Context line: the offending live range segment.
645void MachineVerifier::report_context(const LiveRange::Segment &S) const {
646 OS << "- segment: " << S << '\n';
647}
648
// Context line: the value number and its definition point.
649void MachineVerifier::report_context(const VNInfo &VNI) const {
650 OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
651}
652
653void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
654 OS << "- liverange: " << LR << '\n';
655}
656
// Context line: the physical register involved, in target syntax.
657void MachineVerifier::report_context(MCPhysReg PReg) const {
658 OS << "- p. register: " << printReg(PReg, TRI) << '\n';
659}
660
661void MachineVerifier::report_context_vreg(Register VReg) const {
662 OS << "- v. register: " << printReg(VReg, TRI) << '\n';
663}
664
665void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
666 if (VRegOrUnit.isVirtual()) {
667 report_context_vreg(VRegOrUnit);
668 } else {
669 OS << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
670 }
671}
672
// Context line: the lane mask involved in the error.
673void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
674 OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
675}
676
677void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
678 BBInfo &MInfo = MBBInfoMap[MBB];
679 if (!MInfo.reachable) {
680 MInfo.reachable = true;
681 for (const MachineBasicBlock *Succ : MBB->successors())
682 markReachable(Succ);
683 }
684}
685
686void MachineVerifier::visitMachineFunctionBefore() {
687 lastIndex = SlotIndex();
688 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
689 : TRI->getReservedRegs(*MF);
690
691 if (!MF->empty())
692 markReachable(&MF->front());
693
694 // Build a set of the basic blocks in the function.
695 FunctionBlocks.clear();
696 for (const auto &MBB : *MF) {
697 FunctionBlocks.insert(&MBB);
698 BBInfo &MInfo = MBBInfoMap[&MBB];
699
700 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
701 if (MInfo.Preds.size() != MBB.pred_size())
702 report("MBB has duplicate entries in its predecessor list.", &MBB);
703
704 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
705 if (MInfo.Succs.size() != MBB.succ_size())
706 report("MBB has duplicate entries in its successor list.", &MBB);
707 }
708
709 // Check that the register use lists are sane.
710 MRI->verifyUseLists();
711
712 if (!MF->empty())
713 verifyStackFrame();
714}
715
// Per-block checks run before the block's instructions are visited:
// live-in sanity, blockaddress bookkeeping, CFG list consistency, landing
// pad counts, and agreement between analyzeBranch's answers and the real
// successor lists. Also seeds regsLive from live-ins and pristine regs.
716void
717MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
718 FirstTerminator = nullptr;
719 FirstNonPHI = nullptr;
720
721 if (!MF->getProperties().hasProperty(
722 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
723 // If this block has allocatable physical registers live-in, check that
724 // it is an entry block or landing pad.
725 for (const auto &LI : MBB->liveins()) {
726 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
727 MBB->getIterator() != MBB->getParent()->begin() &&
// NOTE(review): the final condition line (presumably the
// isInlineAsmBrIndirectTarget check) is missing from this copy of the file.
729 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
730 "inlineasm-br-indirect-target.",
731 MBB);
732 report_context(LI.PhysReg);
733 }
734 }
735 }
736
737 if (MBB->isIRBlockAddressTaken()) {
// NOTE(review): the condition body's first line is missing from this copy
// of the file — confirm against upstream.
739 report("ir-block-address-taken is associated with basic block not used by "
740 "a blockaddress.",
741 MBB);
742 }
743
744 // Count the number of landing pad successors.
// NOTE(review): the LandingPadSuccs declaration line is missing from this
// copy of the file.
746 for (const auto *succ : MBB->successors()) {
747 if (succ->isEHPad())
748 LandingPadSuccs.insert(succ);
749 if (!FunctionBlocks.count(succ))
750 report("MBB has successor that isn't part of the function.", MBB);
751 if (!MBBInfoMap[succ].Preds.count(MBB)) {
752 report("Inconsistent CFG", MBB);
753 OS << "MBB is not in the predecessor list of the successor "
754 << printMBBReference(*succ) << ".\n";
755 }
756 }
757
758 // Check the predecessor list.
759 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
760 if (!FunctionBlocks.count(Pred))
761 report("MBB has predecessor that isn't part of the function.", MBB);
762 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
763 report("Inconsistent CFG", MBB);
764 OS << "MBB is not in the successor list of the predecessor "
765 << printMBBReference(*Pred) << ".\n";
766 }
767 }
768
769 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
770 const BasicBlock *BB = MBB->getBasicBlock();
771 const Function &F = MF->getFunction();
  // Multiple landing pad successors are only tolerated for SjLj-style
  // switch-based dispatch or scoped EH personalities.
772 if (LandingPadSuccs.size() > 1 &&
773 !(AsmInfo &&
775 BB && isa<SwitchInst>(BB->getTerminator())) &&
776 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
777 report("MBB has more than one landing pad successor", MBB);
778
779 // Call analyzeBranch. If it succeeds, there are several more conditions to
780 // check.
780 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
// NOTE(review): the Cond (SmallVector of MachineOperand) declaration line
// is missing from this copy of the file.
782 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
783 Cond)) {
784 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
785 // check whether its answers match up with reality.
786 if (!TBB && !FBB) {
787 // Block falls through to its successor.
788 if (!MBB->empty() && MBB->back().isBarrier() &&
789 !TII->isPredicated(MBB->back())) {
790 report("MBB exits via unconditional fall-through but ends with a "
791 "barrier instruction!", MBB);
792 }
793 if (!Cond.empty()) {
794 report("MBB exits via unconditional fall-through but has a condition!",
795 MBB);
796 }
797 } else if (TBB && !FBB && Cond.empty()) {
798 // Block unconditionally branches somewhere.
799 if (MBB->empty()) {
800 report("MBB exits via unconditional branch but doesn't contain "
801 "any instructions!", MBB);
802 } else if (!MBB->back().isBarrier()) {
803 report("MBB exits via unconditional branch but doesn't end with a "
804 "barrier instruction!", MBB);
805 } else if (!MBB->back().isTerminator()) {
806 report("MBB exits via unconditional branch but the branch isn't a "
807 "terminator instruction!", MBB);
808 }
809 } else if (TBB && !FBB && !Cond.empty()) {
810 // Block conditionally branches somewhere, otherwise falls through.
811 if (MBB->empty()) {
812 report("MBB exits via conditional branch/fall-through but doesn't "
813 "contain any instructions!", MBB);
814 } else if (MBB->back().isBarrier()) {
815 report("MBB exits via conditional branch/fall-through but ends with a "
816 "barrier instruction!", MBB);
817 } else if (!MBB->back().isTerminator()) {
818 report("MBB exits via conditional branch/fall-through but the branch "
819 "isn't a terminator instruction!", MBB);
820 }
821 } else if (TBB && FBB) {
822 // Block conditionally branches somewhere, otherwise branches
823 // somewhere else.
824 if (MBB->empty()) {
825 report("MBB exits via conditional branch/branch but doesn't "
826 "contain any instructions!", MBB);
827 } else if (!MBB->back().isBarrier()) {
828 report("MBB exits via conditional branch/branch but doesn't end with a "
829 "barrier instruction!", MBB);
830 } else if (!MBB->back().isTerminator()) {
831 report("MBB exits via conditional branch/branch but the branch "
832 "isn't a terminator instruction!", MBB);
833 }
834 if (Cond.empty()) {
835 report("MBB exits via conditional branch/branch but there's no "
836 "condition!", MBB);
837 }
838 } else {
839 report("analyzeBranch returned invalid data!", MBB);
840 }
841
842 // Now check that the successors match up with the answers reported by
843 // analyzeBranch.
844 if (TBB && !MBB->isSuccessor(TBB))
845 report("MBB exits via jump or conditional branch, but its target isn't a "
846 "CFG successor!",
847 MBB);
848 if (FBB && !MBB->isSuccessor(FBB))
849 report("MBB exits via conditional branch, but its target isn't a CFG "
850 "successor!",
851 MBB);
852
853 // There might be a fallthrough to the next block if there's either no
854 // unconditional true branch, or if there's a condition, and one of the
855 // branches is missing.
856 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
857
858 // A conditional fallthrough must be an actual CFG successor, not
859 // unreachable. (Conversely, an unconditional fallthrough might not really
860 // be a successor, because the block might end in unreachable.)
861 if (!Cond.empty() && !FBB) {
// NOTE(review): the MBBI (next-block iterator) declaration line is missing
// from this copy of the file.
863 if (MBBI == MF->end()) {
864 report("MBB conditionally falls through out of function!", MBB);
865 } else if (!MBB->isSuccessor(&*MBBI))
866 report("MBB exits via conditional branch/fall-through but the CFG "
867 "successors don't match the actual successors!",
868 MBB);
869 }
870
871 // Verify that there aren't any extra un-accounted-for successors.
872 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
873 // If this successor is one of the branch targets, it's okay.
874 if (SuccMBB == TBB || SuccMBB == FBB)
875 continue;
876 // If we might have a fallthrough, and the successor is the fallthrough
877 // block, that's also ok.
878 if (Fallthrough && SuccMBB == MBB->getNextNode())
879 continue;
880 // Also accept successors which are for exception-handling or might be
881 // inlineasm_br targets.
882 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
883 continue;
884 report("MBB has unexpected successors which are not branch targets, "
885 "fallthrough, EHPads, or inlineasm_br targets.",
886 MBB);
887 }
888 }
889
  // Seed regsLive with the block's live-ins (plus all their sub-registers).
890 regsLive.clear();
891 if (MRI->tracksLiveness()) {
892 for (const auto &LI : MBB->liveins()) {
893 if (!Register::isPhysicalRegister(LI.PhysReg)) {
894 report("MBB live-in list contains non-physical register", MBB);
895 continue;
896 }
897 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
898 regsLive.insert(SubReg);
899 }
900 }
901
  // Pristine registers (callee-saved, untouched by this function) are also
  // considered live throughout.
902 const MachineFrameInfo &MFI = MF->getFrameInfo();
903 BitVector PR = MFI.getPristineRegs(*MF);
904 for (unsigned I : PR.set_bits()) {
905 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
906 regsLive.insert(SubReg);
907 }
908
909 regsKilled.clear();
910 regsDefined.clear();
911
912 if (Indexes)
913 lastIndex = Indexes->getMBBStartIdx(MBB);
914}
915
916// This function gets called for all bundle headers, including normal
917// stand-alone unbundled instructions.
918void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
919 if (Indexes && Indexes->hasIndex(*MI)) {
920 SlotIndex idx = Indexes->getInstructionIndex(*MI);
921 if (!(idx > lastIndex)) {
922 report("Instruction index out of order", MI);
923 OS << "Last instruction was at " << lastIndex << '\n';
924 }
925 lastIndex = idx;
926 }
927
928 // Ensure non-terminators don't follow terminators.
929 if (MI->isTerminator()) {
930 if (!FirstTerminator)
931 FirstTerminator = MI;
932 } else if (FirstTerminator) {
933 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
934 // precede non-terminators.
935 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
936 report("Non-terminator instruction after the first terminator", MI);
937 OS << "First terminator was:\t" << *FirstTerminator;
938 }
939 }
940}
941
942// The operands on an INLINEASM instruction must follow a template.
943// Verify that the flag operands make sense.
944void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
945 // The first two operands on INLINEASM are the asm string and global flags.
946 if (MI->getNumOperands() < 2) {
947 report("Too few operands on inline asm", MI);
948 return;
949 }
950 if (!MI->getOperand(0).isSymbol())
951 report("Asm string must be an external symbol", MI);
952 if (!MI->getOperand(1).isImm())
953 report("Asm flags must be an immediate", MI);
954 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
955 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
956 // and Extra_IsConvergent = 32.
957 if (!isUInt<6>(MI->getOperand(1).getImm()))
958 report("Unknown asm flags", &MI->getOperand(1), 1);
959
960 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
961
  // Walk the operand groups: each group starts with an immediate flag word
  // whose register count determines how many operands follow it. The walk
  // stops at the first non-immediate, which begins the trailing operands.
962 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
963 unsigned NumOps;
964 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
965 const MachineOperand &MO = MI->getOperand(OpNo);
966 // There may be implicit ops after the fixed operands.
967 if (!MO.isImm())
968 break;
969 const InlineAsm::Flag F(MO.getImm());
970 NumOps = 1 + F.getNumOperandRegisters();
971 }
972
  // If the last group claimed more operands than the instruction has, the
  // loop overshot the operand list.
973 if (OpNo > MI->getNumOperands())
974 report("Missing operands in last group", MI);
975
976 // An optional MDNode follows the groups.
977 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
978 ++OpNo;
979
980 // All trailing operands must be implicit registers.
981 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
982 const MachineOperand &MO = MI->getOperand(OpNo);
983 if (!MO.isReg() || !MO.isImplicit())
984 report("Expected implicit register after groups", &MO, OpNo);
985 }
986
  // INLINEASM_BR additionally carries MBB operands for its indirect targets;
  // each must be wired into the CFG in both directions.
987 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
988 const MachineBasicBlock *MBB = MI->getParent();
989
990 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
991 i != e; ++i) {
992 const MachineOperand &MO = MI->getOperand(i);
993
994 if (!MO.isMBB())
995 continue;
996
997 // Check the successor & predecessor lists look ok, assume they are
998 // not. Find the indirect target without going through the successors.
999 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
1000 if (!IndirectTargetMBB) {
1001 report("INLINEASM_BR indirect target does not exist", &MO, i);
1002 break;
1003 }
1004
1005 if (!MBB->isSuccessor(IndirectTargetMBB))
1006 report("INLINEASM_BR indirect target missing from successor list", &MO,
1007 i);
1008
1009 if (!IndirectTargetMBB->isPredecessor(MBB))
1010 report("INLINEASM_BR indirect target predecessor list missing parent",
1011 &MO, i);
1012 }
1013 }
1014}
1015
1016bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
1017 const MachineRegisterInfo &MRI) {
1018 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
1019 if (!Op.isReg())
1020 return false;
1021 const auto Reg = Op.getReg();
1022 if (Reg.isPhysical())
1023 return false;
1024 return !MRI.getType(Reg).isScalar();
1025 }))
1026 return true;
1027 report("All register operands must have scalar types", &MI);
1028 return false;
1029}
1030
1031/// Check that types are consistent when two operands need to have the same
1032/// number of vector elements.
1033/// \return true if the types are valid.
1034bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
1035 const MachineInstr *MI) {
1036 if (Ty0.isVector() != Ty1.isVector()) {
1037 report("operand types must be all-vector or all-scalar", MI);
1038 // Generally we try to report as many issues as possible at once, but in
1039 // this case it's not clear what should we be comparing the size of the
1040 // scalar with: the size of the whole vector or its lane. Instead of
1041 // making an arbitrary choice and emitting not so helpful message, let's
1042 // avoid the extra noise and stop here.
1043 return false;
1044 }
1045
1046 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
1047 report("operand types must preserve number of vector elements", MI);
1048 return false;
1049 }
1050
1051 return true;
1052}
1053
1054bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
1055 auto Opcode = MI->getOpcode();
1056 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
1057 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
1058 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1059 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1061 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1062 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1063 if (NoSideEffects && DeclHasSideEffects) {
1064 report(Twine(TII->getName(Opcode),
1065 " used with intrinsic that accesses memory"),
1066 MI);
1067 return false;
1068 }
1069 if (!NoSideEffects && !DeclHasSideEffects) {
1070 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
1071 return false;
1072 }
1073 }
1074
1075 return true;
1076}
1077
1078bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1079 auto Opcode = MI->getOpcode();
1080 bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1081 Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1082 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1083 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1085 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1086 bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1087 if (NotConvergent && DeclIsConvergent) {
1088 report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1089 MI);
1090 return false;
1091 }
1092 if (!NotConvergent && !DeclIsConvergent) {
1093 report(
1094 Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1095 MI);
1096 return false;
1097 }
1098 }
1099
1100 return true;
1101}
1102
1103void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1104 if (isFunctionSelected)
1105 report("Unexpected generic instruction in a Selected function", MI);
1106
1107 const MCInstrDesc &MCID = MI->getDesc();
1108 unsigned NumOps = MI->getNumOperands();
1109
1110 // Branches must reference a basic block if they are not indirect
1111 if (MI->isBranch() && !MI->isIndirectBranch()) {
1112 bool HasMBB = false;
1113 for (const MachineOperand &Op : MI->operands()) {
1114 if (Op.isMBB()) {
1115 HasMBB = true;
1116 break;
1117 }
1118 }
1119
1120 if (!HasMBB) {
1121 report("Branch instruction is missing a basic block operand or "
1122 "isIndirectBranch property",
1123 MI);
1124 }
1125 }
1126
1127 // Check types.
1129 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1130 I != E; ++I) {
1131 if (!MCID.operands()[I].isGenericType())
1132 continue;
1133 // Generic instructions specify type equality constraints between some of
1134 // their operands. Make sure these are consistent.
1135 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1136 Types.resize(std::max(TypeIdx + 1, Types.size()));
1137
1138 const MachineOperand *MO = &MI->getOperand(I);
1139 if (!MO->isReg()) {
1140 report("generic instruction must use register operands", MI);
1141 continue;
1142 }
1143
1144 LLT OpTy = MRI->getType(MO->getReg());
1145 // Don't report a type mismatch if there is no actual mismatch, only a
1146 // type missing, to reduce noise:
1147 if (OpTy.isValid()) {
1148 // Only the first valid type for a type index will be printed: don't
1149 // overwrite it later so it's always clear which type was expected:
1150 if (!Types[TypeIdx].isValid())
1151 Types[TypeIdx] = OpTy;
1152 else if (Types[TypeIdx] != OpTy)
1153 report("Type mismatch in generic instruction", MO, I, OpTy);
1154 } else {
1155 // Generic instructions must have types attached to their operands.
1156 report("Generic instruction is missing a virtual register type", MO, I);
1157 }
1158 }
1159
1160 // Generic opcodes must not have physical register operands.
1161 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1162 const MachineOperand *MO = &MI->getOperand(I);
1163 if (MO->isReg() && MO->getReg().isPhysical())
1164 report("Generic instruction cannot have physical register", MO, I);
1165 }
1166
1167 // Avoid out of bounds in checks below. This was already reported earlier.
1168 if (MI->getNumOperands() < MCID.getNumOperands())
1169 return;
1170
1172 if (!TII->verifyInstruction(*MI, ErrorInfo))
1173 report(ErrorInfo.data(), MI);
1174
1175 // Verify properties of various specific instruction types
1176 unsigned Opc = MI->getOpcode();
1177 switch (Opc) {
1178 case TargetOpcode::G_ASSERT_SEXT:
1179 case TargetOpcode::G_ASSERT_ZEXT: {
1180 std::string OpcName =
1181 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1182 if (!MI->getOperand(2).isImm()) {
1183 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1184 break;
1185 }
1186
1187 Register Dst = MI->getOperand(0).getReg();
1188 Register Src = MI->getOperand(1).getReg();
1189 LLT SrcTy = MRI->getType(Src);
1190 int64_t Imm = MI->getOperand(2).getImm();
1191 if (Imm <= 0) {
1192 report(Twine(OpcName, " size must be >= 1"), MI);
1193 break;
1194 }
1195
1196 if (Imm >= SrcTy.getScalarSizeInBits()) {
1197 report(Twine(OpcName, " size must be less than source bit width"), MI);
1198 break;
1199 }
1200
1201 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1202 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1203
1204 // Allow only the source bank to be set.
1205 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1206 report(Twine(OpcName, " cannot change register bank"), MI);
1207 break;
1208 }
1209
1210 // Don't allow a class change. Do allow member class->regbank.
1211 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1212 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1213 report(
1214 Twine(OpcName, " source and destination register classes must match"),
1215 MI);
1216 break;
1217 }
1218
1219 break;
1220 }
1221
1222 case TargetOpcode::G_CONSTANT:
1223 case TargetOpcode::G_FCONSTANT: {
1224 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1225 if (DstTy.isVector())
1226 report("Instruction cannot use a vector result type", MI);
1227
1228 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1229 if (!MI->getOperand(1).isCImm()) {
1230 report("G_CONSTANT operand must be cimm", MI);
1231 break;
1232 }
1233
1234 const ConstantInt *CI = MI->getOperand(1).getCImm();
1235 if (CI->getBitWidth() != DstTy.getSizeInBits())
1236 report("inconsistent constant size", MI);
1237 } else {
1238 if (!MI->getOperand(1).isFPImm()) {
1239 report("G_FCONSTANT operand must be fpimm", MI);
1240 break;
1241 }
1242 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1243
1245 DstTy.getSizeInBits()) {
1246 report("inconsistent constant size", MI);
1247 }
1248 }
1249
1250 break;
1251 }
1252 case TargetOpcode::G_LOAD:
1253 case TargetOpcode::G_STORE:
1254 case TargetOpcode::G_ZEXTLOAD:
1255 case TargetOpcode::G_SEXTLOAD: {
1256 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1257 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1258 if (!PtrTy.isPointer())
1259 report("Generic memory instruction must access a pointer", MI);
1260
1261 // Generic loads and stores must have a single MachineMemOperand
1262 // describing that access.
1263 if (!MI->hasOneMemOperand()) {
1264 report("Generic instruction accessing memory must have one mem operand",
1265 MI);
1266 } else {
1267 const MachineMemOperand &MMO = **MI->memoperands_begin();
1268 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1269 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1271 ValTy.getSizeInBits()))
1272 report("Generic extload must have a narrower memory type", MI);
1273 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1275 ValTy.getSizeInBytes()))
1276 report("load memory size cannot exceed result size", MI);
1277 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1279 MMO.getSize().getValue()))
1280 report("store memory size cannot exceed value size", MI);
1281 }
1282
1283 const AtomicOrdering Order = MMO.getSuccessOrdering();
1284 if (Opc == TargetOpcode::G_STORE) {
1285 if (Order == AtomicOrdering::Acquire ||
1287 report("atomic store cannot use acquire ordering", MI);
1288
1289 } else {
1290 if (Order == AtomicOrdering::Release ||
1292 report("atomic load cannot use release ordering", MI);
1293 }
1294 }
1295
1296 break;
1297 }
1298 case TargetOpcode::G_PHI: {
1299 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1300 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1301 [this, &DstTy](const MachineOperand &MO) {
1302 if (!MO.isReg())
1303 return true;
1304 LLT Ty = MRI->getType(MO.getReg());
1305 if (!Ty.isValid() || (Ty != DstTy))
1306 return false;
1307 return true;
1308 }))
1309 report("Generic Instruction G_PHI has operands with incompatible/missing "
1310 "types",
1311 MI);
1312 break;
1313 }
1314 case TargetOpcode::G_BITCAST: {
1315 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1316 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1317 if (!DstTy.isValid() || !SrcTy.isValid())
1318 break;
1319
1320 if (SrcTy.isPointer() != DstTy.isPointer())
1321 report("bitcast cannot convert between pointers and other types", MI);
1322
1323 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1324 report("bitcast sizes must match", MI);
1325
1326 if (SrcTy == DstTy)
1327 report("bitcast must change the type", MI);
1328
1329 break;
1330 }
1331 case TargetOpcode::G_INTTOPTR:
1332 case TargetOpcode::G_PTRTOINT:
1333 case TargetOpcode::G_ADDRSPACE_CAST: {
1334 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1335 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1336 if (!DstTy.isValid() || !SrcTy.isValid())
1337 break;
1338
1339 verifyVectorElementMatch(DstTy, SrcTy, MI);
1340
1341 DstTy = DstTy.getScalarType();
1342 SrcTy = SrcTy.getScalarType();
1343
1344 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1345 if (!DstTy.isPointer())
1346 report("inttoptr result type must be a pointer", MI);
1347 if (SrcTy.isPointer())
1348 report("inttoptr source type must not be a pointer", MI);
1349 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1350 if (!SrcTy.isPointer())
1351 report("ptrtoint source type must be a pointer", MI);
1352 if (DstTy.isPointer())
1353 report("ptrtoint result type must not be a pointer", MI);
1354 } else {
1355 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1356 if (!SrcTy.isPointer() || !DstTy.isPointer())
1357 report("addrspacecast types must be pointers", MI);
1358 else {
1359 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1360 report("addrspacecast must convert different address spaces", MI);
1361 }
1362 }
1363
1364 break;
1365 }
1366 case TargetOpcode::G_PTR_ADD: {
1367 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1368 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1369 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1370 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1371 break;
1372
1373 if (!PtrTy.isPointerOrPointerVector())
1374 report("gep first operand must be a pointer", MI);
1375
1376 if (OffsetTy.isPointerOrPointerVector())
1377 report("gep offset operand must not be a pointer", MI);
1378
1379 if (PtrTy.isPointerOrPointerVector()) {
1380 const DataLayout &DL = MF->getDataLayout();
1381 unsigned AS = PtrTy.getAddressSpace();
1382 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1383 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1384 report("gep offset operand must match index size for address space",
1385 MI);
1386 }
1387 }
1388
1389 // TODO: Is the offset allowed to be a scalar with a vector?
1390 break;
1391 }
1392 case TargetOpcode::G_PTRMASK: {
1393 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1394 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1395 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1396 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1397 break;
1398
1399 if (!DstTy.isPointerOrPointerVector())
1400 report("ptrmask result type must be a pointer", MI);
1401
1402 if (!MaskTy.getScalarType().isScalar())
1403 report("ptrmask mask type must be an integer", MI);
1404
1405 verifyVectorElementMatch(DstTy, MaskTy, MI);
1406 break;
1407 }
1408 case TargetOpcode::G_SEXT:
1409 case TargetOpcode::G_ZEXT:
1410 case TargetOpcode::G_ANYEXT:
1411 case TargetOpcode::G_TRUNC:
1412 case TargetOpcode::G_FPEXT:
1413 case TargetOpcode::G_FPTRUNC: {
1414 // Number of operands and presense of types is already checked (and
1415 // reported in case of any issues), so no need to report them again. As
1416 // we're trying to report as many issues as possible at once, however, the
1417 // instructions aren't guaranteed to have the right number of operands or
1418 // types attached to them at this point
1419 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1420 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1421 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1422 if (!DstTy.isValid() || !SrcTy.isValid())
1423 break;
1424
1426 report("Generic extend/truncate can not operate on pointers", MI);
1427
1428 verifyVectorElementMatch(DstTy, SrcTy, MI);
1429
1430 unsigned DstSize = DstTy.getScalarSizeInBits();
1431 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1432 switch (MI->getOpcode()) {
1433 default:
1434 if (DstSize <= SrcSize)
1435 report("Generic extend has destination type no larger than source", MI);
1436 break;
1437 case TargetOpcode::G_TRUNC:
1438 case TargetOpcode::G_FPTRUNC:
1439 if (DstSize >= SrcSize)
1440 report("Generic truncate has destination type no smaller than source",
1441 MI);
1442 break;
1443 }
1444 break;
1445 }
1446 case TargetOpcode::G_SELECT: {
1447 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1448 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1449 if (!SelTy.isValid() || !CondTy.isValid())
1450 break;
1451
1452 // Scalar condition select on a vector is valid.
1453 if (CondTy.isVector())
1454 verifyVectorElementMatch(SelTy, CondTy, MI);
1455 break;
1456 }
1457 case TargetOpcode::G_MERGE_VALUES: {
1458 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1459 // e.g. s2N = MERGE sN, sN
1460 // Merging multiple scalars into a vector is not allowed, should use
1461 // G_BUILD_VECTOR for that.
1462 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1463 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1464 if (DstTy.isVector() || SrcTy.isVector())
1465 report("G_MERGE_VALUES cannot operate on vectors", MI);
1466
1467 const unsigned NumOps = MI->getNumOperands();
1468 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1469 report("G_MERGE_VALUES result size is inconsistent", MI);
1470
1471 for (unsigned I = 2; I != NumOps; ++I) {
1472 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1473 report("G_MERGE_VALUES source types do not match", MI);
1474 }
1475
1476 break;
1477 }
1478 case TargetOpcode::G_UNMERGE_VALUES: {
1479 unsigned NumDsts = MI->getNumOperands() - 1;
1480 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1481 for (unsigned i = 1; i < NumDsts; ++i) {
1482 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1483 report("G_UNMERGE_VALUES destination types do not match", MI);
1484 break;
1485 }
1486 }
1487
1488 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1489 if (DstTy.isVector()) {
1490 // This case is the converse of G_CONCAT_VECTORS.
1491 if (!SrcTy.isVector() ||
1492 (SrcTy.getScalarType() != DstTy.getScalarType() &&
1493 !SrcTy.isPointerVector()) ||
1494 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1495 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1496 report("G_UNMERGE_VALUES source operand does not match vector "
1497 "destination operands",
1498 MI);
1499 } else if (SrcTy.isVector()) {
1500 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1501 // mismatched types as long as the total size matches:
1502 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1503 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1504 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1505 "destination operands",
1506 MI);
1507 } else {
1508 // This case is the converse of G_MERGE_VALUES.
1509 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1510 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1511 "destination operands",
1512 MI);
1513 }
1514 }
1515 break;
1516 }
1517 case TargetOpcode::G_BUILD_VECTOR: {
1518 // Source types must be scalars, dest type a vector. Total size of scalars
1519 // must match the dest vector size.
1520 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1521 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1522 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1523 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1524 break;
1525 }
1526
1527 if (DstTy.getElementType() != SrcEltTy)
1528 report("G_BUILD_VECTOR result element type must match source type", MI);
1529
1530 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1531 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1532
1533 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1534 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1535 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1536
1537 break;
1538 }
1539 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1540 // Source types must be scalars, dest type a vector. Scalar types must be
1541 // larger than the dest vector elt type, as this is a truncating operation.
1542 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1543 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1544 if (!DstTy.isVector() || SrcEltTy.isVector())
1545 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1546 MI);
1547 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1548 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1549 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1550 MI);
1551 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1552 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1553 "dest elt type",
1554 MI);
1555 break;
1556 }
1557 case TargetOpcode::G_CONCAT_VECTORS: {
1558 // Source types should be vectors, and total size should match the dest
1559 // vector size.
1560 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1561 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1562 if (!DstTy.isVector() || !SrcTy.isVector())
1563 report("G_CONCAT_VECTOR requires vector source and destination operands",
1564 MI);
1565
1566 if (MI->getNumOperands() < 3)
1567 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1568
1569 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1570 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1571 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1572 if (DstTy.getElementCount() !=
1573 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1574 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1575 break;
1576 }
1577 case TargetOpcode::G_ICMP:
1578 case TargetOpcode::G_FCMP: {
1579 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1580 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1581
1582 if ((DstTy.isVector() != SrcTy.isVector()) ||
1583 (DstTy.isVector() &&
1584 DstTy.getElementCount() != SrcTy.getElementCount()))
1585 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1586
1587 break;
1588 }
1589 case TargetOpcode::G_SCMP:
1590 case TargetOpcode::G_UCMP: {
1591 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1592 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1593
1594 if (SrcTy.isPointerOrPointerVector()) {
1595 report("Generic scmp/ucmp does not support pointers as operands", MI);
1596 break;
1597 }
1598
1599 if (DstTy.isPointerOrPointerVector()) {
1600 report("Generic scmp/ucmp does not support pointers as a result", MI);
1601 break;
1602 }
1603
1604 if (DstTy.getScalarSizeInBits() < 2) {
1605 report("Result type must be at least 2 bits wide", MI);
1606 break;
1607 }
1608
1609 if ((DstTy.isVector() != SrcTy.isVector()) ||
1610 (DstTy.isVector() &&
1611 DstTy.getElementCount() != SrcTy.getElementCount())) {
1612 report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1613 break;
1614 }
1615
1616 break;
1617 }
1618 case TargetOpcode::G_EXTRACT: {
1619 const MachineOperand &SrcOp = MI->getOperand(1);
1620 if (!SrcOp.isReg()) {
1621 report("extract source must be a register", MI);
1622 break;
1623 }
1624
1625 const MachineOperand &OffsetOp = MI->getOperand(2);
1626 if (!OffsetOp.isImm()) {
1627 report("extract offset must be a constant", MI);
1628 break;
1629 }
1630
1631 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1632 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1633 if (SrcSize == DstSize)
1634 report("extract source must be larger than result", MI);
1635
1636 if (DstSize + OffsetOp.getImm() > SrcSize)
1637 report("extract reads past end of register", MI);
1638 break;
1639 }
1640 case TargetOpcode::G_INSERT: {
1641 const MachineOperand &SrcOp = MI->getOperand(2);
1642 if (!SrcOp.isReg()) {
1643 report("insert source must be a register", MI);
1644 break;
1645 }
1646
1647 const MachineOperand &OffsetOp = MI->getOperand(3);
1648 if (!OffsetOp.isImm()) {
1649 report("insert offset must be a constant", MI);
1650 break;
1651 }
1652
1653 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1654 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1655
1656 if (DstSize <= SrcSize)
1657 report("inserted size must be smaller than total register", MI);
1658
1659 if (SrcSize + OffsetOp.getImm() > DstSize)
1660 report("insert writes past end of register", MI);
1661
1662 break;
1663 }
1664 case TargetOpcode::G_JUMP_TABLE: {
1665 if (!MI->getOperand(1).isJTI())
1666 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1667 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1668 if (!DstTy.isPointer())
1669 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1670 break;
1671 }
1672 case TargetOpcode::G_BRJT: {
1673 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1674 report("G_BRJT src operand 0 must be a pointer type", MI);
1675
1676 if (!MI->getOperand(1).isJTI())
1677 report("G_BRJT src operand 1 must be a jump table index", MI);
1678
1679 const auto &IdxOp = MI->getOperand(2);
1680 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1681 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1682 break;
1683 }
1684 case TargetOpcode::G_INTRINSIC:
1685 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1686 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1687 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1688 // TODO: Should verify number of def and use operands, but the current
1689 // interface requires passing in IR types for mangling.
1690 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1691 if (!IntrIDOp.isIntrinsicID()) {
1692 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1693 break;
1694 }
1695
1696 if (!verifyGIntrinsicSideEffects(MI))
1697 break;
1698 if (!verifyGIntrinsicConvergence(MI))
1699 break;
1700
1701 break;
1702 }
1703 case TargetOpcode::G_SEXT_INREG: {
1704 if (!MI->getOperand(2).isImm()) {
1705 report("G_SEXT_INREG expects an immediate operand #2", MI);
1706 break;
1707 }
1708
1709 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1710 int64_t Imm = MI->getOperand(2).getImm();
1711 if (Imm <= 0)
1712 report("G_SEXT_INREG size must be >= 1", MI);
1713 if (Imm >= SrcTy.getScalarSizeInBits())
1714 report("G_SEXT_INREG size must be less than source bit width", MI);
1715 break;
1716 }
1717 case TargetOpcode::G_BSWAP: {
1718 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1719 if (DstTy.getScalarSizeInBits() % 16 != 0)
1720 report("G_BSWAP size must be a multiple of 16 bits", MI);
1721 break;
1722 }
1723 case TargetOpcode::G_VSCALE: {
1724 if (!MI->getOperand(1).isCImm()) {
1725 report("G_VSCALE operand must be cimm", MI);
1726 break;
1727 }
1728 if (MI->getOperand(1).getCImm()->isZero()) {
1729 report("G_VSCALE immediate cannot be zero", MI);
1730 break;
1731 }
1732 break;
1733 }
1734 case TargetOpcode::G_STEP_VECTOR: {
1735 if (!MI->getOperand(1).isCImm()) {
1736 report("operand must be cimm", MI);
1737 break;
1738 }
1739
1740 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1741 report("step must be > 0", MI);
1742 break;
1743 }
1744
1745 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1746 if (!DstTy.isScalableVector()) {
1747 report("Destination type must be a scalable vector", MI);
1748 break;
1749 }
1750
1751 // <vscale x 2 x p0>
1752 if (!DstTy.getElementType().isScalar()) {
1753 report("Destination element type must be scalar", MI);
1754 break;
1755 }
1756
1757 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1759 report("step bitwidth differs from result type element bitwidth", MI);
1760 break;
1761 }
1762 break;
1763 }
1764 case TargetOpcode::G_INSERT_SUBVECTOR: {
1765 const MachineOperand &Src0Op = MI->getOperand(1);
1766 if (!Src0Op.isReg()) {
1767 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1768 break;
1769 }
1770
1771 const MachineOperand &Src1Op = MI->getOperand(2);
1772 if (!Src1Op.isReg()) {
1773 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1774 break;
1775 }
1776
1777 const MachineOperand &IndexOp = MI->getOperand(3);
1778 if (!IndexOp.isImm()) {
1779 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1780 break;
1781 }
1782
1783 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1784 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1785
1786 if (!DstTy.isVector()) {
1787 report("Destination type must be a vector", MI);
1788 break;
1789 }
1790
1791 if (!Src1Ty.isVector()) {
1792 report("Second source must be a vector", MI);
1793 break;
1794 }
1795
1796 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1797 report("Element type of vectors must be the same", MI);
1798 break;
1799 }
1800
1801 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1802 report("Vector types must both be fixed or both be scalable", MI);
1803 break;
1804 }
1805
1807 DstTy.getElementCount())) {
1808 report("Second source must be smaller than destination vector", MI);
1809 break;
1810 }
1811
1812 uint64_t Idx = IndexOp.getImm();
1813 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1814 if (IndexOp.getImm() % Src1MinLen != 0) {
1815 report("Index must be a multiple of the second source vector's "
1816 "minimum vector length",
1817 MI);
1818 break;
1819 }
1820
1821 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1822 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1823 report("Subvector type and index must not cause insert to overrun the "
1824 "vector being inserted into",
1825 MI);
1826 break;
1827 }
1828
1829 break;
1830 }
1831 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1832 const MachineOperand &SrcOp = MI->getOperand(1);
1833 if (!SrcOp.isReg()) {
1834 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1835 break;
1836 }
1837
1838 const MachineOperand &IndexOp = MI->getOperand(2);
1839 if (!IndexOp.isImm()) {
1840 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1841 break;
1842 }
1843
1844 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1845 LLT SrcTy = MRI->getType(SrcOp.getReg());
1846
1847 if (!DstTy.isVector()) {
1848 report("Destination type must be a vector", MI);
1849 break;
1850 }
1851
1852 if (!SrcTy.isVector()) {
1853 report("Source must be a vector", MI);
1854 break;
1855 }
1856
1857 if (DstTy.getElementType() != SrcTy.getElementType()) {
1858 report("Element type of vectors must be the same", MI);
1859 break;
1860 }
1861
1862 if (SrcTy.isScalable() != DstTy.isScalable()) {
1863 report("Vector types must both be fixed or both be scalable", MI);
1864 break;
1865 }
1866
1868 SrcTy.getElementCount())) {
1869 report("Destination vector must be smaller than source vector", MI);
1870 break;
1871 }
1872
1873 uint64_t Idx = IndexOp.getImm();
1874 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1875 if (Idx % DstMinLen != 0) {
1876 report("Index must be a multiple of the destination vector's minimum "
1877 "vector length",
1878 MI);
1879 break;
1880 }
1881
1882 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1883 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1884 report("Destination type and index must not cause extract to overrun the "
1885 "source vector",
1886 MI);
1887 break;
1888 }
1889
1890 break;
1891 }
1892 case TargetOpcode::G_SHUFFLE_VECTOR: {
1893 const MachineOperand &MaskOp = MI->getOperand(3);
1894 if (!MaskOp.isShuffleMask()) {
1895 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1896 break;
1897 }
1898
1899 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1900 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1901 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1902
1903 if (Src0Ty != Src1Ty)
1904 report("Source operands must be the same type", MI);
1905
1906 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1907 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1908
1909 // Don't check that all operands are vector because scalars are used in
1910 // place of 1 element vectors.
1911 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1912 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1913
1914 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1915
1916 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1917 report("Wrong result type for shufflemask", MI);
1918
1919 for (int Idx : MaskIdxes) {
1920 if (Idx < 0)
1921 continue;
1922
1923 if (Idx >= 2 * SrcNumElts)
1924 report("Out of bounds shuffle index", MI);
1925 }
1926
1927 break;
1928 }
1929
1930 case TargetOpcode::G_SPLAT_VECTOR: {
1931 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1932 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1933
1934 if (!DstTy.isScalableVector()) {
1935 report("Destination type must be a scalable vector", MI);
1936 break;
1937 }
1938
1939 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1940 report("Source type must be a scalar or pointer", MI);
1941 break;
1942 }
1943
1945 SrcTy.getSizeInBits())) {
1946 report("Element type of the destination must be the same size or smaller "
1947 "than the source type",
1948 MI);
1949 break;
1950 }
1951
1952 break;
1953 }
1954 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1955 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1956 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1957 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1958
1959 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1960 report("Destination type must be a scalar or pointer", MI);
1961 break;
1962 }
1963
1964 if (!SrcTy.isVector()) {
1965 report("First source must be a vector", MI);
1966 break;
1967 }
1968
1969 auto TLI = MF->getSubtarget().getTargetLowering();
1970 if (IdxTy.getSizeInBits() !=
1971 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1972 report("Index type must match VectorIdxTy", MI);
1973 break;
1974 }
1975
1976 break;
1977 }
1978 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1979 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1980 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1981 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1982 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1983
1984 if (!DstTy.isVector()) {
1985 report("Destination type must be a vector", MI);
1986 break;
1987 }
1988
1989 if (VecTy != DstTy) {
1990 report("Destination type and vector type must match", MI);
1991 break;
1992 }
1993
1994 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1995 report("Inserted element must be a scalar or pointer", MI);
1996 break;
1997 }
1998
1999 auto TLI = MF->getSubtarget().getTargetLowering();
2000 if (IdxTy.getSizeInBits() !=
2001 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
2002 report("Index type must match VectorIdxTy", MI);
2003 break;
2004 }
2005
2006 break;
2007 }
2008 case TargetOpcode::G_DYN_STACKALLOC: {
2009 const MachineOperand &DstOp = MI->getOperand(0);
2010 const MachineOperand &AllocOp = MI->getOperand(1);
2011 const MachineOperand &AlignOp = MI->getOperand(2);
2012
2013 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2014 report("dst operand 0 must be a pointer type", MI);
2015 break;
2016 }
2017
2018 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2019 report("src operand 1 must be a scalar reg type", MI);
2020 break;
2021 }
2022
2023 if (!AlignOp.isImm()) {
2024 report("src operand 2 must be an immediate type", MI);
2025 break;
2026 }
2027 break;
2028 }
2029 case TargetOpcode::G_MEMCPY_INLINE:
2030 case TargetOpcode::G_MEMCPY:
2031 case TargetOpcode::G_MEMMOVE: {
2032 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2033 if (MMOs.size() != 2) {
2034 report("memcpy/memmove must have 2 memory operands", MI);
2035 break;
2036 }
2037
2038 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2039 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2040 report("wrong memory operand types", MI);
2041 break;
2042 }
2043
2044 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2045 report("inconsistent memory operand sizes", MI);
2046
2047 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2048 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2049
2050 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2051 report("memory instruction operand must be a pointer", MI);
2052 break;
2053 }
2054
2055 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2056 report("inconsistent store address space", MI);
2057 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2058 report("inconsistent load address space", MI);
2059
2060 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2061 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2062 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2063
2064 break;
2065 }
2066 case TargetOpcode::G_BZERO:
2067 case TargetOpcode::G_MEMSET: {
2068 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2069 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2070 if (MMOs.size() != 1) {
2071 report(Twine(Name, " must have 1 memory operand"), MI);
2072 break;
2073 }
2074
2075 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2076 report(Twine(Name, " memory operand must be a store"), MI);
2077 break;
2078 }
2079
2080 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2081 if (!DstPtrTy.isPointer()) {
2082 report(Twine(Name, " operand must be a pointer"), MI);
2083 break;
2084 }
2085
2086 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2087 report("inconsistent " + Twine(Name, " address space"), MI);
2088
2089 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2090 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2091 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2092
2093 break;
2094 }
2095 case TargetOpcode::G_UBSANTRAP: {
2096 const MachineOperand &KindOp = MI->getOperand(0);
2097 if (!MI->getOperand(0).isImm()) {
2098 report("Crash kind must be an immediate", &KindOp, 0);
2099 break;
2100 }
2101 int64_t Kind = MI->getOperand(0).getImm();
2102 if (!isInt<8>(Kind))
2103 report("Crash kind must be 8 bit wide", &KindOp, 0);
2104 break;
2105 }
2106 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2107 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2108 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2109 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2110 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2111 if (!DstTy.isScalar())
2112 report("Vector reduction requires a scalar destination type", MI);
2113 if (!Src1Ty.isScalar())
2114 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2115 if (!Src2Ty.isVector())
2116 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2117 break;
2118 }
2119 case TargetOpcode::G_VECREDUCE_FADD:
2120 case TargetOpcode::G_VECREDUCE_FMUL:
2121 case TargetOpcode::G_VECREDUCE_FMAX:
2122 case TargetOpcode::G_VECREDUCE_FMIN:
2123 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2124 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2125 case TargetOpcode::G_VECREDUCE_ADD:
2126 case TargetOpcode::G_VECREDUCE_MUL:
2127 case TargetOpcode::G_VECREDUCE_AND:
2128 case TargetOpcode::G_VECREDUCE_OR:
2129 case TargetOpcode::G_VECREDUCE_XOR:
2130 case TargetOpcode::G_VECREDUCE_SMAX:
2131 case TargetOpcode::G_VECREDUCE_SMIN:
2132 case TargetOpcode::G_VECREDUCE_UMAX:
2133 case TargetOpcode::G_VECREDUCE_UMIN: {
2134 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2135 if (!DstTy.isScalar())
2136 report("Vector reduction requires a scalar destination type", MI);
2137 break;
2138 }
2139
2140 case TargetOpcode::G_SBFX:
2141 case TargetOpcode::G_UBFX: {
2142 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2143 if (DstTy.isVector()) {
2144 report("Bitfield extraction is not supported on vectors", MI);
2145 break;
2146 }
2147 break;
2148 }
2149 case TargetOpcode::G_SHL:
2150 case TargetOpcode::G_LSHR:
2151 case TargetOpcode::G_ASHR:
2152 case TargetOpcode::G_ROTR:
2153 case TargetOpcode::G_ROTL: {
2154 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2155 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2156 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2157 report("Shifts and rotates require operands to be either all scalars or "
2158 "all vectors",
2159 MI);
2160 break;
2161 }
2162 break;
2163 }
2164 case TargetOpcode::G_LLROUND:
2165 case TargetOpcode::G_LROUND: {
2166 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2167 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2168 if (!DstTy.isValid() || !SrcTy.isValid())
2169 break;
2170 if (SrcTy.isPointer() || DstTy.isPointer()) {
2171 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2172 report(Twine(Op, " operand must not be a pointer type"), MI);
2173 } else if (SrcTy.isScalar()) {
2174 verifyAllRegOpsScalar(*MI, *MRI);
2175 break;
2176 } else if (SrcTy.isVector()) {
2177 verifyVectorElementMatch(SrcTy, DstTy, MI);
2178 break;
2179 }
2180 break;
2181 }
2182 case TargetOpcode::G_IS_FPCLASS: {
2183 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2184 LLT DestEltTy = DestTy.getScalarType();
2185 if (!DestEltTy.isScalar()) {
2186 report("Destination must be a scalar or vector of scalars", MI);
2187 break;
2188 }
2189 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2190 LLT SrcEltTy = SrcTy.getScalarType();
2191 if (!SrcEltTy.isScalar()) {
2192 report("Source must be a scalar or vector of scalars", MI);
2193 break;
2194 }
2195 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2196 break;
2197 const MachineOperand &TestMO = MI->getOperand(2);
2198 if (!TestMO.isImm()) {
2199 report("floating-point class set (operand 2) must be an immediate", MI);
2200 break;
2201 }
2202 int64_t Test = TestMO.getImm();
2203 if (Test < 0 || Test > fcAllFlags) {
2204 report("Incorrect floating-point class set (operand 2)", MI);
2205 break;
2206 }
2207 break;
2208 }
2209 case TargetOpcode::G_PREFETCH: {
2210 const MachineOperand &AddrOp = MI->getOperand(0);
2211 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2212 report("addr operand must be a pointer", &AddrOp, 0);
2213 break;
2214 }
2215 const MachineOperand &RWOp = MI->getOperand(1);
2216 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2217 report("rw operand must be an immediate 0-1", &RWOp, 1);
2218 break;
2219 }
2220 const MachineOperand &LocalityOp = MI->getOperand(2);
2221 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2222 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2223 break;
2224 }
2225 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2226 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2227 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2228 break;
2229 }
2230 break;
2231 }
2232 case TargetOpcode::G_ASSERT_ALIGN: {
2233 if (MI->getOperand(2).getImm() < 1)
2234 report("alignment immediate must be >= 1", MI);
2235 break;
2236 }
2237 case TargetOpcode::G_CONSTANT_POOL: {
2238 if (!MI->getOperand(1).isCPI())
2239 report("Src operand 1 must be a constant pool index", MI);
2240 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2241 report("Dst operand 0 must be a pointer", MI);
2242 break;
2243 }
2244 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2245 const MachineOperand &AddrOp = MI->getOperand(1);
2246 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2247 report("addr operand must be a pointer", &AddrOp, 1);
2248 break;
2249 }
2250 default:
2251 break;
2252 }
2253}
2254
/// Verify per-instruction invariants that do not need liveness information:
/// operand count vs. the MCInstrDesc, PHI placement, inline-asm structure,
/// unspillable terminators, debug-instruction well-formedness, memory-operand
/// flags, slot-index mapping, and a handful of opcode-specific checks
/// (COPY, STATEPOINT, INSERT_SUBREG, REG_SEQUENCE). Pre-ISel generic (gMIR)
/// opcodes are dispatched to verifyPreISelGenericInstruction() instead.
///
/// NOTE(review): this excerpt comes from a rendered listing; a few original
/// source lines were dropped by the extraction (called out inline below) and
/// are deliberately left as-is rather than reconstructed.
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  // Every operand declared by the descriptor must actually be present on the
  // instruction; extra operands are checked per-operand elsewhere.
  if (MI->getNumOperands() < MCID.getNumOperands()) {
    report("Too few operands", MI);
    OS << MCID.getNumOperands() << " operands expected, but "
       << MI->getNumOperands() << " given.\n";
  }

  // NoConvergent is a relaxation flag; it is only meaningful on an
  // instruction whose descriptor is convergent in the first place.
  if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
    report("NoConvergent flag expected only on convergent instructions.", MI);

  if (MI->isPHI()) {
    // NOTE(review): the hasProperty() argument line (presumably
    // MachineFunctionProperties::Property::NoPHIs) is missing from this
    // excerpt — confirm against upstream before relying on this text.
    if (MF->getProperties().hasProperty(
      report("Found PHI instruction with NoPHIs property set", MI);

    // PHIs must form a contiguous prefix of their block; FirstNonPHI is set
    // below when the first non-PHI instruction is visited.
    if (FirstNonPHI)
      report("Found PHI instruction after non-PHI", MI);
  } else if (FirstNonPHI == nullptr)
    FirstNonPHI = MI;

  // Check the tied operands.
  if (MI->isInlineAsm())
    verifyInlineAsm(MI);

  // Check that unspillable terminators define a reg and have at most one use.
  if (TII->isUnspillableTerminator(MI)) {
    if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
      report("Unspillable Terminator does not define a reg", MI);
    Register Def = MI->getOperand(0).getReg();
    // NOTE(review): the hasProperty() argument line is missing from this
    // excerpt; the visible condition counts non-debug uses of the def.
    if (Def.isVirtual() &&
        !MF->getProperties().hasProperty(
        std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
      report("Unspillable Terminator expected to have at most one use!", MI);
  }

  // A fully-formed DBG_VALUE must have a location. Ignore partially formed
  // DBG_VALUEs: these are convenient to use in tests, but should never get
  // generated.
  if (MI->isDebugValue() && MI->getNumOperands() == 4)
    if (!MI->getDebugLoc())
      report("Missing DebugLoc for debug instruction", MI);

  // Meta instructions should never be the subject of debug value tracking,
  // they don't create a value in the output program at all.
  if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
    report("Metadata instruction should not have a value tracking number", MI);

  // Check the MachineMemOperands for basic consistency: an attached memory
  // operand implies the corresponding mayLoad/mayStore flag.
  for (MachineMemOperand *Op : MI->memoperands()) {
    if (Op->isLoad() && !MI->mayLoad())
      report("Missing mayLoad flag", MI);
    if (Op->isStore() && !MI->mayStore())
      report("Missing mayStore flag", MI);
  }

  // Debug values must not have a slot index.
  // Other instructions must have one, unless they are inside a bundle.
  if (LiveInts) {
    bool mapped = !LiveInts->isNotInMIMap(*MI);
    if (MI->isDebugOrPseudoInstr()) {
      if (mapped)
        report("Debug instruction has a slot index", MI);
    } else if (MI->isInsideBundle()) {
      if (mapped)
        report("Instruction inside bundle has a slot index", MI);
    } else {
      if (!mapped)
        report("Missing slot index", MI);
    }
  }

  // NOTE(review): the guard line (presumably `if (isPreISelGenericOpcode(Opc))
  // {`) is missing from this excerpt; generic opcodes are verified separately
  // and skip the target-specific checks below.
  unsigned Opc = MCID.getOpcode();
    verifyPreISelGenericInstruction(MI);
    return;
  }

  // NOTE(review): the declaration of ErrorInfo (a StringRef) is missing from
  // this excerpt. Targets get a chance to run their own verification here.
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &SrcOp = MI->getOperand(1);
    const Register SrcReg = SrcOp.getReg();
    const Register DstReg = DstOp.getReg();

    LLT DstTy = MRI->getType(DstReg);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isValid() && DstTy.isValid()) {
      // If both types are valid, check that the types are the same.
      if (SrcTy != DstTy) {
        report("Copy Instruction is illegal with mismatching types", MI);
        OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
      }

      break;
    }

    // Neither side has an LLT — nothing further to check here.
    if (!SrcTy.isValid() && !DstTy.isValid())
      break;

    // If we have only one valid type, this is likely a copy between a virtual
    // and physical register.
    TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
    TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
    // Refine the physical-register size using the minimal class that can hold
    // the known LLT of the other side, when such a class exists.
    if (SrcReg.isPhysical() && DstTy.isValid()) {
      const TargetRegisterClass *SrcRC =
          TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
      if (SrcRC)
        SrcSize = TRI->getRegSizeInBits(*SrcRC);
    }

    if (DstReg.isPhysical() && SrcTy.isValid()) {
      const TargetRegisterClass *DstRC =
          TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
      if (DstRC)
        DstSize = TRI->getRegSizeInBits(*DstRC);
    }

    // The next two checks allow COPY between physical and virtual registers,
    // when the virtual register has a scalable size and the physical register
    // has a fixed size. These checks allow COPY between *potentialy* mismatched
    // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
    // be able to resolve a fixed size for the scalable vector, and at that
    // point this function will know for sure whether the sizes are mismatched
    // and correctly report a size mismatch.
    if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
        !SrcSize.isScalable())
      break;
    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
        !DstSize.isScalable())
      break;

    // Mismatched nonzero sizes are only legal when a subregister index on
    // either side accounts for the difference.
    if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
      if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
        report("Copy Instruction is illegal with mismatching sizes", MI);
        OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
      }
    }
    break;
  }
  case TargetOpcode::STATEPOINT: {
    StatepointOpers SO(MI);
    // The ID, number-of-patch-bytes and call-arg-count meta operands must be
    // immediates.
    if (!MI->getOperand(SO.getIDPos()).isImm() ||
        !MI->getOperand(SO.getNBytesPos()).isImm() ||
        !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
      report("meta operands to STATEPOINT not constant!", MI);
      break;
    }

    // A stack-map constant is encoded as a ConstantOp marker immediately
    // followed by the immediate value; check both slots.
    auto VerifyStackMapConstant = [&](unsigned Offset) {
      if (Offset >= MI->getNumOperands()) {
        report("stack map constant to STATEPOINT is out of range!", MI);
        return;
      }
      if (!MI->getOperand(Offset - 1).isImm() ||
          MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
          !MI->getOperand(Offset).isImm())
        report("stack map constant to STATEPOINT not well formed!", MI);
    };
    VerifyStackMapConstant(SO.getCCIdx());
    VerifyStackMapConstant(SO.getFlagsIdx());
    VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
    VerifyStackMapConstant(SO.getNumGCPtrIdx());
    VerifyStackMapConstant(SO.getNumAllocaIdx());
    VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());

    // Verify that all explicit statepoint defs are tied to gc operands as
    // they are expected to be a relocation of gc operands.
    unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
    unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
    for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
      unsigned UseOpIdx;
      if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
        report("STATEPOINT defs expected to be tied", MI);
        break;
      }
      if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
        report("STATEPOINT def tied to non-gc operand", MI);
        break;
      }
    }

    // TODO: verify we have properly encoded deopt arguments
  } break;
  case TargetOpcode::INSERT_SUBREG: {
    // The inserted value (operand 2, possibly itself a subregister) must fit
    // in the destination subregister named by the index in operand 3.
    unsigned InsertedSize;
    if (unsigned SubIdx = MI->getOperand(2).getSubReg())
      InsertedSize = TRI->getSubRegIdxSize(SubIdx);
    else
      InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
    unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
    if (SubRegSize < InsertedSize) {
      report("INSERT_SUBREG expected inserted value to have equal or lesser "
             "size than the subreg it was inserted into", MI);
      break;
    }
  } break;
  case TargetOpcode::REG_SEQUENCE: {
    // Operands are: def, then (register, subreg-index) pairs, so the total
    // count must be odd.
    unsigned NumOps = MI->getNumOperands();
    if (!(NumOps & 1)) {
      report("Invalid number of operands for REG_SEQUENCE", MI);
      break;
    }

    for (unsigned I = 1; I != NumOps; I += 2) {
      const MachineOperand &RegOp = MI->getOperand(I);
      const MachineOperand &SubRegOp = MI->getOperand(I + 1);

      if (!RegOp.isReg())
        report("Invalid register operand for REG_SEQUENCE", &RegOp, I);

      // Subreg index 0 (no subregister) is not a valid pair entry.
      if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
          SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
        report("Invalid subregister index operand for REG_SEQUENCE",
               &SubRegOp, I + 1);
      }
    }

    // The result must be a plain virtual register with no subreg index.
    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical())
      report("REG_SEQUENCE does not support physical register results", MI);

    if (MI->getOperand(0).getSubReg())
      report("Invalid subreg result for REG_SEQUENCE", MI);

    break;
  }
  }
}
2490
/// Verify a single operand of an instruction: its position relative to the
/// descriptor (explicit defs first, tied-operand constraints, excess-operand
/// rules), then per-kind checks dispatched on MachineOperand::getType() —
/// registers (classes, banks, subreg indices, liveness), register masks,
/// PHI block operands, frame indices against live stack slots, and CFI
/// indices.
///
/// NOTE(review): this excerpt was extracted from a rendered listing; several
/// `case MachineOperand::MO_...:` labels and a few condition lines are
/// missing (called out inline below) and are left as-is.
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  // PATCHPOINT's defs are optional: only operand 0, and only if it is a
  // register, counts as an explicit def.
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);
      if (MO->isReg()) {
        // NOTE(review): the first part of this condition (the OperandType
        // immediate/PC-relative test) is missing from this excerpt; only its
        // tail survives.
            !TII->isPCRelRegisterOperandLegal(*MO)))
          report("Expected a non-register operand.", MO, MONum);
      }
    }

    // Cross-check the descriptor's TIED_TO constraint against the operand's
    // actual tie flags.
    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (MO->getReg().isPhysical()) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (MOTied.getReg().isPhysical() &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else if (!MI->isVariadic()) {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (!MO->isValidExcessOperand())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }

  switch (MO->getType()) {
    // NOTE(review): the `case MachineOperand::MO_Register:` label is missing
    // from this excerpt; everything down to the next note handles register
    // operands.
    // Verify debug flag on debug instructions. Check this first because reg0
    // indicates an undefined debug value.
    if (MI->isDebugInstr() && MO->isUse()) {
      if (!MO->isDebug())
        report("Register operand must be marked debug", MO, MONum);
    } else if (MO->isDebug()) {
      report("Register operand must not be marked debug", MO, MONum);
    }

    // %noreg carries no further constraints.
    const Register Reg = MO->getReg();
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO, MONum);

    // Verify the consistency of tied operands.
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      // Ties must be symmetric: the counterpart must point back here.
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }

    // Verify two-address constraints after the twoaddressinstruction pass.
    // Both twoaddressinstruction pass and phi-node-elimination pass call
    // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
    // after twoaddressinstruction pass not after phi-node-elimination pass. So
    // we shouldn't use the IsSSA as the condition, we should based on
    // TiedOpsRewritten property to verify two-address constraints, this
    // property will be set in twoaddressinstruction pass.
    // NOTE(review): the hasProperty() argument line (presumably
    // Property::TiedOpsRewritten) is missing from this excerpt.
    unsigned DefIdx;
    if (MF->getProperties().hasProperty(
        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);

    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Reg.isPhysical()) {
      // Physical registers never carry a subreg index; the subregister itself
      // is named directly instead.
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
              TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            OS << printReg(Reg, TRI) << " is not a "
               << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instruction is permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
          const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank if any.
          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            OS << "Register bank " << RegBank->getName() << " too small("
               << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
               << Ty.getSizeInBits() << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index", MO,
                 MONum);
          return;
        }

        // If this is a target specific instruction and this operand
        // has register class constraint, the virtual register must
        // comply to it.
        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
            MONum < MCID.getNumOperands() &&
            TII->getRegClass(MCID, MONum, TRI, *MF)) {
          report("Virtual register does not match instruction constraint", MO,
                 MONum);
          OS << "Expect register class "
             << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
             << " but got nothing\n";
          return;
        }

        break;
      }
      if (SubIdx) {
        // A subreg index is only legal if some subclass of RC supports it,
        // and the operand's class must already be that subclass.
        const TargetRegisterClass *SRC =
          TRI->getSubClassWithSubReg(RC, SubIdx);
        if (!SRC) {
          report("Invalid subregister index for virtual register", MO, MONum);
          OS << "Register class " << TRI->getRegClassName(RC)
             << " does not support subreg index " << SubIdx << '\n';
          return;
        }
        if (RC != SRC) {
          report("Invalid register class for subregister index", MO, MONum);
          OS << "Register class " << TRI->getRegClassName(RC)
             << " does not fully support subreg index " << SubIdx << '\n';
          return;
        }
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
              TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (SubIdx) {
            // With a subreg index, the constraint applies to the super
            // register: map DRC up through the largest legal super class.
            const TargetRegisterClass *SuperRC =
                TRI->getLargestLegalSuperClass(RC, *MF);
            if (!SuperRC) {
              report("No largest legal super class exists.", MO, MONum);
              return;
            }
            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
            if (!DRC) {
              report("No matching super-reg register class.", MO, MONum);
              return;
            }
          }
          if (!RC->hasSuperClassEq(DRC)) {
            report("Illegal virtual register for instruction", MO, MONum);
            OS << "Expected a " << TRI->getRegClassName(DRC)
               << " register, but got a " << TRI->getRegClassName(RC)
               << " register\n";
          }
        }
      }
    }
    break;
  }

    // NOTE(review): the `case MachineOperand::MO_RegisterMask:` label is
    // missing from this excerpt. Masks are collected for later clobber checks.
    regMasks.push_back(MO->getRegMask());
    break;

    // NOTE(review): the `case MachineOperand::MO_MachineBasicBlock:` label is
    // missing from this excerpt.
    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
      report("PHI operand is not in the CFG", MO, MONum);
    break;

    // NOTE(review): the `case MachineOperand::MO_FrameIndex:` label is
    // missing from this excerpt.
    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      int FI = MO->getIndex();
      LiveInterval &LI = LiveStks->getInterval(FI);
      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);

      bool stores = MI->mayStore();
      bool loads = MI->mayLoad();
      // For a memory-to-memory move, we need to check if the frame
      // index is used for storing or loading, by inspecting the
      // memory operands.
      if (stores && loads) {
        for (auto *MMO : MI->memoperands()) {
          const PseudoSourceValue *PSV = MMO->getPseudoValue();
          if (PSV == nullptr) continue;
          // NOTE(review): the declaration line for `Value` (a
          // FixedStackPseudoSourceValue pointer) is missing from this excerpt.
            dyn_cast<FixedStackPseudoSourceValue>(PSV);
          if (Value == nullptr) continue;
          if (Value->getFrameIndex() != FI) continue;

          // The memoperand tells us which direction touches this slot; the
          // other direction must belong to a different operand.
          if (MMO->isStore())
            loads = false;
          else
            stores = false;
          break;
        }
        if (loads == stores)
          report("Missing fixed stack memoperand.", MI);
      }
      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
        report("Instruction loads from dead spill slot", MO, MONum);
        OS << "Live stack: " << LI << '\n';
      }
      if (stores && !LI.liveAt(Idx.getRegSlot())) {
        report("Instruction stores to dead spill slot", MO, MONum);
        OS << "Live stack: " << LI << '\n';
      }
    }
    break;

    // NOTE(review): the `case MachineOperand::MO_CFIIndex:` label is missing
    // from this excerpt.
    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
      report("CFI instruction has invalid index", MO, MONum);
    break;

  default:
    break;
  }
}
2823
2824void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2825 unsigned MONum, SlotIndex UseIdx,
2826 const LiveRange &LR,
2827 Register VRegOrUnit,
2828 LaneBitmask LaneMask) {
2829 const MachineInstr *MI = MO->getParent();
2830
2831 if (!LR.verify()) {
2832 report("invalid live range", MO, MONum);
2833 report_context_liverange(LR);
2834 report_context_vreg_regunit(VRegOrUnit);
2835 report_context(UseIdx);
2836 return;
2837 }
2838
2839 LiveQueryResult LRQ = LR.Query(UseIdx);
2840 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2841 // Check if we have a segment at the use, note however that we only need one
2842 // live subregister range, the others may be dead.
2843 if (!HasValue && LaneMask.none()) {
2844 report("No live segment at use", MO, MONum);
2845 report_context_liverange(LR);
2846 report_context_vreg_regunit(VRegOrUnit);
2847 report_context(UseIdx);
2848 }
2849 if (MO->isKill() && !LRQ.isKill()) {
2850 report("Live range continues after kill flag", MO, MONum);
2851 report_context_liverange(LR);
2852 report_context_vreg_regunit(VRegOrUnit);
2853 if (LaneMask.any())
2854 report_context_lanemask(LaneMask);
2855 report_context(UseIdx);
2856 }
2857}
2858
/// Check that the def of \p VRegOrUnit by operand \p MONum at \p DefIdx is
/// consistent with live range \p LR. When \p SubRangeCheck is true, \p LR is a
/// subregister live range covering the lanes in \p LaneMask rather than the
/// main range of the register.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  // Unlike the use-side check, do not return early on a malformed range; the
  // remaining checks are still run on whatever structure is present.
  if (!LR.verify()) {
    report("invalid live range", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }

  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value number at all at the def slot: the range is missing a segment
    // for this definition.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2923
/// Verify the liveness-related properties of register operand \p MONum of the
/// instruction that owns \p MO: kill/dead flags, LiveVariables and
/// LiveIntervals consistency, SSA single-def, and use-before-def.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Look up the main live interval for virtual registers, and sanity-check
  // that subreg operands have subranges when subreg liveness is tracked.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
            MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        // NOTE(review): LI may still be null here if the interval was missing
        // above (already reported); presumably that combination does not occur
        // in practice — confirm before relying on it.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          // Determine which lanes this operand reads and verify that at least
          // one covering subrange carries a live value into the use.
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            // MOP is a super-register of Reg when every regunit of Reg is
            // also a regunit of MOP.
            if (MOP.getReg() != Reg &&
                all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
                  return llvm::is_contained(TRI->regunits(MOP.getReg()),
                                            RegUnit);
                }))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs take effect at the early-clobber slot.
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          // Also check each subrange whose lanes this def writes.
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
3096
3097// This function gets called after visiting all instructions in a bundle. The
3098// argument points to the bundle header.
3099// Normal stand-alone instructions are also considered 'bundles', and this
3100// function is called for all of them.
3101void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3102 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3103 set_union(MInfo.regsKilled, regsKilled);
3104 set_subtract(regsLive, regsKilled); regsKilled.clear();
3105 // Kill any masked registers.
3106 while (!regMasks.empty()) {
3107 const uint32_t *Mask = regMasks.pop_back_val();
3108 for (Register Reg : regsLive)
3109 if (Reg.isPhysical() &&
3110 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3111 regsDead.push_back(Reg);
3112 }
3113 set_subtract(regsLive, regsDead); regsDead.clear();
3114 set_union(regsLive, regsDefined); regsDefined.clear();
3115}
3116
3117void
3118MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3119 MBBInfoMap[MBB].regsLiveOut = regsLive;
3120 regsLive.clear();
3121
3122 if (Indexes) {
3123 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3124 if (!(stop > lastIndex)) {
3125 report("Block ends before last instruction index", MBB);
3126 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3127 << '\n';
3128 }
3129 lastIndex = stop;
3130 }
3131}
3132
3133namespace {
3134// This implements a set of registers that serves as a filter: can filter other
3135// sets by passing through elements not in the filter and blocking those that
3136// are. Any filter implicitly includes the full set of physical registers upon
3137// creation, thus filtering them all out. The filter itself as a set only grows,
3138// and needs to be as efficient as possible.
3139struct VRegFilter {
3140 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3141 // no duplicates. Both virtual and physical registers are fine.
3142 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3143 SmallVector<Register, 0> VRegsBuffer;
3144 filterAndAdd(FromRegSet, VRegsBuffer);
3145 }
3146 // Filter \p FromRegSet through the filter and append passed elements into \p
3147 // ToVRegs. All elements appended are then added to the filter itself.
3148 // \returns true if anything changed.
3149 template <typename RegSetT>
3150 bool filterAndAdd(const RegSetT &FromRegSet,
3151 SmallVectorImpl<Register> &ToVRegs) {
3152 unsigned SparseUniverse = Sparse.size();
3153 unsigned NewSparseUniverse = SparseUniverse;
3154 unsigned NewDenseSize = Dense.size();
3155 size_t Begin = ToVRegs.size();
3156 for (Register Reg : FromRegSet) {
3157 if (!Reg.isVirtual())
3158 continue;
3159 unsigned Index = Register::virtReg2Index(Reg);
3160 if (Index < SparseUniverseMax) {
3161 if (Index < SparseUniverse && Sparse.test(Index))
3162 continue;
3163 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3164 } else {
3165 if (Dense.count(Reg))
3166 continue;
3167 ++NewDenseSize;
3168 }
3169 ToVRegs.push_back(Reg);
3170 }
3171 size_t End = ToVRegs.size();
3172 if (Begin == End)
3173 return false;
3174 // Reserving space in sets once performs better than doing so continuously
3175 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3176 // tuned all the way down) and double iteration (the second one is over a
3177 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3178 Sparse.resize(NewSparseUniverse);
3179 Dense.reserve(NewDenseSize);
3180 for (unsigned I = Begin; I < End; ++I) {
3181 Register Reg = ToVRegs[I];
3182 unsigned Index = Register::virtReg2Index(Reg);
3183 if (Index < SparseUniverseMax)
3184 Sparse.set(Index);
3185 else
3186 Dense.insert(Reg);
3187 }
3188 return true;
3189 }
3190
3191private:
3192 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3193 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
3194 // are tracked by Dense. The only purpose of the threashold and the Dense set
3195 // is to have a reasonably growing memory usage in pathological cases (large
3196 // number of very sparse VRegFilter instances live at the same time). In
3197 // practice even in the worst-by-execution time cases having all elements
3198 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3199 // space efficient than if tracked by Dense. The threashold is set to keep the
3200 // worst-case memory usage within 2x of figures determined empirically for
3201 // "all Dense" scenario in such worst-by-execution-time cases.
3202 BitVector Sparse;
3204};
3205
3206// Implements both a transfer function and a (binary, in-place) join operator
3207// for a dataflow over register sets with set union join and filtering transfer
3208// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3209// Maintains out_b as its state, allowing for O(n) iteration over it at any
3210// time, where n is the size of the set (as opposed to O(U) where U is the
3211// universe). filter_b implicitly contains all physical registers at all times.
3212class FilteringVRegSet {
3213 VRegFilter Filter;
3215
3216public:
3217 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3218 // Both virtual and physical registers are fine.
3219 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3220 Filter.add(RS);
3221 }
3222 // Passes \p RS through the filter_b (transfer function) and adds what's left
3223 // to itself (out_b).
3224 template <typename RegSetT> bool add(const RegSetT &RS) {
3225 // Double-duty the Filter: to maintain VRegs a set (and the join operation
3226 // a set union) just add everything being added here to the Filter as well.
3227 return Filter.filterAndAdd(RS, VRegs);
3228 }
3229 using const_iterator = decltype(VRegs)::const_iterator;
3230 const_iterator begin() const { return VRegs.begin(); }
3231 const_iterator end() const { return VRegs.end(); }
3232 size_t size() const { return VRegs.size(); }
3233};
3234} // namespace
3235
3236// Calculate the largest possible vregsPassed sets. These are the registers that
3237// can pass through an MBB live, but may not be live every time. It is assumed
3238// that all vregsPassed sets are empty before the call.
3239void MachineVerifier::calcRegsPassed() {
3240 if (MF->empty())
3241 // ReversePostOrderTraversal doesn't handle empty functions.
3242 return;
3243
3244 for (const MachineBasicBlock *MB :
3246 FilteringVRegSet VRegs;
3247 BBInfo &Info = MBBInfoMap[MB];
3248 assert(Info.reachable);
3249
3250 VRegs.addToFilter(Info.regsKilled);
3251 VRegs.addToFilter(Info.regsLiveOut);
3252 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3253 const BBInfo &PredInfo = MBBInfoMap[Pred];
3254 if (!PredInfo.reachable)
3255 continue;
3256
3257 VRegs.add(PredInfo.regsLiveOut);
3258 VRegs.add(PredInfo.vregsPassed);
3259 }
3260 Info.vregsPassed.reserve(VRegs.size());
3261 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3262 }
3263}
3264
3265// Calculate the set of virtual registers that must be passed through each basic
3266// block in order to satisfy the requirements of successor blocks. This is very
3267// similar to calcRegsPassed, only backwards.
3268void MachineVerifier::calcRegsRequired() {
3269 // First push live-in regs to predecessors' vregsRequired.
3271 for (const auto &MBB : *MF) {
3272 BBInfo &MInfo = MBBInfoMap[&MBB];
3273 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3274 BBInfo &PInfo = MBBInfoMap[Pred];
3275 if (PInfo.addRequired(MInfo.vregsLiveIn))
3276 todo.insert(Pred);
3277 }
3278
3279 // Handle the PHI node.
3280 for (const MachineInstr &MI : MBB.phis()) {
3281 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3282 // Skip those Operands which are undef regs or not regs.
3283 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3284 continue;
3285
3286 // Get register and predecessor for one PHI edge.
3287 Register Reg = MI.getOperand(i).getReg();
3288 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3289
3290 BBInfo &PInfo = MBBInfoMap[Pred];
3291 if (PInfo.addRequired(Reg))
3292 todo.insert(Pred);
3293 }
3294 }
3295 }
3296
3297 // Iteratively push vregsRequired to predecessors. This will converge to the
3298 // same final state regardless of DenseSet iteration order.
3299 while (!todo.empty()) {
3300 const MachineBasicBlock *MBB = *todo.begin();
3301 todo.erase(MBB);
3302 BBInfo &MInfo = MBBInfoMap[MBB];
3303 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3304 if (Pred == MBB)
3305 continue;
3306 BBInfo &SInfo = MBBInfoMap[Pred];
3307 if (SInfo.addRequired(MInfo.vregsRequired))
3308 todo.insert(Pred);
3309 }
3310 }
3311}
3312
3313// Check PHI instructions at the beginning of MBB. It is assumed that
3314// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3315void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3316 BBInfo &MInfo = MBBInfoMap[&MBB];
3317
3319 for (const MachineInstr &Phi : MBB) {
3320 if (!Phi.isPHI())
3321 break;
3322 seen.clear();
3323
3324 const MachineOperand &MODef = Phi.getOperand(0);
3325 if (!MODef.isReg() || !MODef.isDef()) {
3326 report("Expected first PHI operand to be a register def", &MODef, 0);
3327 continue;
3328 }
3329 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3330 MODef.isEarlyClobber() || MODef.isDebug())
3331 report("Unexpected flag on PHI operand", &MODef, 0);
3332 Register DefReg = MODef.getReg();
3333 if (!DefReg.isVirtual())
3334 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3335
3336 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3337 const MachineOperand &MO0 = Phi.getOperand(I);
3338 if (!MO0.isReg()) {
3339 report("Expected PHI operand to be a register", &MO0, I);
3340 continue;
3341 }
3342 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3343 MO0.isDebug() || MO0.isTied())
3344 report("Unexpected flag on PHI operand", &MO0, I);
3345
3346 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3347 if (!MO1.isMBB()) {
3348 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3349 continue;
3350 }
3351
3352 const MachineBasicBlock &Pre = *MO1.getMBB();
3353 if (!Pre.isSuccessor(&MBB)) {
3354 report("PHI input is not a predecessor block", &MO1, I + 1);
3355 continue;
3356 }
3357
3358 if (MInfo.reachable) {
3359 seen.insert(&Pre);
3360 BBInfo &PrInfo = MBBInfoMap[&Pre];
3361 if (!MO0.isUndef() && PrInfo.reachable &&
3362 !PrInfo.isLiveOut(MO0.getReg()))
3363 report("PHI operand is not live-out from predecessor", &MO0, I);
3364 }
3365 }
3366
3367 // Did we see all predecessors?
3368 if (MInfo.reachable) {
3369 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3370 if (!seen.count(Pred)) {
3371 report("Missing PHI operand", &Phi);
3372 OS << printMBBReference(*Pred)
3373 << " is a predecessor according to the CFG.\n";
3374 }
3375 }
3376 }
3377 }
3378}
3379
3380static void
3382 std::function<void(const Twine &Message)> FailureCB,
3383 raw_ostream &OS) {
3385 CV.initialize(&OS, FailureCB, MF);
3386
3387 for (const auto &MBB : MF) {
3388 CV.visit(MBB);
3389 for (const auto &MI : MBB.instrs())
3390 CV.visit(MI);
3391 }
3392
3393 if (CV.sawTokens()) {
3394 DT.recalculate(const_cast<MachineFunction &>(MF));
3395 CV.verify(DT);
3396 }
3397}
3398
3399void MachineVerifier::visitMachineFunctionAfter() {
3400 auto FailureCB = [this](const Twine &Message) {
3401 report(Message.str().c_str(), MF);
3402 };
3403 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3404
3405 calcRegsPassed();
3406
3407 for (const MachineBasicBlock &MBB : *MF)
3408 checkPHIOps(MBB);
3409
3410 // Now check liveness info if available
3411 calcRegsRequired();
3412
3413 // Check for killed virtual registers that should be live out.
3414 for (const auto &MBB : *MF) {
3415 BBInfo &MInfo = MBBInfoMap[&MBB];
3416 for (Register VReg : MInfo.vregsRequired)
3417 if (MInfo.regsKilled.count(VReg)) {
3418 report("Virtual register killed in block, but needed live out.", &MBB);
3419 OS << "Virtual register " << printReg(VReg)
3420 << " is used after the block.\n";
3421 }
3422 }
3423
3424 if (!MF->empty()) {
3425 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3426 for (Register VReg : MInfo.vregsRequired) {
3427 report("Virtual register defs don't dominate all uses.", MF);
3428 report_context_vreg(VReg);
3429 }
3430 }
3431
3432 if (LiveVars)
3433 verifyLiveVariables();
3434 if (LiveInts)
3435 verifyLiveIntervals();
3436
3437 // Check live-in list of each MBB. If a register is live into MBB, check
3438 // that the register is in regsLiveOut of each predecessor block. Since
3439 // this must come from a definition in the predecesssor or its live-in
3440 // list, this will catch a live-through case where the predecessor does not
3441 // have the register in its live-in list. This currently only checks
3442 // registers that have no aliases, are not allocatable and are not
3443 // reserved, which could mean a condition code register for instance.
3444 if (MRI->tracksLiveness())
3445 for (const auto &MBB : *MF)
3447 MCPhysReg LiveInReg = P.PhysReg;
3448 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3449 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3450 continue;
3451 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3452 BBInfo &PInfo = MBBInfoMap[Pred];
3453 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3454 report("Live in register not found to be live out from predecessor.",
3455 &MBB);
3456 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3457 << printMBBReference(*Pred) << '\n';
3458 }
3459 }
3460 }
3461
3462 for (auto CSInfo : MF->getCallSitesInfo())
3463 if (!CSInfo.first->isCall())
3464 report("Call site info referencing instruction that is not call", MF);
3465
3466 // If there's debug-info, check that we don't have any duplicate value
3467 // tracking numbers.
3468 if (MF->getFunction().getSubprogram()) {
3469 DenseSet<unsigned> SeenNumbers;
3470 for (const auto &MBB : *MF) {
3471 for (const auto &MI : MBB) {
3472 if (auto Num = MI.peekDebugInstrNum()) {
3473 auto Result = SeenNumbers.insert((unsigned)Num);
3474 if (!Result.second)
3475 report("Instruction has a duplicated value tracking number", &MI);
3476 }
3477 }
3478 }
3479 }
3480}
3481
3482void MachineVerifier::verifyLiveVariables() {
3483 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3484 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3486 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3487 for (const auto &MBB : *MF) {
3488 BBInfo &MInfo = MBBInfoMap[&MBB];
3489
3490 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3491 if (MInfo.vregsRequired.count(Reg)) {
3492 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3493 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3494 OS << "Virtual register " << printReg(Reg)
3495 << " must be live through the block.\n";
3496 }
3497 } else {
3498 if (VI.AliveBlocks.test(MBB.getNumber())) {
3499 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3500 OS << "Virtual register " << printReg(Reg)
3501 << " is not needed live through the block.\n";
3502 }
3503 }
3504 }
3505 }
3506}
3507
3508void MachineVerifier::verifyLiveIntervals() {
3509 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3510 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3512
3513 // Spilling and splitting may leave unused registers around. Skip them.
3514 if (MRI->reg_nodbg_empty(Reg))
3515 continue;
3516
3517 if (!LiveInts->hasInterval(Reg)) {
3518 report("Missing live interval for virtual register", MF);
3519 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3520 continue;
3521 }
3522
3523 const LiveInterval &LI = LiveInts->getInterval(Reg);
3524 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3525 verifyLiveInterval(LI);
3526 }
3527
3528 // Verify all the cached regunit intervals.
3529 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3530 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3531 verifyLiveRange(*LR, i);
3532}
3533
/// Verify one value number \p VNI of live range \p LR. \p Reg is the virtual
/// register or regunit the range belongs to (0 when unknown); \p LaneMask
/// names the covered lanes for a subregister range and is empty for a main
/// range.
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  // Unused value numbers carry no constraints.
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  // The range must be live at the value's own definition point...
  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // ...and the value live there must be this very value number.
  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // The def index must fall inside some basic block.
  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // PHI-defined values are only valid when defined exactly at block entry.
  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    // Scan all operands of the defining instruction (including bundled
    // instructions) for a def that actually writes Reg (or, for a physical
    // Reg, the regunit) in the lanes covered by LaneMask.
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        // Physical case: Reg here is a regunit; match any physreg def that
        // contains that unit.
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      // For a subregister range, the def must touch at least one covered lane.
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
3624
3625void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3627 Register Reg,
3628 LaneBitmask LaneMask) {
3629 const LiveRange::Segment &S = *I;
3630 const VNInfo *VNI = S.valno;
3631 assert(VNI && "Live segment has no valno");
3632
3633 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3634 report("Foreign valno in live segment", MF);
3635 report_context(LR, Reg, LaneMask);
3636 report_context(S);
3637 report_context(*VNI);
3638 }
3639
3640 if (VNI->isUnused()) {
3641 report("Live segment valno is marked unused", MF);
3642 report_context(LR, Reg, LaneMask);
3643 report_context(S);
3644 }
3645
3646 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3647 if (!MBB) {
3648 report("Bad start of live segment, no basic block", MF);
3649 report_context(LR, Reg, LaneMask);
3650 report_context(S);
3651 return;
3652 }
3653 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3654 if (S.start != MBBStartIdx && S.start != VNI->def) {
3655 report("Live segment must begin at MBB entry or valno def", MBB);
3656 report_context(LR, Reg, LaneMask);
3657 report_context(S);
3658 }
3659
3660 const MachineBasicBlock *EndMBB =
3661 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3662 if (!EndMBB) {
3663 report("Bad end of live segment, no basic block", MF);
3664 report_context(LR, Reg, LaneMask);
3665 report_context(S);
3666 return;
3667 }
3668
3669 // Checks for non-live-out segments.
3670 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3671 // RegUnit intervals are allowed dead phis.
3672 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3673 S.end == VNI->def.getDeadSlot())
3674 return;
3675
3676 // The live segment is ending inside EndMBB
3677 const MachineInstr *MI =
3678 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3679 if (!MI) {
3680 report("Live segment doesn't end at a valid instruction", EndMBB);
3681 report_context(LR, Reg, LaneMask);
3682 report_context(S);
3683 return;
3684 }
3685
3686 // The block slot must refer to a basic block boundary.
3687 if (S.end.isBlock()) {
3688 report("Live segment ends at B slot of an instruction", EndMBB);
3689 report_context(LR, Reg, LaneMask);
3690 report_context(S);
3691 }
3692
3693 if (S.end.isDead()) {
3694 // Segment ends on the dead slot.
3695 // That means there must be a dead def.
3696 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3697 report("Live segment ending at dead slot spans instructions", EndMBB);
3698 report_context(LR, Reg, LaneMask);
3699 report_context(S);
3700 }
3701 }
3702
3703 // After tied operands are rewritten, a live segment can only end at an
3704 // early-clobber slot if it is being redefined by an early-clobber def.
3705 // TODO: Before tied operands are rewritten, a live segment can only end at
3706 // an early-clobber slot if the last use is tied to an early-clobber def.
3707 if (MF->getProperties().hasProperty(
3709 S.end.isEarlyClobber()) {
3710 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3711 report("Live segment ending at early clobber slot must be "
3712 "redefined by an EC def in the same instruction",
3713 EndMBB);
3714 report_context(LR, Reg, LaneMask);
3715 report_context(S);
3716 }
3717 }
3718
3719 // The following checks only apply to virtual registers. Physreg liveness
3720 // is too weird to check.
3721 if (Reg.isVirtual()) {
3722 // A live segment can end with either a redefinition, a kill flag on a
3723 // use, or a dead flag on a def.
3724 bool hasRead = false;
3725 bool hasSubRegDef = false;
3726 bool hasDeadDef = false;
3727 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3728 if (!MOI->isReg() || MOI->getReg() != Reg)
3729 continue;
3730 unsigned Sub = MOI->getSubReg();
3731 LaneBitmask SLM =
3732 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3733 if (MOI->isDef()) {
3734 if (Sub != 0) {
3735 hasSubRegDef = true;
3736 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3737 // mask for subregister defs. Read-undef defs will be handled by
3738 // readsReg below.
3739 SLM = ~SLM;
3740 }
3741 if (MOI->isDead())
3742 hasDeadDef = true;
3743 }
3744 if (LaneMask.any() && (LaneMask & SLM).none())
3745 continue;
3746 if (MOI->readsReg())
3747 hasRead = true;
3748 }
3749 if (S.end.isDead()) {
3750 // Make sure that the corresponding machine operand for a "dead" live
3751 // range has the dead flag. We cannot perform this check for subregister
3752 // liveranges as partially dead values are allowed.
3753 if (LaneMask.none() && !hasDeadDef) {
3754 report(
3755 "Instruction ending live segment on dead slot has no dead flag",
3756 MI);
3757 report_context(LR, Reg, LaneMask);
3758 report_context(S);
3759 }
3760 } else {
3761 if (!hasRead) {
3762 // When tracking subregister liveness, the main range must start new
3763 // values on partial register writes, even if there is no read.
3764 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3765 !hasSubRegDef) {
3766 report("Instruction ending live segment doesn't read the register",
3767 MI);
3768 report_context(LR, Reg, LaneMask);
3769 report_context(S);
3770 }
3771 }
3772 }
3773 }
3774 }
3775
3776 // Now check all the basic blocks in this live segment.
3778 // Is this live segment the beginning of a non-PHIDef VN?
3779 if (S.start == VNI->def && !VNI->isPHIDef()) {
3780 // Not live-in to any blocks.
3781 if (MBB == EndMBB)
3782 return;
3783 // Skip this block.
3784 ++MFI;
3785 }
3786
3788 if (LaneMask.any()) {
3789 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3790 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3791 }
3792
3793 while (true) {
3794 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3795 // We don't know how to track physregs into a landing pad.
3796 if (!Reg.isVirtual() && MFI->isEHPad()) {
3797 if (&*MFI == EndMBB)
3798 break;
3799 ++MFI;
3800 continue;
3801 }
3802
3803 // Is VNI a PHI-def in the current block?
3804 bool IsPHI = VNI->isPHIDef() &&
3805 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3806
3807 // Check that VNI is live-out of all predecessors.
3808 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3809 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3810 // Predecessor of landing pad live-out on last call.
3811 if (MFI->isEHPad()) {
3812 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3813 if (MI.isCall()) {
3814 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3815 break;
3816 }
3817 }
3818 }
3819 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3820
3821 // All predecessors must have a live-out value. However for a phi
3822 // instruction with subregister intervals
3823 // only one of the subregisters (not necessarily the current one) needs to
3824 // be defined.
3825 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3826 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3827 continue;
3828 report("Register not marked live out of predecessor", Pred);
3829 report_context(LR, Reg, LaneMask);
3830 report_context(*VNI);
3831 OS << " live into " << printMBBReference(*MFI) << '@'
3832 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3833 << '\n';
3834 continue;
3835 }
3836
3837 // Only PHI-defs can take different predecessor values.
3838 if (!IsPHI && PVNI != VNI) {
3839 report("Different value live out of predecessor", Pred);
3840 report_context(LR, Reg, LaneMask);
3841 OS << "Valno #" << PVNI->id << " live out of "
3842 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3843 << " live into " << printMBBReference(*MFI) << '@'
3844 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3845 }
3846 }
3847 if (&*MFI == EndMBB)
3848 break;
3849 ++MFI;
3850 }
3851}
3852
3853void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3854 LaneBitmask LaneMask) {
3855 for (const VNInfo *VNI : LR.valnos)
3856 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3857
3858 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3859 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3860}
3861
3862void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3863 Register Reg = LI.reg();
3864 assert(Reg.isVirtual());
3865 verifyLiveRange(LI, Reg);
3866
3867 if (LI.hasSubRanges()) {
3869 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3870 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3871 if ((Mask & SR.LaneMask).any()) {
3872 report("Lane masks of sub ranges overlap in live interval", MF);
3873 report_context(LI);
3874 }
3875 if ((SR.LaneMask & ~MaxMask).any()) {
3876 report("Subrange lanemask is invalid", MF);
3877 report_context(LI);
3878 }
3879 if (SR.empty()) {
3880 report("Subrange must not be empty", MF);
3881 report_context(SR, LI.reg(), SR.LaneMask);
3882 }
3883 Mask |= SR.LaneMask;
3884 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3885 if (!LI.covers(SR)) {
3886 report("A Subrange is not covered by the main range", MF);
3887 report_context(LI);
3888 }
3889 }
3890 }
3891
3892 // Check the LI only has one connected component.
3893 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3894 unsigned NumComp = ConEQ.Classify(LI);
3895 if (NumComp > 1) {
3896 report("Multiple connected components in live interval", MF);
3897 report_context(LI);
3898 for (unsigned comp = 0; comp != NumComp; ++comp) {
3899 OS << comp << ": valnos";
3900 for (const VNInfo *I : LI.valnos)
3901 if (comp == ConEQ.getEqClass(I))
3902 OS << ' ' << I->id;
3903 OS << '\n';
3904 }
3905 }
3906}
3907
3908namespace {
3909
3910 // FrameSetup and FrameDestroy can have zero adjustment, so using a single
3911 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
3912 // value is zero.
3913 // We use a bool plus an integer to capture the stack state.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int Entry, int Exit, bool EntrySetupFlag, bool ExitSetupFlag)
      : EntryValue(Entry), ExitValue(Exit), EntryIsSetup(EntrySetupFlag),
        ExitIsSetup(ExitSetupFlag) {}

  // Stack adjustment on block entry/exit. A negative value means a frame
  // is currently being set up.
  int EntryValue = 0;
  int ExitValue = 0;
  // Whether a FrameSetup is pending (not yet matched by a FrameDestroy)
  // at block entry/exit.
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};
3926
3927} // end anonymous namespace
3928
3929/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3930/// by a FrameDestroy <n>, stack adjustments are identical on all
3931/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3932void MachineVerifier::verifyStackFrame() {
3933 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3934 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3935 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3936 return;
3937
3939 SPState.resize(MF->getNumBlockIDs());
3941
3942 // Visit the MBBs in DFS order.
3943 for (df_ext_iterator<const MachineFunction *,
3945 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3946 DFI != DFE; ++DFI) {
3947 const MachineBasicBlock *MBB = *DFI;
3948
3949 StackStateOfBB BBState;
3950 // Check the exit state of the DFS stack predecessor.
3951 if (DFI.getPathLength() >= 2) {
3952 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3953 assert(Reachable.count(StackPred) &&
3954 "DFS stack predecessor is already visited.\n");
3955 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3956 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3957 BBState.ExitValue = BBState.EntryValue;
3958 BBState.ExitIsSetup = BBState.EntryIsSetup;
3959 }
3960
3961 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3962 report("Call frame size on entry does not match value computed from "
3963 "predecessor",
3964 MBB);
3965 OS << "Call frame size on entry " << MBB->getCallFrameSize()
3966 << " does not match value computed from predecessor "
3967 << -BBState.EntryValue << '\n';
3968 }
3969
3970 // Update stack state by checking contents of MBB.
3971 for (const auto &I : *MBB) {
3972 if (I.getOpcode() == FrameSetupOpcode) {
3973 if (BBState.ExitIsSetup)
3974 report("FrameSetup is after another FrameSetup", &I);
3975 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3976 report("AdjustsStack not set in presence of a frame pseudo "
3977 "instruction.", &I);
3978 BBState.ExitValue -= TII->getFrameTotalSize(I);
3979 BBState.ExitIsSetup = true;
3980 }
3981
3982 if (I.getOpcode() == FrameDestroyOpcode) {
3983 int Size = TII->getFrameTotalSize(I);
3984 if (!BBState.ExitIsSetup)
3985 report("FrameDestroy is not after a FrameSetup", &I);
3986 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3987 BBState.ExitValue;
3988 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3989 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3990 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
3991 << AbsSPAdj << ">.\n";
3992 }
3993 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3994 report("AdjustsStack not set in presence of a frame pseudo "
3995 "instruction.", &I);
3996 BBState.ExitValue += Size;
3997 BBState.ExitIsSetup = false;
3998 }
3999 }
4000 SPState[MBB->getNumber()] = BBState;
4001
4002 // Make sure the exit state of any predecessor is consistent with the entry
4003 // state.
4004 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4005 if (Reachable.count(Pred) &&
4006 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4007 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4008 report("The exit stack state of a predecessor is inconsistent.", MBB);
4009 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4010 << SPState[Pred->getNumber()].ExitValue << ", "
4011 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4012 << printMBBReference(*MBB) << " has entry state ("
4013 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4014 }
4015 }
4016
4017 // Make sure the entry state of any successor is consistent with the exit
4018 // state.
4019 for (const MachineBasicBlock *Succ : MBB->successors()) {
4020 if (Reachable.count(Succ) &&
4021 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4022 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4023 report("The entry stack state of a successor is inconsistent.", MBB);
4024 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4025 << SPState[Succ->getNumber()].EntryValue << ", "
4026 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4027 << printMBBReference(*MBB) << " has exit state ("
4028 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4029 }
4030 }
4031
4032 // Make sure a basic block with return ends with zero stack adjustment.
4033 if (!MBB->empty() && MBB->back().isReturn()) {
4034 if (BBState.ExitIsSetup)
4035 report("A return block ends with a FrameSetup.", MBB);
4036 if (BBState.ExitValue)
4037 report("A return block ends with a nonzero stack adjustment.", MBB);
4038 }
4039 }
4040}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
global merge Global merge function pass
const HexagonInstrInfo * TII
hexagon widen Hexagon Store false hexagon widen loads
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1448
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:658
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
const APFloat & getValueAPF() const
Definition: Constants.h:314
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:151
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:355
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:181
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:264
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isPointerVector() const
Definition: LowLevelType.h:152
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:190
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:277
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:183
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:270
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:205
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:200
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:429
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:740
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:575
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:946
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:980
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:971
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:83
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:176
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:209
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:242
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:212
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:216
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:272
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:219
SlotIndexes pass.
Definition: SlotIndexes.h:297
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:505
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:510
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:481
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:401
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
iterator begin() const
Definition: SmallPtrSet.h:472
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:218
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:225
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:239
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:47
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
void initializeMachineVerifierLegacyPassPass(PassRegistry &)
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:372
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:78
Pair of physical register and lane mask.