//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//
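//
// Illustrative usage (a hedged sketch, not part of the original header): with
// a typical LLVM tool invocation the verifier is enabled as
//
//   llc -verify-machineinstrs input.ll -o /dev/null
//
// or invoked programmatically through the MachineFunction::verify() entry
// points defined in this file.
//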

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

/// Used by the ReportedErrors class to guarantee only one error is reported
/// at one time.
static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;

struct MachineVerifier {
  MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                  raw_ostream *OS, bool AbortOnError = true)
      : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
                  bool AbortOnError = true)
      : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes, raw_ostream *OS,
                  bool AbortOnError = true)
      : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
        LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
        ReportedErrs(AbortOnError) {}

  /// \returns true if no problems were found.
  bool verify(const MachineFunction &MF);

  MachineFunctionAnalysisManager *MFAM = nullptr;
  Pass *const PASS = nullptr;
  raw_ostream &OS;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

244 /// A class to track the number of reported error and to guarantee that only
245 /// one error is reported at one time.
246 class ReportedErrors {
247 unsigned NumReported = 0;
248 bool AbortOnError;
249
250 public:
251 /// \param AbortOnError -- If set, abort after printing the first error.
252 ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}
253
254 ~ReportedErrors() {
255 if (!hasError())
256 return;
257 if (AbortOnError)
258 report_fatal_error("Found " + Twine(NumReported) +
259 " machine code errors.");
260 // Since we haven't aborted, release the lock to allow other threads to
261 // report errors.
262 ReportedErrorsLock->unlock();
263 }
264
265 /// Increment the number of reported errors.
266 /// \returns true if this is the first reported error.
267 bool increment() {
268 // If this is the first error this thread has encountered, grab the lock
269 // to prevent other threads from reporting errors at the same time.
270 // Otherwise we assume we already have the lock.
271 if (!hasError())
272 ReportedErrorsLock->lock();
273 ++NumReported;
274 return NumReported == 1;
275 }
276
277 /// \returns true if an error was reported.
278 bool hasError() { return NumReported; }
279 };
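  // How report() below is expected to drive this class (a sketch inferred
  // from this file, not a documented contract): the first increment() on a
  // thread takes ReportedErrorsLock and returns true, so the caller prints
  // the banner and the full function dump once; subsequent errors only
  // append their message, and the destructor then aborts or unlocks.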
  ReportedErrors ReportedErrs;

  // This is calculated only when trying to verify convergence control tokens.
  // Similar to the LLVM IR verifier, we calculate this locally instead of
  // relying on the pass manager.
  MachineDominatorTree DT;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();
  /// Check that the stack protector is the top-most object in the stack.
  void verifyStackProtector();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierLegacyPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierLegacyPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
    AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
    AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
    AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
    return false;
  }
};

} // end anonymous namespace

PreservedAnalyses
MachineVerifierPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailsVerification))
    return PreservedAnalyses::all();
  MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  return PreservedAnalyses::all();
}

char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierLegacyPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
}

bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
                         /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
      .verify(*this);
}
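
// Illustrative call site (a hedged sketch; "MyPass" is an assumption, not
// part of this file):
//
//   bool OK = MF.verify(/*p=*/nullptr, "After MyPass", &errs(),
//                       /*AbortOnError=*/false);
//   if (!OK)
//     errs() << "MyPass left invalid machine code\n";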

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
                                     E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

bool MachineVerifier::verify(const MachineFunction &MF) {
  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return true;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);
  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
    LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return !ReportedErrs.hasError();
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  OS << '\n';
  if (ReportedErrs.increment()) {
    if (Banner)
      OS << "# " << Banner << '\n';

    if (LiveInts != nullptr)
      LiveInts->print(OS);
    else
      MF->print(OS, Indexes);
  }

  OS << "*** Bad machine code: " << msg << " ***\n"
     << "- function: " << MF->getName() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
     << " (" << (const void *)MBB << ')';
  if (Indexes)
    OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
       << Indexes->getMBBEndIdx(MBB) << ')';
  OS << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  OS << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    OS << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(OS, /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  OS << "- operand " << MONum << ": ";
  MO->print(OS, MOVRegType, TRI);
  OS << '\n';
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  OS << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  OS << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  OS << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  OS << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    OS << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty()) {
    verifyStackFrame();
    verifyStackProtector();
  }
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoPHIs) &&
      MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock *, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions
  // to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      OS << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      OS << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
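
// Illustrative failure case (hedged MIR sketch, not from this file):
//
//   %p:_(<2 x p0>) = G_INTTOPTR %v:_(<4 x s64>)
//
// Both types are vectors but the lane counts differ (2 vs. 4), so the
// element-count check above fires with "operand types must preserve number
// of vector elements".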

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);
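
    // Illustrative example (hedged, not from this file): the checks above
    // accept
    //   %d:_(<2 x s32>) = G_BITCAST %s:_(s64)
    // since the total bit size matches, neither side is a pointer, and the
    // type actually changes.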

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isPointer() || SrcTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = SrcTy.getScalarSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() ||
          (SrcTy.getScalarType() != DstTy.getScalarType() &&
           !SrcTy.isPointerVector()) ||
          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
          SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getElementCount() !=
        SrcTy.getElementCount() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_SCMP:
  case TargetOpcode::G_UCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());

    if (SrcTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as operands", MI);
      break;
    }

    if (DstTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as a result", MI);
      break;
    }

    if (DstTy.getScalarSizeInBits() < 2) {
      report("Result type must be at least 2 bits wide", MI);
      break;
    }

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount())) {
      report("Generic vector scmp/ucmp must preserve number of lanes", MI);
      break;
    }

    break;
  }
  case TargetOpcode::G_EXTRACT: {
    const MachineOperand &SrcOp = MI->getOperand(1);
    if (!SrcOp.isReg()) {
      report("extract source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(2);
    if (!OffsetOp.isImm()) {
      report("extract offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
    if (SrcSize == DstSize)
      report("extract source must be larger than result", MI);

    if (DstSize + OffsetOp.getImm() > SrcSize)
      report("extract reads past end of register", MI);
    break;
  }
  case TargetOpcode::G_INSERT: {
    const MachineOperand &SrcOp = MI->getOperand(2);
    if (!SrcOp.isReg()) {
      report("insert source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(3);
    if (!OffsetOp.isImm()) {
      report("insert offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();

    if (DstSize <= SrcSize)
      report("inserted size must be smaller than total register", MI);

    if (SrcSize + OffsetOp.getImm() > DstSize)
      report("insert writes past end of register", MI);

    break;
  }
  case TargetOpcode::G_JUMP_TABLE: {
    if (!MI->getOperand(1).isJTI())
      report("G_JUMP_TABLE source operand must be a jump table index", MI);
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isPointer())
      report("G_JUMP_TABLE dest operand must have a pointer type", MI);
    break;
  }
  case TargetOpcode::G_BRJT: {
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("G_BRJT src operand 0 must be a pointer type", MI);

    if (!MI->getOperand(1).isJTI())
      report("G_BRJT src operand 1 must be a jump table index", MI);

    const auto &IdxOp = MI->getOperand(2);
    if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
      report("G_BRJT src operand 2 must be a scalar reg type", MI);
    break;
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
    // TODO: Should verify number of def and use operands, but the current
    // interface requires passing in IR types for mangling.
    const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
    if (!IntrIDOp.isIntrinsicID()) {
      report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
      break;
    }

    if (!verifyGIntrinsicSideEffects(MI))
      break;
    if (!verifyGIntrinsicConvergence(MI))
      break;

    break;
  }
1707 case TargetOpcode::G_SEXT_INREG: {
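// The immediate is the width being sign-extended from, so it must lie in
// [1, source scalar width). Illustrative MIR:
//   %d:_(s32) = G_SEXT_INREG %s:_(s32), 8
// sign-extends the low 8 bits of %s across all 32 bits.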
1708 if (!MI->getOperand(2).isImm()) {
1709 report("G_SEXT_INREG expects an immediate operand #2", MI);
1710 break;
1711 }
1712
1713 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1714 int64_t Imm = MI->getOperand(2).getImm();
1715 if (Imm <= 0)
1716 report("G_SEXT_INREG size must be >= 1", MI);
1717 if (Imm >= SrcTy.getScalarSizeInBits())
1718 report("G_SEXT_INREG size must be less than source bit width", MI);
1719 break;
1720 }
1721 case TargetOpcode::G_BSWAP: {
1722 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1723 if (DstTy.getScalarSizeInBits() % 16 != 0)
1724 report("G_BSWAP size must be a multiple of 16 bits", MI);
1725 break;
1726 }
1727 case TargetOpcode::G_VSCALE: {
1728 if (!MI->getOperand(1).isCImm()) {
1729 report("G_VSCALE operand must be cimm", MI);
1730 break;
1731 }
1732 if (MI->getOperand(1).getCImm()->isZero()) {
1733 report("G_VSCALE immediate cannot be zero", MI);
1734 break;
1735 }
1736 break;
1737 }
1738 case TargetOpcode::G_STEP_VECTOR: {
1739 if (!MI->getOperand(1).isCImm()) {
1740 report("operand must be cimm", MI);
1741 break;
1742 }
1743
1744 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1745 report("step must be > 0", MI);
1746 break;
1747 }
1748
1749 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1750 if (!DstTy.isScalableVector()) {
1751 report("Destination type must be a scalable vector", MI);
1752 break;
1753 }
1754
1755 // e.g. <vscale x 2 x p0>, whose element type is a pointer, is rejected.
1756 if (!DstTy.getElementType().isScalar()) {
1757 report("Destination element type must be scalar", MI);
1758 break;
1759 }
1760
1761 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1762 DstTy.getElementType().getSizeInBits()) {
1763 report("step bitwidth differs from result type element bitwidth", MI);
1764 break;
1765 }
1766 break;
1767 }
1768 case TargetOpcode::G_INSERT_SUBVECTOR: {
1769 const MachineOperand &Src0Op = MI->getOperand(1);
1770 if (!Src0Op.isReg()) {
1771 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1772 break;
1773 }
1774
1775 const MachineOperand &Src1Op = MI->getOperand(2);
1776 if (!Src1Op.isReg()) {
1777 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1778 break;
1779 }
1780
1781 const MachineOperand &IndexOp = MI->getOperand(3);
1782 if (!IndexOp.isImm()) {
1783 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1784 break;
1785 }
1786
1787 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1788 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1789
1790 if (!DstTy.isVector()) {
1791 report("Destination type must be a vector", MI);
1792 break;
1793 }
1794
1795 if (!Src1Ty.isVector()) {
1796 report("Second source must be a vector", MI);
1797 break;
1798 }
1799
1800 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1801 report("Element type of vectors must be the same", MI);
1802 break;
1803 }
1804
1805 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1806 report("Vector types must both be fixed or both be scalable", MI);
1807 break;
1808 }
1809 
1810 if (ElementCount::isKnownGT(Src1Ty.getElementCount(),
1811 DstTy.getElementCount())) {
1812 report("Second source must be smaller than destination vector", MI);
1813 break;
1814 }
1815
1816 uint64_t Idx = IndexOp.getImm();
1817 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1818 if (IndexOp.getImm() % Src1MinLen != 0) {
1819 report("Index must be a multiple of the second source vector's "
1820 "minimum vector length",
1821 MI);
1822 break;
1823 }
1824
1825 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1826 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1827 report("Subvector type and index must not cause insert to overrun the "
1828 "vector being inserted into",
1829 MI);
1830 break;
1831 }
1832
1833 break;
1834 }
1835 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1836 const MachineOperand &SrcOp = MI->getOperand(1);
1837 if (!SrcOp.isReg()) {
1838 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1839 break;
1840 }
1841
1842 const MachineOperand &IndexOp = MI->getOperand(2);
1843 if (!IndexOp.isImm()) {
1844 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1845 break;
1846 }
1847
1848 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1849 LLT SrcTy = MRI->getType(SrcOp.getReg());
1850
1851 if (!DstTy.isVector()) {
1852 report("Destination type must be a vector", MI);
1853 break;
1854 }
1855
1856 if (!SrcTy.isVector()) {
1857 report("Source must be a vector", MI);
1858 break;
1859 }
1860
1861 if (DstTy.getElementType() != SrcTy.getElementType()) {
1862 report("Element type of vectors must be the same", MI);
1863 break;
1864 }
1865
1866 if (SrcTy.isScalable() != DstTy.isScalable()) {
1867 report("Vector types must both be fixed or both be scalable", MI);
1868 break;
1869 }
1870 
1871 if (ElementCount::isKnownGT(DstTy.getElementCount(),
1872 SrcTy.getElementCount())) {
1873 report("Destination vector must be smaller than source vector", MI);
1874 break;
1875 }
1876
1877 uint64_t Idx = IndexOp.getImm();
1878 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1879 if (Idx % DstMinLen != 0) {
1880 report("Index must be a multiple of the destination vector's minimum "
1881 "vector length",
1882 MI);
1883 break;
1884 }
1885
1886 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1887 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1888 report("Destination type and index must not cause extract to overrun the "
1889 "source vector",
1890 MI);
1891 break;
1892 }
1893
1894 break;
1895 }
1896 case TargetOpcode::G_SHUFFLE_VECTOR: {
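// Mask indexes select lanes from the concatenation of the two sources, so
// the valid range is [0, 2 * SrcNumElts), with -1 marking an undef lane,
// and the mask length must equal the number of destination lanes.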
1897 const MachineOperand &MaskOp = MI->getOperand(3);
1898 if (!MaskOp.isShuffleMask()) {
1899 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1900 break;
1901 }
1902
1903 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1904 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1905 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1906
1907 if (Src0Ty != Src1Ty)
1908 report("Source operands must be the same type", MI);
1909
1910 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1911 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1912
1913 // Don't check that all operands are vector because scalars are used in
1914 // place of 1 element vectors.
1915 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1916 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1917
1918 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1919
1920 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1921 report("Wrong result type for shufflemask", MI);
1922
1923 for (int Idx : MaskIdxes) {
1924 if (Idx < 0)
1925 continue;
1926
1927 if (Idx >= 2 * SrcNumElts)
1928 report("Out of bounds shuffle index", MI);
1929 }
1930
1931 break;
1932 }
1933
1934 case TargetOpcode::G_SPLAT_VECTOR: {
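// Illustrative MIR: %v:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %s:_(s64)
// is accepted because the source may be implicitly truncated to the
// element type; a source narrower than the element type is rejected.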
1935 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1936 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1937
1938 if (!DstTy.isScalableVector()) {
1939 report("Destination type must be a scalable vector", MI);
1940 break;
1941 }
1942
1943 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1944 report("Source type must be a scalar or pointer", MI);
1945 break;
1946 }
1947 
1948 if (TypeSize::isKnownGT(DstTy.getElementType().getSizeInBits(),
1949 SrcTy.getSizeInBits())) {
1950 report("Element type of the destination must be the same size or smaller "
1951 "than the source type",
1952 MI);
1953 break;
1954 }
1955
1956 break;
1957 }
1958 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
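// The index operand must use the target's preferred vector index type
// (TLI->getVectorIdxTy), which is checked against IdxTy below.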
1959 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1960 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1961 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1962
1963 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1964 report("Destination type must be a scalar or pointer", MI);
1965 break;
1966 }
1967
1968 if (!SrcTy.isVector()) {
1969 report("First source must be a vector", MI);
1970 break;
1971 }
1972
1973 auto TLI = MF->getSubtarget().getTargetLowering();
1974 if (IdxTy.getSizeInBits() !=
1975 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1976 report("Index type must match VectorIdxTy", MI);
1977 break;
1978 }
1979
1980 break;
1981 }
1982 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1983 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1984 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1985 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1986 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1987
1988 if (!DstTy.isVector()) {
1989 report("Destination type must be a vector", MI);
1990 break;
1991 }
1992
1993 if (VecTy != DstTy) {
1994 report("Destination type and vector type must match", MI);
1995 break;
1996 }
1997
1998 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1999 report("Inserted element must be a scalar or pointer", MI);
2000 break;
2001 }
2002
2003 auto TLI = MF->getSubtarget().getTargetLowering();
2004 if (IdxTy.getSizeInBits() !=
2005 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
2006 report("Index type must match VectorIdxTy", MI);
2007 break;
2008 }
2009
2010 break;
2011 }
2012 case TargetOpcode::G_DYN_STACKALLOC: {
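// Operand layout: pointer result, scalar allocation size, and an
// immediate alignment, e.g. (illustrative MIR):
//   %p:_(p0) = G_DYN_STACKALLOC %size:_(s64), 16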
2013 const MachineOperand &DstOp = MI->getOperand(0);
2014 const MachineOperand &AllocOp = MI->getOperand(1);
2015 const MachineOperand &AlignOp = MI->getOperand(2);
2016
2017 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2018 report("dst operand 0 must be a pointer type", MI);
2019 break;
2020 }
2021
2022 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2023 report("src operand 1 must be a scalar reg type", MI);
2024 break;
2025 }
2026
2027 if (!AlignOp.isImm()) {
2028 report("src operand 2 must be an immediate type", MI);
2029 break;
2030 }
2031 break;
2032 }
2033 case TargetOpcode::G_MEMCPY_INLINE:
2034 case TargetOpcode::G_MEMCPY:
2035 case TargetOpcode::G_MEMMOVE: {
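// These opcodes carry exactly two memory operands: the store side of the
// destination first, then the load side of the source; the address-space
// checks below rely on that ordering.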
2036 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2037 if (MMOs.size() != 2) {
2038 report("memcpy/memmove must have 2 memory operands", MI);
2039 break;
2040 }
2041
2042 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2043 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2044 report("wrong memory operand types", MI);
2045 break;
2046 }
2047
2048 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2049 report("inconsistent memory operand sizes", MI);
2050
2051 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2052 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2053
2054 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2055 report("memory instruction operand must be a pointer", MI);
2056 break;
2057 }
2058
2059 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2060 report("inconsistent store address space", MI);
2061 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2062 report("inconsistent load address space", MI);
2063
2064 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2065 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2066 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2067
2068 break;
2069 }
2070 case TargetOpcode::G_BZERO:
2071 case TargetOpcode::G_MEMSET: {
2072 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2073 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2074 if (MMOs.size() != 1) {
2075 report(Twine(Name, " must have 1 memory operand"), MI);
2076 break;
2077 }
2078
2079 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2080 report(Twine(Name, " memory operand must be a store"), MI);
2081 break;
2082 }
2083
2084 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2085 if (!DstPtrTy.isPointer()) {
2086 report(Twine(Name, " operand must be a pointer"), MI);
2087 break;
2088 }
2089
2090 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2091 report("inconsistent " + Twine(Name, " address space"), MI);
2092
2093 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2094 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2095 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2096
2097 break;
2098 }
2099 case TargetOpcode::G_UBSANTRAP: {
2100 const MachineOperand &KindOp = MI->getOperand(0);
2101 if (!MI->getOperand(0).isImm()) {
2102 report("Crash kind must be an immediate", &KindOp, 0);
2103 break;
2104 }
2105 int64_t Kind = MI->getOperand(0).getImm();
2106 if (!isInt<8>(Kind))
2107 report("Crash kind must be 8 bit wide", &KindOp, 0);
2108 break;
2109 }
2110 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2111 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
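// Sequential (ordered) reductions take a scalar start value followed by
// the vector, e.g. (illustrative MIR):
//   %r:_(s32) = G_VECREDUCE_SEQ_FADD %acc:_(s32), %v:_(<4 x s32>)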
2112 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2113 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2114 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2115 if (!DstTy.isScalar())
2116 report("Vector reduction requires a scalar destination type", MI);
2117 if (!Src1Ty.isScalar())
2118 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2119 if (!Src2Ty.isVector())
2120 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2121 break;
2122 }
2123 case TargetOpcode::G_VECREDUCE_FADD:
2124 case TargetOpcode::G_VECREDUCE_FMUL:
2125 case TargetOpcode::G_VECREDUCE_FMAX:
2126 case TargetOpcode::G_VECREDUCE_FMIN:
2127 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2128 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2129 case TargetOpcode::G_VECREDUCE_ADD:
2130 case TargetOpcode::G_VECREDUCE_MUL:
2131 case TargetOpcode::G_VECREDUCE_AND:
2132 case TargetOpcode::G_VECREDUCE_OR:
2133 case TargetOpcode::G_VECREDUCE_XOR:
2134 case TargetOpcode::G_VECREDUCE_SMAX:
2135 case TargetOpcode::G_VECREDUCE_SMIN:
2136 case TargetOpcode::G_VECREDUCE_UMAX:
2137 case TargetOpcode::G_VECREDUCE_UMIN: {
2138 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2139 if (!DstTy.isScalar())
2140 report("Vector reduction requires a scalar destination type", MI);
2141 break;
2142 }
2143
2144 case TargetOpcode::G_SBFX:
2145 case TargetOpcode::G_UBFX: {
2146 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2147 if (DstTy.isVector()) {
2148 report("Bitfield extraction is not supported on vectors", MI);
2149 break;
2150 }
2151 break;
2152 }
2153 case TargetOpcode::G_SHL:
2154 case TargetOpcode::G_LSHR:
2155 case TargetOpcode::G_ASHR:
2156 case TargetOpcode::G_ROTR:
2157 case TargetOpcode::G_ROTL: {
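// Only the scalar-vs-vector shape of the two inputs is checked here; the
// shift or rotate amount may be narrower than the value being shifted.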
2158 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2159 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2160 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2161 report("Shifts and rotates require operands to be either all scalars or "
2162 "all vectors",
2163 MI);
2164 break;
2165 }
2166 break;
2167 }
2168 case TargetOpcode::G_LLROUND:
2169 case TargetOpcode::G_LROUND: {
2170 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2171 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2172 if (!DstTy.isValid() || !SrcTy.isValid())
2173 break;
2174 if (SrcTy.isPointer() || DstTy.isPointer()) {
2175 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2176 report(Twine(Op, " operand must not be a pointer type"), MI);
2177 } else if (SrcTy.isScalar()) {
2178 verifyAllRegOpsScalar(*MI, *MRI);
2179 break;
2180 } else if (SrcTy.isVector()) {
2181 verifyVectorElementMatch(SrcTy, DstTy, MI);
2182 break;
2183 }
2184 break;
2185 }
2186 case TargetOpcode::G_IS_FPCLASS: {
2187 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2188 LLT DestEltTy = DestTy.getScalarType();
2189 if (!DestEltTy.isScalar()) {
2190 report("Destination must be a scalar or vector of scalars", MI);
2191 break;
2192 }
2193 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2194 LLT SrcEltTy = SrcTy.getScalarType();
2195 if (!SrcEltTy.isScalar()) {
2196 report("Source must be a scalar or vector of scalars", MI);
2197 break;
2198 }
2199 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2200 break;
2201 const MachineOperand &TestMO = MI->getOperand(2);
2202 if (!TestMO.isImm()) {
2203 report("floating-point class set (operand 2) must be an immediate", MI);
2204 break;
2205 }
2206 int64_t Test = TestMO.getImm();
2207 if (Test < 0 || Test > fcAllFlags) {
2208 report("Incorrect floating-point class set (operand 2)", MI);
2209 break;
2210 }
2211 break;
2212 }
2213 case TargetOpcode::G_PREFETCH: {
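// Mirrors the llvm.prefetch intrinsic: address, read/write (0 or 1),
// locality (0-3), and cache type (0 or 1); each immediate is
// range-checked below.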
2214 const MachineOperand &AddrOp = MI->getOperand(0);
2215 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2216 report("addr operand must be a pointer", &AddrOp, 0);
2217 break;
2218 }
2219 const MachineOperand &RWOp = MI->getOperand(1);
2220 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2221 report("rw operand must be an immediate 0-1", &RWOp, 1);
2222 break;
2223 }
2224 const MachineOperand &LocalityOp = MI->getOperand(2);
2225 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2226 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2227 break;
2228 }
2229 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2230 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2231 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2232 break;
2233 }
2234 break;
2235 }
2236 case TargetOpcode::G_ASSERT_ALIGN: {
2237 if (MI->getOperand(2).getImm() < 1)
2238 report("alignment immediate must be >= 1", MI);
2239 break;
2240 }
2241 case TargetOpcode::G_CONSTANT_POOL: {
2242 if (!MI->getOperand(1).isCPI())
2243 report("Src operand 1 must be a constant pool index", MI);
2244 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2245 report("Dst operand 0 must be a pointer", MI);
2246 break;
2247 }
2248 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2249 const MachineOperand &AddrOp = MI->getOperand(1);
2250 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2251 report("addr operand must be a pointer", &AddrOp, 1);
2252 break;
2253 }
2254 default:
2255 break;
2256 }
2257}
2258
2259void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2260 const MCInstrDesc &MCID = MI->getDesc();
2261 if (MI->getNumOperands() < MCID.getNumOperands()) {
2262 report("Too few operands", MI);
2263 OS << MCID.getNumOperands() << " operands expected, but "
2264 << MI->getNumOperands() << " given.\n";
2265 }
2266
2267 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2268 report("NoConvergent flag expected only on convergent instructions.", MI);
2269
2270 if (MI->isPHI()) {
2271 if (MF->getProperties().hasProperty(
2272 MachineFunctionProperties::Property::NoPHIs))
2273 report("Found PHI instruction with NoPHIs property set", MI);
2274
2275 if (FirstNonPHI)
2276 report("Found PHI instruction after non-PHI", MI);
2277 } else if (FirstNonPHI == nullptr)
2278 FirstNonPHI = MI;
2279
2280 // Check the tied operands.
2281 if (MI->isInlineAsm())
2282 verifyInlineAsm(MI);
2283
2284 // Check that unspillable terminators define a reg and have at most one use.
2285 if (TII->isUnspillableTerminator(MI)) {
2286 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2287 report("Unspillable Terminator does not define a reg", MI);
2288 Register Def = MI->getOperand(0).getReg();
2289 if (Def.isVirtual() &&
2290 !MF->getProperties().hasProperty(
2291 MachineFunctionProperties::Property::NoVRegs) &&
2292 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2293 report("Unspillable Terminator expected to have at most one use!", MI);
2294 }
2295
2296 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2297 // DBG_VALUEs: these are convenient to use in tests, but should never get
2298 // generated.
2299 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2300 if (!MI->getDebugLoc())
2301 report("Missing DebugLoc for debug instruction", MI);
2302
2303 // Meta instructions should never be the subject of debug value tracking,
2304 // they don't create a value in the output program at all.
2305 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2306 report("Metadata instruction should not have a value tracking number", MI);
2307
2308 // Check the MachineMemOperands for basic consistency.
2309 for (MachineMemOperand *Op : MI->memoperands()) {
2310 if (Op->isLoad() && !MI->mayLoad())
2311 report("Missing mayLoad flag", MI);
2312 if (Op->isStore() && !MI->mayStore())
2313 report("Missing mayStore flag", MI);
2314 }
2315
2316 // Debug values must not have a slot index.
2317 // Other instructions must have one, unless they are inside a bundle.
2318 if (LiveInts) {
2319 bool mapped = !LiveInts->isNotInMIMap(*MI);
2320 if (MI->isDebugOrPseudoInstr()) {
2321 if (mapped)
2322 report("Debug instruction has a slot index", MI);
2323 } else if (MI->isInsideBundle()) {
2324 if (mapped)
2325 report("Instruction inside bundle has a slot index", MI);
2326 } else {
2327 if (!mapped)
2328 report("Missing slot index", MI);
2329 }
2330 }
2331
2332 unsigned Opc = MCID.getOpcode();
2333 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
2334 verifyPreISelGenericInstruction(MI);
2335 return;
2336 }
2337 
2338 StringRef ErrorInfo;
2339 if (!TII->verifyInstruction(*MI, ErrorInfo))
2340 report(ErrorInfo.data(), MI);
2341
2342 // Verify properties of various specific instruction types
2343 switch (MI->getOpcode()) {
2344 case TargetOpcode::COPY: {
2345 const MachineOperand &DstOp = MI->getOperand(0);
2346 const MachineOperand &SrcOp = MI->getOperand(1);
2347 const Register SrcReg = SrcOp.getReg();
2348 const Register DstReg = DstOp.getReg();
2349
2350 LLT DstTy = MRI->getType(DstReg);
2351 LLT SrcTy = MRI->getType(SrcReg);
2352 if (SrcTy.isValid() && DstTy.isValid()) {
2353 // If both types are valid, check that the types are the same.
2354 if (SrcTy != DstTy) {
2355 report("Copy Instruction is illegal with mismatching types", MI);
2356 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2357 }
2358
2359 break;
2360 }
2361
2362 if (!SrcTy.isValid() && !DstTy.isValid())
2363 break;
2364
2365 // If we have only one valid type, this is likely a copy between a virtual
2366 // and physical register.
2367 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2368 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2369 if (SrcReg.isPhysical() && DstTy.isValid()) {
2370 const TargetRegisterClass *SrcRC =
2371 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2372 if (SrcRC)
2373 SrcSize = TRI->getRegSizeInBits(*SrcRC);
2374 }
2375
2376 if (DstReg.isPhysical() && SrcTy.isValid()) {
2377 const TargetRegisterClass *DstRC =
2378 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2379 if (DstRC)
2380 DstSize = TRI->getRegSizeInBits(*DstRC);
2381 }
2382
2383 // The next two checks allow COPY between physical and virtual registers,
2384 // when the virtual register has a scalable size and the physical register
2385 // has a fixed size. These checks allow COPY between *potentially* mismatched
2386 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2387 // be able to resolve a fixed size for the scalable vector, and at that
2388 // point this function will know for sure whether the sizes are mismatched
2389 // and correctly report a size mismatch.
2390 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2391 !SrcSize.isScalable())
2392 break;
2393 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2394 !DstSize.isScalable())
2395 break;
2396
2397 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2398 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2399 report("Copy Instruction is illegal with mismatching sizes", MI);
2400 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2401 }
2402 }
2403 break;
2404 }
2405 case TargetOpcode::STATEPOINT: {
2406 StatepointOpers SO(MI);
2407 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2408 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2409 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2410 report("meta operands to STATEPOINT not constant!", MI);
2411 break;
2412 }
2413
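// Each stack map constant is encoded as a StackMaps::ConstantOp marker
// followed by the immediate payload, so the operand before Offset must
// hold the marker.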
2414 auto VerifyStackMapConstant = [&](unsigned Offset) {
2415 if (Offset >= MI->getNumOperands()) {
2416 report("stack map constant to STATEPOINT is out of range!", MI);
2417 return;
2418 }
2419 if (!MI->getOperand(Offset - 1).isImm() ||
2420 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2421 !MI->getOperand(Offset).isImm())
2422 report("stack map constant to STATEPOINT not well formed!", MI);
2423 };
2424 VerifyStackMapConstant(SO.getCCIdx());
2425 VerifyStackMapConstant(SO.getFlagsIdx());
2426 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2427 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2428 VerifyStackMapConstant(SO.getNumAllocaIdx());
2429 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2430
2431 // Verify that all explicit statepoint defs are tied to gc operands as
2432 // they are expected to be a relocation of gc operands.
2433 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2434 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2435 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2436 unsigned UseOpIdx;
2437 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2438 report("STATEPOINT defs expected to be tied", MI);
2439 break;
2440 }
2441 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2442 report("STATEPOINT def tied to non-gc operand", MI);
2443 break;
2444 }
2445 }
2446
2447 // TODO: verify we have properly encoded deopt arguments
2448 } break;
2449 case TargetOpcode::INSERT_SUBREG: {
2450 unsigned InsertedSize;
2451 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2452 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2453 else
2454 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2455 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2456 if (SubRegSize < InsertedSize) {
2457 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2458 "size than the subreg it was inserted into", MI);
2459 break;
2460 }
2461 } break;
2462 case TargetOpcode::REG_SEQUENCE: {
2463 unsigned NumOps = MI->getNumOperands();
2464 if (!(NumOps & 1)) {
2465 report("Invalid number of operands for REG_SEQUENCE", MI);
2466 break;
2467 }
2468
2469 for (unsigned I = 1; I != NumOps; I += 2) {
2470 const MachineOperand &RegOp = MI->getOperand(I);
2471 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2472
2473 if (!RegOp.isReg())
2474 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2475
2476 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2477 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2478 report("Invalid subregister index operand for REG_SEQUENCE",
2479 &SubRegOp, I + 1);
2480 }
2481 }
2482
2483 Register DstReg = MI->getOperand(0).getReg();
2484 if (DstReg.isPhysical())
2485 report("REG_SEQUENCE does not support physical register results", MI);
2486
2487 if (MI->getOperand(0).getSubReg())
2488 report("Invalid subreg result for REG_SEQUENCE", MI);
2489
2490 break;
2491 }
2492 }
2493}
2494
2495void
2496MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2497 const MachineInstr *MI = MO->getParent();
2498 const MCInstrDesc &MCID = MI->getDesc();
2499 unsigned NumDefs = MCID.getNumDefs();
2500 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2501 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2502
2503 // The first MCID.NumDefs operands must be explicit register defines
2504 if (MONum < NumDefs) {
2505 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2506 if (!MO->isReg())
2507 report("Explicit definition must be a register", MO, MONum);
2508 else if (!MO->isDef() && !MCOI.isOptionalDef())
2509 report("Explicit definition marked as use", MO, MONum);
2510 else if (MO->isImplicit())
2511 report("Explicit definition marked as implicit", MO, MONum);
2512 } else if (MONum < MCID.getNumOperands()) {
2513 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2514 // Don't check if it's the last operand in a variadic instruction. See,
2515 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2516 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2517 if (!IsOptional) {
2518 if (MO->isReg()) {
2519 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2520 report("Explicit operand marked as def", MO, MONum);
2521 if (MO->isImplicit())
2522 report("Explicit operand marked as implicit", MO, MONum);
2523 }
2524
2525 // Check that an instruction has register operands only as expected.
2526 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2527 !MO->isReg() && !MO->isFI())
2528 report("Expected a register operand.", MO, MONum);
2529 if (MO->isReg()) {
2530 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2531 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2532 !TII->isPCRelRegisterOperandLegal(*MO)))
2533 report("Expected a non-register operand.", MO, MONum);
2534 }
2535 }
2536
2537 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2538 if (TiedTo != -1) {
2539 if (!MO->isReg())
2540 report("Tied use must be a register", MO, MONum);
2541 else if (!MO->isTied())
2542 report("Operand should be tied", MO, MONum);
2543 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2544 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2545 else if (MO->getReg().isPhysical()) {
2546 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2547 if (!MOTied.isReg())
2548 report("Tied counterpart must be a register", &MOTied, TiedTo);
2549 else if (MOTied.getReg().isPhysical() &&
2550 MO->getReg() != MOTied.getReg())
2551 report("Tied physical registers must match.", &MOTied, TiedTo);
2552 }
2553 } else if (MO->isReg() && MO->isTied())
2554 report("Explicit operand should not be tied", MO, MONum);
2555 } else if (!MI->isVariadic()) {
2556 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2557 if (!MO->isValidExcessOperand())
2558 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2559 }
2560
2561 switch (MO->getType()) {
2562 case MachineOperand::MO_Register: {
2563 // Verify debug flag on debug instructions. Check this first because reg0
2564 // indicates an undefined debug value.
2565 if (MI->isDebugInstr() && MO->isUse()) {
2566 if (!MO->isDebug())
2567 report("Register operand must be marked debug", MO, MONum);
2568 } else if (MO->isDebug()) {
2569 report("Register operand must not be marked debug", MO, MONum);
2570 }
2571
2572 const Register Reg = MO->getReg();
2573 if (!Reg)
2574 return;
2575 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2576 checkLiveness(MO, MONum);
2577
2578 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2579 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2580 report("Undef virtual register def operands require a subregister", MO, MONum);
2581
2582 // Verify the consistency of tied operands.
2583 if (MO->isTied()) {
2584 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2585 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2586 if (!OtherMO.isReg())
2587 report("Must be tied to a register", MO, MONum);
2588 if (!OtherMO.isTied())
2589 report("Missing tie flags on tied operand", MO, MONum);
2590 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2591 report("Inconsistent tie links", MO, MONum);
2592 if (MONum < MCID.getNumDefs()) {
2593 if (OtherIdx < MCID.getNumOperands()) {
2594 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2595 report("Explicit def tied to explicit use without tie constraint",
2596 MO, MONum);
2597 } else {
2598 if (!OtherMO.isImplicit())
2599 report("Explicit def should be tied to implicit use", MO, MONum);
2600 }
2601 }
2602 }
2603
2604 // Verify two-address constraints after the twoaddressinstruction pass.
2605 // Both the twoaddressinstruction pass and the phi-node-elimination pass
2606 // call MRI->leaveSSA() to clear IsSSA, but this verification should run
2607 // after twoaddressinstruction, not after phi-node-elimination. So instead
2608 // of using IsSSA as the condition, we rely on the TiedOpsRewritten
2609 // property, which is set by the twoaddressinstruction pass, to decide
2610 // when to verify two-address constraints.
2611 unsigned DefIdx;
2612 if (MF->getProperties().hasProperty(
2613 MachineFunctionProperties::Property::TiedOpsRewritten) &&
2614 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2615 Reg != MI->getOperand(DefIdx).getReg())
2616 report("Two-address instruction operands must be identical", MO, MONum);
2617
2618 // Check register classes.
2619 unsigned SubIdx = MO->getSubReg();
2620
2621 if (Reg.isPhysical()) {
2622 if (SubIdx) {
2623 report("Illegal subregister index for physical register", MO, MONum);
2624 return;
2625 }
2626 if (MONum < MCID.getNumOperands()) {
2627 if (const TargetRegisterClass *DRC =
2628 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2629 if (!DRC->contains(Reg)) {
2630 report("Illegal physical register for instruction", MO, MONum);
2631 OS << printReg(Reg, TRI) << " is not a "
2632 << TRI->getRegClassName(DRC) << " register.\n";
2633 }
2634 }
2635 }
2636 if (MO->isRenamable()) {
2637 if (MRI->isReserved(Reg)) {
2638 report("isRenamable set on reserved register", MO, MONum);
2639 return;
2640 }
2641 }
2642 } else {
2643 // Virtual register.
2644 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2645 if (!RC) {
2646 // This is a generic virtual register.
2647
2648 // Do not allow undef uses for generic virtual registers. This ensures
2649 // getVRegDef can never fail and return null on a generic register.
2650 //
2651 // FIXME: This restriction should probably be broadened to all SSA
2652 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2653 // run on the SSA function just before phi elimination.
2654 if (MO->isUndef())
2655 report("Generic virtual register use cannot be undef", MO, MONum);
2656
2657 // Debug value instruction is permitted to use undefined vregs.
2658 // This is a performance measure to skip the overhead of immediately
2659 // pruning unused debug operands. The final undef substitution occurs
2660 // when debug values are allocated in LDVImpl::handleDebugValue, so
2661 // these verifications always apply after this pass.
2662 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2663 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2664 // If we're post-Select, we can't have gvregs anymore.
2665 if (isFunctionSelected) {
2666 report("Generic virtual register invalid in a Selected function",
2667 MO, MONum);
2668 return;
2669 }
2670
2671 // The gvreg must have a type and it must not have a SubIdx.
2672 LLT Ty = MRI->getType(Reg);
2673 if (!Ty.isValid()) {
2674 report("Generic virtual register must have a valid type", MO,
2675 MONum);
2676 return;
2677 }
2678
2679 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2680 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2681
2682 // If we're post-RegBankSelect, the gvreg must have a bank.
2683 if (!RegBank && isFunctionRegBankSelected) {
2684 report("Generic virtual register must have a bank in a "
2685 "RegBankSelected function",
2686 MO, MONum);
2687 return;
2688 }
2689
2690 // Make sure the register fits into its register bank if any.
2691 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2692 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2693 report("Register bank is too small for virtual register", MO,
2694 MONum);
2695 OS << "Register bank " << RegBank->getName() << " too small("
2696 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2697 << Ty.getSizeInBits() << "-bits\n";
2698 return;
2699 }
2700 }
2701
2702 if (SubIdx) {
2703 report("Generic virtual register does not allow subregister index", MO,
2704 MONum);
2705 return;
2706 }
2707
2708 // If this is a target specific instruction and this operand
2709 // has register class constraint, the virtual register must
2710 // comply to it.
2711 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2712 MONum < MCID.getNumOperands() &&
2713 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2714 report("Virtual register does not match instruction constraint", MO,
2715 MONum);
2716 OS << "Expect register class "
2717 << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
2718 << " but got nothing\n";
2719 return;
2720 }
2721
2722 break;
2723 }
2724 if (SubIdx) {
2725 const TargetRegisterClass *SRC =
2726 TRI->getSubClassWithSubReg(RC, SubIdx);
2727 if (!SRC) {
2728 report("Invalid subregister index for virtual register", MO, MONum);
2729 OS << "Register class " << TRI->getRegClassName(RC)
2730 << " does not support subreg index " << SubIdx << '\n';
2731 return;
2732 }
2733 if (RC != SRC) {
2734 report("Invalid register class for subregister index", MO, MONum);
2735 OS << "Register class " << TRI->getRegClassName(RC)
2736 << " does not fully support subreg index " << SubIdx << '\n';
2737 return;
2738 }
2739 }
2740 if (MONum < MCID.getNumOperands()) {
2741 if (const TargetRegisterClass *DRC =
2742 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2743 if (SubIdx) {
2744 const TargetRegisterClass *SuperRC =
2745 TRI->getLargestLegalSuperClass(RC, *MF);
2746 if (!SuperRC) {
2747 report("No largest legal super class exists.", MO, MONum);
2748 return;
2749 }
2750 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2751 if (!DRC) {
2752 report("No matching super-reg register class.", MO, MONum);
2753 return;
2754 }
2755 }
2756 if (!RC->hasSuperClassEq(DRC)) {
2757 report("Illegal virtual register for instruction", MO, MONum);
2758 OS << "Expected a " << TRI->getRegClassName(DRC)
2759 << " register, but got a " << TRI->getRegClassName(RC)
2760 << " register\n";
2761 }
2762 }
2763 }
2764 }
2765 break;
2766 }
2767 
2768 case MachineOperand::MO_RegisterMask:
2769 regMasks.push_back(MO->getRegMask());
2770 break;
2771 
2772 case MachineOperand::MO_MachineBasicBlock:
2773 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2774 report("PHI operand is not in the CFG", MO, MONum);
2775 break;
2776 
2777 case MachineOperand::MO_FrameIndex:
2778 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2779 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2780 int FI = MO->getIndex();
2781 LiveInterval &LI = LiveStks->getInterval(FI);
2782 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2783
2784 bool stores = MI->mayStore();
2785 bool loads = MI->mayLoad();
2786 // For a memory-to-memory move, we need to check if the frame
2787 // index is used for storing or loading, by inspecting the
2788 // memory operands.
2789 if (stores && loads) {
2790 for (auto *MMO : MI->memoperands()) {
2791 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2792 if (PSV == nullptr) continue;
2793 const FixedStackPseudoSourceValue *Value =
2794 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2795 if (Value == nullptr) continue;
2796 if (Value->getFrameIndex() != FI) continue;
2797
2798 if (MMO->isStore())
2799 loads = false;
2800 else
2801 stores = false;
2802 break;
2803 }
2804 if (loads == stores)
2805 report("Missing fixed stack memoperand.", MI);
2806 }
2807 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2808 report("Instruction loads from dead spill slot", MO, MONum);
2809 OS << "Live stack: " << LI << '\n';
2810 }
2811 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2812 report("Instruction stores to dead spill slot", MO, MONum);
2813 OS << "Live stack: " << LI << '\n';
2814 }
2815 }
2816 break;
2817 
2818 case MachineOperand::MO_CFIIndex:
2819 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2820 report("CFI instruction has invalid index", MO, MONum);
2821 break;
2822
2823 default:
2824 break;
2825 }
2826}
2827
2828void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2829 unsigned MONum, SlotIndex UseIdx,
2830 const LiveRange &LR,
2831 Register VRegOrUnit,
2832 LaneBitmask LaneMask) {
2833 const MachineInstr *MI = MO->getParent();
2834
2835 if (!LR.verify()) {
2836 report("invalid live range", MO, MONum);
2837 report_context_liverange(LR);
2838 report_context_vreg_regunit(VRegOrUnit);
2839 report_context(UseIdx);
2840 return;
2841 }
2842
2843 LiveQueryResult LRQ = LR.Query(UseIdx);
2844 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2845 // Check if we have a segment at the use; note, however, that we only need
2846 // one live subregister range, the others may be dead.
2847 if (!HasValue && LaneMask.none()) {
2848 report("No live segment at use", MO, MONum);
2849 report_context_liverange(LR);
2850 report_context_vreg_regunit(VRegOrUnit);
2851 report_context(UseIdx);
2852 }
2853 if (MO->isKill() && !LRQ.isKill()) {
2854 report("Live range continues after kill flag", MO, MONum);
2855 report_context_liverange(LR);
2856 report_context_vreg_regunit(VRegOrUnit);
2857 if (LaneMask.any())
2858 report_context_lanemask(LaneMask);
2859 report_context(UseIdx);
2860 }
2861}
2862
2863void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2864 unsigned MONum, SlotIndex DefIdx,
2865 const LiveRange &LR,
2866 Register VRegOrUnit,
2867 bool SubRangeCheck,
2868 LaneBitmask LaneMask) {
2869 if (!LR.verify()) {
2870 report("invalid live range", MO, MONum);
2871 report_context_liverange(LR);
2872 report_context_vreg_regunit(VRegOrUnit);
2873 if (LaneMask.any())
2874 report_context_lanemask(LaneMask);
2875 report_context(DefIdx);
2876 }
2877
2878 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2879 // The LR can correspond to the whole reg and its def slot is not obliged
2880 // to be the same as the MO's def slot. E.g. when we check a "normal"
2881 // subreg MO here but another early-clobber subreg MO exists in the same
2882 // instruction, the whole reg has an early-clobber def slot that differs
2883 // from the currently checked MO's def slot. For example:
2884 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2885 // The check that an early-clobber def of the same superregister exists
2886 // somewhere is performed in visitMachineFunctionAfter().
2887 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2888 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2889 (VNI->def != DefIdx &&
2890 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2891 report("Inconsistent valno->def", MO, MONum);
2892 report_context_liverange(LR);
2893 report_context_vreg_regunit(VRegOrUnit);
2894 if (LaneMask.any())
2895 report_context_lanemask(LaneMask);
2896 report_context(*VNI);
2897 report_context(DefIdx);
2898 }
2899 } else {
2900 report("No live segment at def", MO, MONum);
2901 report_context_liverange(LR);
2902 report_context_vreg_regunit(VRegOrUnit);
2903 if (LaneMask.any())
2904 report_context_lanemask(LaneMask);
2905 report_context(DefIdx);
2906 }
2907 // Check that, if the dead def flag is present, LiveInts agree.
2908 if (MO->isDead()) {
2909 LiveQueryResult LRQ = LR.Query(DefIdx);
2910 if (!LRQ.isDeadDef()) {
2911 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2912 // A dead subreg def only tells us that the specific subreg is dead. There
2913 // could be other non-dead defs of other subregs, or we could have other
2914 // parts of the register being live through the instruction. So unless we
2915 // are checking liveness for a subrange it is ok for the live range to
2916 // continue, given that we have a dead def of a subregister.
2917 if (SubRangeCheck || MO->getSubReg() == 0) {
2918 report("Live range continues after dead def flag", MO, MONum);
2919 report_context_liverange(LR);
2920 report_context_vreg_regunit(VRegOrUnit);
2921 if (LaneMask.any())
2922 report_context_lanemask(LaneMask);
2923 }
2924 }
2925 }
2926}
2927
2928void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2929 const MachineInstr *MI = MO->getParent();
2930 const Register Reg = MO->getReg();
2931 const unsigned SubRegIdx = MO->getSubReg();
2932
2933 const LiveInterval *LI = nullptr;
2934 if (LiveInts && Reg.isVirtual()) {
2935 if (LiveInts->hasInterval(Reg)) {
2936 LI = &LiveInts->getInterval(Reg);
2937 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2938 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2939 report("Live interval for subreg operand has no subranges", MO, MONum);
2940 } else {
2941 report("Virtual register has no live interval", MO, MONum);
2942 }
2943 }
2944
2945 // Both use and def operands can read a register.
2946 if (MO->readsReg()) {
2947 if (MO->isKill())
2948 addRegWithSubRegs(regsKilled, Reg);
2949
2950 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2951 // which case we have already checked that LiveVars knows any kills on the
2952 // bundle header instead).
2953 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2954 !MI->isBundledWithPred()) {
2955 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2956 if (!is_contained(VI.Kills, MI))
2957 report("Kill missing from LiveVariables", MO, MONum);
2958 }
2959
2960 // Check LiveInts liveness and kill.
2961 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2962 SlotIndex UseIdx;
2963 if (MI->isPHI()) {
2964 // PHI use occurs on the edge, so check for live out here instead.
2965 UseIdx = LiveInts->getMBBEndIdx(
2966 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2967 } else {
2968 UseIdx = LiveInts->getInstructionIndex(*MI);
2969 }
2970 // Check the cached regunit intervals.
2971 if (Reg.isPhysical() && !isReserved(Reg)) {
2972 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2973 if (MRI->isReservedRegUnit(Unit))
2974 continue;
2975 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2976 checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2977 }
2978 }
2979
2980 if (Reg.isVirtual()) {
2981 // This is a virtual register interval.
2982 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2983
2984 if (LI->hasSubRanges() && !MO->isDef()) {
2985 LaneBitmask MOMask = SubRegIdx != 0
2986 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2987 : MRI->getMaxLaneMaskForVReg(Reg);
2988 LaneBitmask LiveInMask;
2989 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2990 if ((MOMask & SR.LaneMask).none())
2991 continue;
2992 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2993 LiveQueryResult LRQ = SR.Query(UseIdx);
2994 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2995 LiveInMask |= SR.LaneMask;
2996 }
2997 // At least part of the register has to be live at the use.
2998 if ((LiveInMask & MOMask).none()) {
2999 report("No live subrange at use", MO, MONum);
3000 report_context(*LI);
3001 report_context(UseIdx);
3002 }
3003 // For PHIs all lanes should be live
3004 if (MI->isPHI() && LiveInMask != MOMask) {
3005 report("Not all lanes of PHI source live at use", MO, MONum);
3006 report_context(*LI);
3007 report_context(UseIdx);
3008 }
3009 }
3010 }
3011 }
3012
3013 // Use of a dead register.
3014 if (!regsLive.count(Reg)) {
3015 if (Reg.isPhysical()) {
3016 // Reserved registers may be used even when 'dead'.
3017 bool Bad = !isReserved(Reg);
3018 // We are fine if just any subregister has a defined value.
3019 if (Bad) {
3020
3021 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
3022 if (regsLive.count(SubReg)) {
3023 Bad = false;
3024 break;
3025 }
3026 }
3027 }
3028 // If there is an additional implicit-use of a super register we stop
3029 // here. By definition we are fine if the super register is not
3030 // (completely) dead; if the complete super register is dead we will
3031 // get a report for its operand.
3032 if (Bad) {
3033 for (const MachineOperand &MOP : MI->uses()) {
3034 if (!MOP.isReg() || !MOP.isImplicit())
3035 continue;
3036
3037 if (!MOP.getReg().isPhysical())
3038 continue;
3039
3040 if (MOP.getReg() != Reg &&
3041 all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
3042 return llvm::is_contained(TRI->regunits(MOP.getReg()),
3043 RegUnit);
3044 }))
3045 Bad = false;
3046 }
3047 }
3048 if (Bad)
3049 report("Using an undefined physical register", MO, MONum);
3050 } else if (MRI->def_empty(Reg)) {
3051 report("Reading virtual register without a def", MO, MONum);
3052 } else {
3053 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3054 // We don't know which virtual registers are live in, so only complain
3055 // if vreg was killed in this MBB. Otherwise keep track of vregs that
3056 // must be live in. PHI instructions are handled separately.
3057 if (MInfo.regsKilled.count(Reg))
3058 report("Using a killed virtual register", MO, MONum);
3059 else if (!MI->isPHI())
3060 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
3061 }
3062 }
3063 }
3064
3065 if (MO->isDef()) {
3066 // Register defined.
3067 // TODO: verify that earlyclobber ops are not used.
3068 if (MO->isDead())
3069 addRegWithSubRegs(regsDead, Reg);
3070 else
3071 addRegWithSubRegs(regsDefined, Reg);
3072
3073 // Verify SSA form.
3074 if (MRI->isSSA() && Reg.isVirtual() &&
3075 std::next(MRI->def_begin(Reg)) != MRI->def_end())
3076 report("Multiple virtual register defs in SSA form", MO, MONum);
3077
3078 // Check LiveInts for a live segment, but only for virtual registers.
3079 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3080 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3081 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3082
3083 if (Reg.isVirtual()) {
3084 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
3085
3086 if (LI->hasSubRanges()) {
3087 LaneBitmask MOMask = SubRegIdx != 0
3088 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3089 : MRI->getMaxLaneMaskForVReg(Reg);
3090 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3091 if ((SR.LaneMask & MOMask).none())
3092 continue;
3093 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
3094 }
3095 }
3096 }
3097 }
3098 }
3099}
3100
3101// This function gets called after visiting all instructions in a bundle. The
3102// argument points to the bundle header.
3103// Normal stand-alone instructions are also considered 'bundles', and this
3104// function is called for all of them.
3105void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3106 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3107 set_union(MInfo.regsKilled, regsKilled);
3108 set_subtract(regsLive, regsKilled); regsKilled.clear();
3109 // Kill any masked registers.
3110 while (!regMasks.empty()) {
3111 const uint32_t *Mask = regMasks.pop_back_val();
3112 for (Register Reg : regsLive)
3113 if (Reg.isPhysical() &&
3114 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3115 regsDead.push_back(Reg);
3116 }
3117 set_subtract(regsLive, regsDead); regsDead.clear();
3118 set_union(regsLive, regsDefined); regsDefined.clear();
3119}
3120
3121void
3122MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3123 MBBInfoMap[MBB].regsLiveOut = regsLive;
3124 regsLive.clear();
3125
3126 if (Indexes) {
3127 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3128 if (!(stop > lastIndex)) {
3129 report("Block ends before last instruction index", MBB);
3130 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3131 << '\n';
3132 }
3133 lastIndex = stop;
3134 }
3135}
3136
3137namespace {
3138// This implements a set of registers that serves as a filter: can filter other
3139// sets by passing through elements not in the filter and blocking those that
3140// are. Any filter implicitly includes the full set of physical registers upon
3141// creation, thus filtering them all out. The filter itself as a set only grows,
3142// and needs to be as efficient as possible.
3143struct VRegFilter {
3144 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3145 // no duplicates. Both virtual and physical registers are fine.
3146 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3147 SmallVector<Register, 0> VRegsBuffer;
3148 filterAndAdd(FromRegSet, VRegsBuffer);
3149 }
3150 // Filter \p FromRegSet through the filter and append passed elements into \p
3151 // ToVRegs. All elements appended are then added to the filter itself.
3152 // \returns true if anything changed.
3153 template <typename RegSetT>
3154 bool filterAndAdd(const RegSetT &FromRegSet,
3155 SmallVectorImpl<Register> &ToVRegs) {
3156 unsigned SparseUniverse = Sparse.size();
3157 unsigned NewSparseUniverse = SparseUniverse;
3158 unsigned NewDenseSize = Dense.size();
3159 size_t Begin = ToVRegs.size();
3160 for (Register Reg : FromRegSet) {
3161 if (!Reg.isVirtual())
3162 continue;
3163 unsigned Index = Register::virtReg2Index(Reg);
3164 if (Index < SparseUniverseMax) {
3165 if (Index < SparseUniverse && Sparse.test(Index))
3166 continue;
3167 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3168 } else {
3169 if (Dense.count(Reg))
3170 continue;
3171 ++NewDenseSize;
3172 }
3173 ToVRegs.push_back(Reg);
3174 }
3175 size_t End = ToVRegs.size();
3176 if (Begin == End)
3177 return false;
3178 // Reserving space in sets once performs better than doing so continuously
3179 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3180 // tuned all the way down) and double iteration (the second one is over a
3181 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3182 Sparse.resize(NewSparseUniverse);
3183 Dense.reserve(NewDenseSize);
3184 for (unsigned I = Begin; I < End; ++I) {
3185 Register Reg = ToVRegs[I];
3186 unsigned Index = Register::virtReg2Index(Reg);
3187 if (Index < SparseUniverseMax)
3188 Sparse.set(Index);
3189 else
3190 Dense.insert(Reg);
3191 }
3192 return true;
3193 }
3194
3195private:
3196 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3197 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
3198 // are tracked by Dense. The only purpose of the threshold and the Dense set
3199 // is to have a reasonably growing memory usage in pathological cases (large
3200 // number of very sparse VRegFilter instances live at the same time). In
3201 // practice even in the worst-by-execution time cases having all elements
3202 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3203 // space efficient than if tracked by Dense. The threshold is set to keep the
3204 // worst-case memory usage within 2x of figures determined empirically for
3205 // "all Dense" scenario in such worst-by-execution-time cases.
3206 BitVector Sparse;
3207 DenseSet<unsigned> Dense;
3208 };
3209
3210// Implements both a transfer function and a (binary, in-place) join operator
3211// for a dataflow over register sets with set union join and filtering transfer
3212// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3213// Maintains out_b as its state, allowing for O(n) iteration over it at any
3214// time, where n is the size of the set (as opposed to O(U) where U is the
3215// universe). filter_b implicitly contains all physical registers at all times.
3216class FilteringVRegSet {
3217 VRegFilter Filter;
3218 SmallVector<Register, 0> VRegs;
3219 
3220public:
3221 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3222 // Both virtual and physical registers are fine.
3223 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3224 Filter.add(RS);
3225 }
3226 // Passes \p RS through the filter_b (transfer function) and adds what's left
3227 // to itself (out_b).
3228 template <typename RegSetT> bool add(const RegSetT &RS) {
3229 // Double-duty the Filter: to keep VRegs a set (and the join operation
3230 // a set union), everything added here is also added to the Filter.
3231 return Filter.filterAndAdd(RS, VRegs);
3232 }
3233 using const_iterator = decltype(VRegs)::const_iterator;
3234 const_iterator begin() const { return VRegs.begin(); }
3235 const_iterator end() const { return VRegs.end(); }
3236 size_t size() const { return VRegs.size(); }
3237};
3238} // namespace
3239
3240// Calculate the largest possible vregsPassed sets. These are the registers that
3241// can pass through an MBB live, but may not be live every time. It is assumed
3242// that all vregsPassed sets are empty before the call.
3243void MachineVerifier::calcRegsPassed() {
3244 if (MF->empty())
3245 // ReversePostOrderTraversal doesn't handle empty functions.
3246 return;
3247
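// A single reverse post-order sweep; VRegFilter (above) keeps physical
// registers and already-recorded vregs out of each block's vregsPassed.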
3248 for (const MachineBasicBlock *MB :
3249 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
3250 FilteringVRegSet VRegs;
3251 BBInfo &Info = MBBInfoMap[MB];
3252 assert(Info.reachable);
3253
3254 VRegs.addToFilter(Info.regsKilled);
3255 VRegs.addToFilter(Info.regsLiveOut);
3256 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3257 const BBInfo &PredInfo = MBBInfoMap[Pred];
3258 if (!PredInfo.reachable)
3259 continue;
3260
3261 VRegs.add(PredInfo.regsLiveOut);
3262 VRegs.add(PredInfo.vregsPassed);
3263 }
3264 Info.vregsPassed.reserve(VRegs.size());
3265 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3266 }
3267}
3268
3269// Calculate the set of virtual registers that must be passed through each basic
3270// block in order to satisfy the requirements of successor blocks. This is very
3271// similar to calcRegsPassed, only backwards.
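// Sketch of the propagation: each block's vregsLiveIn and PHI inputs are
// first pushed into the predecessors' vregsRequired; the worklist loop then
// keeps pushing vregsRequired across predecessor edges until no set changes,
// i.e. until a fixed point is reached.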
3272void MachineVerifier::calcRegsRequired() {
3273 // First push live-in regs to predecessors' vregsRequired.
3274 SmallPtrSet<const MachineBasicBlock*, 8> todo;
3275 for (const auto &MBB : *MF) {
3276 BBInfo &MInfo = MBBInfoMap[&MBB];
3277 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3278 BBInfo &PInfo = MBBInfoMap[Pred];
3279 if (PInfo.addRequired(MInfo.vregsLiveIn))
3280 todo.insert(Pred);
3281 }
3282
3283 // Handle the PHI node.
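// A machine PHI lists (value, predecessor) operand pairs after the def,
// e.g. (illustrative): %dst = PHI %a, %bb.1, %b, %bb.2
// which is why this loop starts at operand 1 and strides by 2.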
3284 for (const MachineInstr &MI : MBB.phis()) {
3285 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3286 // Skip those Operands which are undef regs or not regs.
3287 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3288 continue;
3289
3290 // Get register and predecessor for one PHI edge.
3291 Register Reg = MI.getOperand(i).getReg();
3292 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3293
3294 BBInfo &PInfo = MBBInfoMap[Pred];
3295 if (PInfo.addRequired(Reg))
3296 todo.insert(Pred);
3297 }
3298 }
3299 }
3300
3301 // Iteratively push vregsRequired to predecessors. This will converge to the
3302 // same final state regardless of DenseSet iteration order.
3303 while (!todo.empty()) {
3304 const MachineBasicBlock *MBB = *todo.begin();
3305 todo.erase(MBB);
3306 BBInfo &MInfo = MBBInfoMap[MBB];
3307 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3308 if (Pred == MBB)
3309 continue;
3310 BBInfo &SInfo = MBBInfoMap[Pred];
3311 if (SInfo.addRequired(MInfo.vregsRequired))
3312 todo.insert(Pred);
3313 }
3314 }
3315}
3316
3317// Check PHI instructions at the beginning of MBB. It is assumed that
3318// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3319void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3320 BBInfo &MInfo = MBBInfoMap[&MBB];
3321
3322 SmallPtrSet<const MachineBasicBlock*, 8> seen;
3323 for (const MachineInstr &Phi : MBB) {
3324 if (!Phi.isPHI())
3325 break;
3326 seen.clear();
3327
3328 const MachineOperand &MODef = Phi.getOperand(0);
3329 if (!MODef.isReg() || !MODef.isDef()) {
3330 report("Expected first PHI operand to be a register def", &MODef, 0);
3331 continue;
3332 }
3333 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3334 MODef.isEarlyClobber() || MODef.isDebug())
3335 report("Unexpected flag on PHI operand", &MODef, 0);
3336 Register DefReg = MODef.getReg();
3337 if (!DefReg.isVirtual())
3338 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3339
3340 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3341 const MachineOperand &MO0 = Phi.getOperand(I);
3342 if (!MO0.isReg()) {
3343 report("Expected PHI operand to be a register", &MO0, I);
3344 continue;
3345 }
3346 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3347 MO0.isDebug() || MO0.isTied())
3348 report("Unexpected flag on PHI operand", &MO0, I);
3349
3350 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3351 if (!MO1.isMBB()) {
3352 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3353 continue;
3354 }
3355
3356 const MachineBasicBlock &Pre = *MO1.getMBB();
3357 if (!Pre.isSuccessor(&MBB)) {
3358 report("PHI input is not a predecessor block", &MO1, I + 1);
3359 continue;
3360 }
3361
3362 if (MInfo.reachable) {
3363 seen.insert(&Pre);
3364 BBInfo &PrInfo = MBBInfoMap[&Pre];
3365 if (!MO0.isUndef() && PrInfo.reachable &&
3366 !PrInfo.isLiveOut(MO0.getReg()))
3367 report("PHI operand is not live-out from predecessor", &MO0, I);
3368 }
3369 }
3370
3371 // Did we see all predecessors?
3372 if (MInfo.reachable) {
3373 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3374 if (!seen.count(Pred)) {
3375 report("Missing PHI operand", &Phi);
3376 OS << printMBBReference(*Pred)
3377 << " is a predecessor according to the CFG.\n";
3378 }
3379 }
3380 }
3381 }
3382}
3383
3384static void
3385verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
3386 std::function<void(const Twine &Message)> FailureCB,
3387 raw_ostream &OS) {
3388 MachineConvergenceVerifier CV;
3389 CV.initialize(&OS, FailureCB, MF);
3390
3391 for (const auto &MBB : MF) {
3392 CV.visit(MBB);
3393 for (const auto &MI : MBB.instrs())
3394 CV.visit(MI);
3395 }
3396
3397 if (CV.sawTokens()) {
3398 DT.recalculate(const_cast<MachineFunction &>(MF));
3399 CV.verify(DT);
3400 }
3401}
3402
3403void MachineVerifier::visitMachineFunctionAfter() {
3404 auto FailureCB = [this](const Twine &Message) {
3405 report(Message.str().c_str(), MF);
3406 };
3407 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3408
3409 calcRegsPassed();
3410
3411 for (const MachineBasicBlock &MBB : *MF)
3412 checkPHIOps(MBB);
3413
3414 // Now check liveness info if available
3415 calcRegsRequired();
3416
3417 // Check for killed virtual registers that should be live out.
3418 for (const auto &MBB : *MF) {
3419 BBInfo &MInfo = MBBInfoMap[&MBB];
3420 for (Register VReg : MInfo.vregsRequired)
3421 if (MInfo.regsKilled.count(VReg)) {
3422 report("Virtual register killed in block, but needed live out.", &MBB);
3423 OS << "Virtual register " << printReg(VReg)
3424 << " is used after the block.\n";
3425 }
3426 }
3427
3428 if (!MF->empty()) {
3429 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3430 for (Register VReg : MInfo.vregsRequired) {
3431 report("Virtual register defs don't dominate all uses.", MF);
3432 report_context_vreg(VReg);
3433 }
3434 }
3435
3436 if (LiveVars)
3437 verifyLiveVariables();
3438 if (LiveInts)
3439 verifyLiveIntervals();
3440
3441 // Check live-in list of each MBB. If a register is live into MBB, check
3442 // that the register is in regsLiveOut of each predecessor block. Since
3443 // this must come from a definition in the predecessor or its live-in
3444 // list, this will catch a live-through case where the predecessor does not
3445 // have the register in its live-in list. This currently only checks
3446 // registers that have no aliases, are not allocatable and are not
3447 // reserved, which could mean a condition code register for instance.
3448 if (MRI->tracksLiveness())
3449 for (const auto &MBB : *MF)
3450 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
3451 MCPhysReg LiveInReg = P.PhysReg;
3452 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3453 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3454 continue;
3455 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3456 BBInfo &PInfo = MBBInfoMap[Pred];
3457 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3458 report("Live in register not found to be live out from predecessor.",
3459 &MBB);
3460 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3461 << printMBBReference(*Pred) << '\n';
3462 }
3463 }
3464 }
3465
3466 for (auto CSInfo : MF->getCallSitesInfo())
3467 if (!CSInfo.first->isCall())
3468 report("Call site info referencing instruction that is not call", MF);
3469
3470 // If there's debug-info, check that we don't have any duplicate value
3471 // tracking numbers.
3472 if (MF->getFunction().getSubprogram()) {
3473 DenseSet<unsigned> SeenNumbers;
3474 for (const auto &MBB : *MF) {
3475 for (const auto &MI : MBB) {
3476 if (auto Num = MI.peekDebugInstrNum()) {
3477 auto Result = SeenNumbers.insert((unsigned)Num);
3478 if (!Result.second)
3479 report("Instruction has a duplicated value tracking number", &MI);
3480 }
3481 }
3482 }
3483 }
3484}
3485
3486void MachineVerifier::verifyLiveVariables() {
3487 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3488 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3489 Register Reg = Register::index2VirtReg(I);
3490 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3491 for (const auto &MBB : *MF) {
3492 BBInfo &MInfo = MBBInfoMap[&MBB];
3493
3494 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3495 if (MInfo.vregsRequired.count(Reg)) {
3496 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3497 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3498 OS << "Virtual register " << printReg(Reg)
3499 << " must be live through the block.\n";
3500 }
3501 } else {
3502 if (VI.AliveBlocks.test(MBB.getNumber())) {
3503 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3504 OS << "Virtual register " << printReg(Reg)
3505 << " is not needed live through the block.\n";
3506 }
3507 }
3508 }
3509 }
3510}
3511
3512void MachineVerifier::verifyLiveIntervals() {
3513 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3514 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3515 Register Reg = Register::index2VirtReg(I);
3516
3517 // Spilling and splitting may leave unused registers around. Skip them.
3518 if (MRI->reg_nodbg_empty(Reg))
3519 continue;
3520
3521 if (!LiveInts->hasInterval(Reg)) {
3522 report("Missing live interval for virtual register", MF);
3523 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3524 continue;
3525 }
3526
3527 const LiveInterval &LI = LiveInts->getInterval(Reg);
3528 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3529 verifyLiveInterval(LI);
3530 }
3531
3532 // Verify all the cached regunit intervals.
3533 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3534 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3535 verifyLiveRange(*LR, i);
3536}
3537
3538void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3539 const VNInfo *VNI, Register Reg,
3540 LaneBitmask LaneMask) {
3541 if (VNI->isUnused())
3542 return;
3543
3544 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3545
3546 if (!DefVNI) {
3547 report("Value not live at VNInfo def and not marked unused", MF);
3548 report_context(LR, Reg, LaneMask);
3549 report_context(*VNI);
3550 return;
3551 }
3552
3553 if (DefVNI != VNI) {
3554 report("Live segment at def has different VNInfo", MF);
3555 report_context(LR, Reg, LaneMask);
3556 report_context(*VNI);
3557 return;
3558 }
3559
3560 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3561 if (!MBB) {
3562 report("Invalid VNInfo definition index", MF);
3563 report_context(LR, Reg, LaneMask);
3564 report_context(*VNI);
3565 return;
3566 }
3567
3568 if (VNI->isPHIDef()) {
3569 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3570 report("PHIDef VNInfo is not defined at MBB start", MBB);
3571 report_context(LR, Reg, LaneMask);
3572 report_context(*VNI);
3573 }
3574 return;
3575 }
3576
3577 // Non-PHI def.
3578 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3579 if (!MI) {
3580 report("No instruction at VNInfo def index", MBB);
3581 report_context(LR, Reg, LaneMask);
3582 report_context(*VNI);
3583 return;
3584 }
3585
3586 if (Reg != 0) {
3587 bool hasDef = false;
3588 bool isEarlyClobber = false;
3589 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3590 if (!MOI->isReg() || !MOI->isDef())
3591 continue;
3592 if (Reg.isVirtual()) {
3593 if (MOI->getReg() != Reg)
3594 continue;
3595 } else {
3596 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3597 continue;
3598 }
3599 if (LaneMask.any() &&
3600 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3601 continue;
3602 hasDef = true;
3603 if (MOI->isEarlyClobber())
3604 isEarlyClobber = true;
3605 }
3606
3607 if (!hasDef) {
3608 report("Defining instruction does not modify register", MI);
3609 report_context(LR, Reg, LaneMask);
3610 report_context(*VNI);
3611 }
3612
3613 // Early clobber defs begin at USE slots, but other defs must begin at
3614 // DEF slots.
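// (Each instruction owns consecutive SlotIndex slots, ordered
// Block < EarlyClobber < Register < Dead; the predicates below test which
// of those slots the def index refers to.)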
3615 if (isEarlyClobber) {
3616 if (!VNI->def.isEarlyClobber()) {
3617 report("Early clobber def must be at an early-clobber slot", MBB);
3618 report_context(LR, Reg, LaneMask);
3619 report_context(*VNI);
3620 }
3621 } else if (!VNI->def.isRegister()) {
3622 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3623 report_context(LR, Reg, LaneMask);
3624 report_context(*VNI);
3625 }
3626 }
3627}
3628
3629void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3630 const LiveRange::const_iterator I,
3631 Register Reg,
3632 LaneBitmask LaneMask) {
3633 const LiveRange::Segment &S = *I;
3634 const VNInfo *VNI = S.valno;
3635 assert(VNI && "Live segment has no valno");
3636
3637 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3638 report("Foreign valno in live segment", MF);
3639 report_context(LR, Reg, LaneMask);
3640 report_context(S);
3641 report_context(*VNI);
3642 }
3643
3644 if (VNI->isUnused()) {
3645 report("Live segment valno is marked unused", MF);
3646 report_context(LR, Reg, LaneMask);
3647 report_context(S);
3648 }
3649
3650 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3651 if (!MBB) {
3652 report("Bad start of live segment, no basic block", MF);
3653 report_context(LR, Reg, LaneMask);
3654 report_context(S);
3655 return;
3656 }
3657 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3658 if (S.start != MBBStartIdx && S.start != VNI->def) {
3659 report("Live segment must begin at MBB entry or valno def", MBB);
3660 report_context(LR, Reg, LaneMask);
3661 report_context(S);
3662 }
3663
3664 const MachineBasicBlock *EndMBB =
3665 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3666 if (!EndMBB) {
3667 report("Bad end of live segment, no basic block", MF);
3668 report_context(LR, Reg, LaneMask);
3669 report_context(S);
3670 return;
3671 }
3672
3673 // Checks for non-live-out segments.
3674 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3675 // RegUnit intervals are allowed to contain dead PHI-defs.
3676 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3677 S.end == VNI->def.getDeadSlot())
3678 return;
3679
3680 // The live segment is ending inside EndMBB
3681 const MachineInstr *MI =
3682 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3683 if (!MI) {
3684 report("Live segment doesn't end at a valid instruction", EndMBB);
3685 report_context(LR, Reg, LaneMask);
3686 report_context(S);
3687 return;
3688 }
3689
3690 // The block slot must refer to a basic block boundary.
3691 if (S.end.isBlock()) {
3692 report("Live segment ends at B slot of an instruction", EndMBB);
3693 report_context(LR, Reg, LaneMask);
3694 report_context(S);
3695 }
3696
3697 if (S.end.isDead()) {
3698 // Segment ends on the dead slot.
3699 // That means there must be a dead def.
3700 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3701 report("Live segment ending at dead slot spans instructions", EndMBB);
3702 report_context(LR, Reg, LaneMask);
3703 report_context(S);
3704 }
3705 }
3706
3707 // After tied operands are rewritten, a live segment can only end at an
3708 // early-clobber slot if it is being redefined by an early-clobber def.
3709 // TODO: Before tied operands are rewritten, a live segment can only end at
3710 // an early-clobber slot if the last use is tied to an early-clobber def.
3711 if (MF->getProperties().hasProperty(
3712 MachineFunctionProperties::Property::TiedOpsRewritten) &&
3713 S.end.isEarlyClobber()) {
3714 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3715 report("Live segment ending at early clobber slot must be "
3716 "redefined by an EC def in the same instruction",
3717 EndMBB);
3718 report_context(LR, Reg, LaneMask);
3719 report_context(S);
3720 }
3721 }
3722
3723 // The following checks only apply to virtual registers. Physreg liveness
3724 // is too weird to check.
3725 if (Reg.isVirtual()) {
3726 // A live segment can end with either a redefinition, a kill flag on a
3727 // use, or a dead flag on a def.
3728 bool hasRead = false;
3729 bool hasSubRegDef = false;
3730 bool hasDeadDef = false;
3731 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3732 if (!MOI->isReg() || MOI->getReg() != Reg)
3733 continue;
3734 unsigned Sub = MOI->getSubReg();
3735 LaneBitmask SLM =
3736 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3737 if (MOI->isDef()) {
3738 if (Sub != 0) {
3739 hasSubRegDef = true;
3740 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3741 // mask for subregister defs. Read-undef defs will be handled by
3742 // readsReg below.
3743 SLM = ~SLM;
3744 }
3745 if (MOI->isDead())
3746 hasDeadDef = true;
3747 }
3748 if (LaneMask.any() && (LaneMask & SLM).none())
3749 continue;
3750 if (MOI->readsReg())
3751 hasRead = true;
3752 }
3753 if (S.end.isDead()) {
3754 // Make sure that the corresponding machine operand for a "dead" live
3755 // range has the dead flag. We cannot perform this check for subregister
3756 // live ranges, as partially dead values are allowed.
3757 if (LaneMask.none() && !hasDeadDef) {
3758 report(
3759 "Instruction ending live segment on dead slot has no dead flag",
3760 MI);
3761 report_context(LR, Reg, LaneMask);
3762 report_context(S);
3763 }
3764 } else {
3765 if (!hasRead) {
3766 // When tracking subregister liveness, the main range must start new
3767 // values on partial register writes, even if there is no read.
3768 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3769 !hasSubRegDef) {
3770 report("Instruction ending live segment doesn't read the register",
3771 MI);
3772 report_context(LR, Reg, LaneMask);
3773 report_context(S);
3774 }
3775 }
3776 }
3777 }
3778 }
3779
3780 // Now check all the basic blocks in this live segment.
3781 MachineFunction::const_iterator MFI = MBB->getIterator();
3782 // Is this live segment the beginning of a non-PHIDef VN?
3783 if (S.start == VNI->def && !VNI->isPHIDef()) {
3784 // Not live-in to any blocks.
3785 if (MBB == EndMBB)
3786 return;
3787 // Skip this block.
3788 ++MFI;
3789 }
3790
3791 SmallVector<SlotIndex, 4> Undefs;
3792 if (LaneMask.any()) {
3793 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3794 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3795 }
3796
3797 while (true) {
3798 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3799 // We don't know how to track physregs into a landing pad.
3800 if (!Reg.isVirtual() && MFI->isEHPad()) {
3801 if (&*MFI == EndMBB)
3802 break;
3803 ++MFI;
3804 continue;
3805 }
3806
3807 // Is VNI a PHI-def in the current block?
3808 bool IsPHI = VNI->isPHIDef() &&
3809 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3810
3811 // Check that VNI is live-out of all predecessors.
3812 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3813 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3814 // Predecessors of a landing pad are live-out at their last call, not at the block end.
3815 if (MFI->isEHPad()) {
3816 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3817 if (MI.isCall()) {
3818 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3819 break;
3820 }
3821 }
3822 }
3823 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3824
3825 // All predecessors must have a live-out value. However for a phi
3826 // instruction with subregister intervals
3827 // only one of the subregisters (not necessarily the current one) needs to
3828 // be defined.
3829 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3830 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3831 continue;
3832 report("Register not marked live out of predecessor", Pred);
3833 report_context(LR, Reg, LaneMask);
3834 report_context(*VNI);
3835 OS << " live into " << printMBBReference(*MFI) << '@'
3836 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3837 << '\n';
3838 continue;
3839 }
3840
3841 // Only PHI-defs can take different predecessor values.
3842 if (!IsPHI && PVNI != VNI) {
3843 report("Different value live out of predecessor", Pred);
3844 report_context(LR, Reg, LaneMask);
3845 OS << "Valno #" << PVNI->id << " live out of "
3846 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3847 << " live into " << printMBBReference(*MFI) << '@'
3848 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3849 }
3850 }
3851 if (&*MFI == EndMBB)
3852 break;
3853 ++MFI;
3854 }
3855}
3856
3857void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3858 LaneBitmask LaneMask) {
3859 for (const VNInfo *VNI : LR.valnos)
3860 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3861
3862 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3863 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3864}
3865
3866void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3867 Register Reg = LI.reg();
3868 assert(Reg.isVirtual());
3869 verifyLiveRange(LI, Reg);
3870
3871 if (LI.hasSubRanges()) {
3872 LaneBitmask Mask;
3873 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3874 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3875 if ((Mask & SR.LaneMask).any()) {
3876 report("Lane masks of sub ranges overlap in live interval", MF);
3877 report_context(LI);
3878 }
3879 if ((SR.LaneMask & ~MaxMask).any()) {
3880 report("Subrange lanemask is invalid", MF);
3881 report_context(LI);
3882 }
3883 if (SR.empty()) {
3884 report("Subrange must not be empty", MF);
3885 report_context(SR, LI.reg(), SR.LaneMask);
3886 }
3887 Mask |= SR.LaneMask;
3888 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3889 if (!LI.covers(SR)) {
3890 report("A Subrange is not covered by the main range", MF);
3891 report_context(LI);
3892 }
3893 }
3894 }
3895
3896 // Check the LI only has one connected component.
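// (Valnos that can never flow into one another belong in separate virtual
// registers; ConnectedVNInfoEqClasses partitions the valnos so such
// unsplit intervals can be detected.)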
3897 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3898 unsigned NumComp = ConEQ.Classify(LI);
3899 if (NumComp > 1) {
3900 report("Multiple connected components in live interval", MF);
3901 report_context(LI);
3902 for (unsigned comp = 0; comp != NumComp; ++comp) {
3903 OS << comp << ": valnos";
3904 for (const VNInfo *I : LI.valnos)
3905 if (comp == ConEQ.getEqClass(I))
3906 OS << ' ' << I->id;
3907 OS << '\n';
3908 }
3909 }
3910}
3911
3912namespace {
3913
3914 // FrameSetup and FrameDestroy can have zero adjustment, so with a single
3915 // integer alone we could not tell whether a zero-valued state came from a
3916 // FrameSetup or a FrameDestroy.
3917 // We therefore use a bool plus an integer to capture the stack state.
3918struct StackStateOfBB {
3919 StackStateOfBB() = default;
3920 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
3921 : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3922 ExitIsSetup(ExitSetup) {}
3923
3924 // Can be negative, which means we are setting up a frame.
3925 int EntryValue = 0;
3926 int ExitValue = 0;
3927 bool EntryIsSetup = false;
3928 bool ExitIsSetup = false;
3929};
3930
3931} // end anonymous namespace
3932
3933/// Make sure that on every path through the CFG a FrameSetup <n> is always
3934/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
3935/// CFG edges to a merge point, and that the frame is destroyed at the end of a return block.
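/// Illustrative example (using ADJCALLSTACKDOWN/ADJCALLSTACKUP, the pseudo
/// names many targets give these opcodes): in a call sequence such as
///   ADJCALLSTACKDOWN 16 ... CALL @f ... ADJCALLSTACKUP 16
/// the FrameSetup subtracts 16 from the tracked exit value and the
/// FrameDestroy adds it back, so a return block must end at zero.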
3936void MachineVerifier::verifyStackFrame() {
3937 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3938 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3939 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3940 return;
3941
3942 SmallVector<StackStateOfBB, 8> SPState;
3943 SPState.resize(MF->getNumBlockIDs());
3944 df_iterator_default_set<const MachineBasicBlock*> Reachable;
3945
3946 // Visit the MBBs in DFS order.
3947 for (df_ext_iterator<const MachineFunction *,
3948 df_iterator_default_set<const MachineBasicBlock *>>
3949 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3950 DFI != DFE; ++DFI) {
3951 const MachineBasicBlock *MBB = *DFI;
3952
3953 StackStateOfBB BBState;
3954 // Check the exit state of the DFS stack predecessor.
3955 if (DFI.getPathLength() >= 2) {
3956 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3957 assert(Reachable.count(StackPred) &&
3958 "DFS stack predecessor is already visited.\n");
3959 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3960 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3961 BBState.ExitValue = BBState.EntryValue;
3962 BBState.ExitIsSetup = BBState.EntryIsSetup;
3963 }
3964
3965 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3966 report("Call frame size on entry does not match value computed from "
3967 "predecessor",
3968 MBB);
3969 OS << "Call frame size on entry " << MBB->getCallFrameSize()
3970 << " does not match value computed from predecessor "
3971 << -BBState.EntryValue << '\n';
3972 }
3973
3974 // Update stack state by checking contents of MBB.
3975 for (const auto &I : *MBB) {
3976 if (I.getOpcode() == FrameSetupOpcode) {
3977 if (BBState.ExitIsSetup)
3978 report("FrameSetup is after another FrameSetup", &I);
3979 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3980 report("AdjustsStack not set in presence of a frame pseudo "
3981 "instruction.", &I);
3982 BBState.ExitValue -= TII->getFrameTotalSize(I);
3983 BBState.ExitIsSetup = true;
3984 }
3985
3986 if (I.getOpcode() == FrameDestroyOpcode) {
3987 int Size = TII->getFrameTotalSize(I);
3988 if (!BBState.ExitIsSetup)
3989 report("FrameDestroy is not after a FrameSetup", &I);
3990 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3991 BBState.ExitValue;
3992 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3993 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3994 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
3995 << AbsSPAdj << ">.\n";
3996 }
3997 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3998 report("AdjustsStack not set in presence of a frame pseudo "
3999 "instruction.", &I);
4000 BBState.ExitValue += Size;
4001 BBState.ExitIsSetup = false;
4002 }
4003 }
4004 SPState[MBB->getNumber()] = BBState;
4005
4006 // Make sure the exit state of any predecessor is consistent with the entry
4007 // state.
4008 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4009 if (Reachable.count(Pred) &&
4010 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4011 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4012 report("The exit stack state of a predecessor is inconsistent.", MBB);
4013 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4014 << SPState[Pred->getNumber()].ExitValue << ", "
4015 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4016 << printMBBReference(*MBB) << " has entry state ("
4017 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4018 }
4019 }
4020
4021 // Make sure the entry state of any successor is consistent with the exit
4022 // state.
4023 for (const MachineBasicBlock *Succ : MBB->successors()) {
4024 if (Reachable.count(Succ) &&
4025 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4026 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4027 report("The entry stack state of a successor is inconsistent.", MBB);
4028 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4029 << SPState[Succ->getNumber()].EntryValue << ", "
4030 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4031 << printMBBReference(*MBB) << " has exit state ("
4032 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4033 }
4034 }
4035
4036 // Make sure a basic block with return ends with zero stack adjustment.
4037 if (!MBB->empty() && MBB->back().isReturn()) {
4038 if (BBState.ExitIsSetup)
4039 report("A return block ends with a FrameSetup.", MBB);
4040 if (BBState.ExitValue)
4041 report("A return block ends with a nonzero stack adjustment.", MBB);
4042 }
4043 }
4044}
4045
4046void MachineVerifier::verifyStackProtector() {
4047 const MachineFrameInfo &MFI = MF->getFrameInfo();
4048 if (!MFI.hasStackProtectorIndex())
4049 return;
4050 // Only applicable when the offsets of frame objects have been determined,
4051 // which is indicated by a non-zero stack size.
4052 if (!MFI.getStackSize())
4053 return;
4054 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4055 bool StackGrowsDown =
4056 TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
4057 unsigned FI = MFI.getStackProtectorIndex();
4058 int64_t SPStart = MFI.getObjectOffset(FI);
4059 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4060 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4061 if (I == FI)
4062 continue;
4063 if (MFI.isDeadObjectIndex(I))
4064 continue;
4065 // FIXME: Skip non-default stack objects, as some targets may place them
4066 // above the stack protector. This is a workaround for the fact that
4067 // backends such as AArch64 may place SVE stack objects *above* the stack
4068 // protector.
4069 if (MFI.getStackID(I) != TargetStackID::Default)
4070 continue;
4071 // Skip variable-sized objects because they do not have a fixed offset.
4072 if (MFI.isVariableSizedObjectIndex(I))
4073 continue;
4074 // FIXME: Skip spill slots which may be allocated above the stack protector.
4075 // Ideally this would only skip callee-saved registers, but we don't have
4076 // that information here. For example, spill-slots used for scavenging are
4077 // not described in CalleeSavedInfo.
4078 if (MFI.isSpillSlotObjectIndex(I))
4079 continue;
4080 int64_t ObjStart = MFI.getObjectOffset(I);
4081 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
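// (Two half-open ranges [SPStart, SPEnd) and [ObjStart, ObjEnd) overlap
// exactly when each starts before the other ends, which is the test below.)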
4082 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4083 report("Stack protector overlaps with another stack object", MF);
4084 break;
4085 }
4086 if ((StackGrowsDown && SPStart <= ObjStart) ||
4087 (!StackGrowsDown && SPStart >= ObjStart)) {
4088 report("Stack protector is not the top-most object on the stack", MF);
4089 break;
4090 }
4091 }
4092}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
global merge Global merge function pass
const HexagonInstrInfo * TII
hexagon widen Hexagon Store false hexagon widen loads
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1453
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:658
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
const APFloat & getValueAPF() const
Definition: Constants.h:314
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:151
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:355
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:181
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:264
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isPointerVector() const
Definition: LowLevelType.h:152
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:190
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:277
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:183
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:270
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:205
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:200
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:429
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:642
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:575
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:946
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:981
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:972
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:83
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:176
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:209
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:242
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:212
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:216
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:272
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:219
SlotIndexes pass.
Definition: SlotIndexes.h:297
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:505
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:510
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:481
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:401
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
iterator begin() const
Definition: SmallPtrSet.h:472
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:218
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:225
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:239
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:48
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
const_iterator begin(StringRef path LLVM_LIFETIME_BOUND, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
const_iterator end(StringRef path LLVM_LIFETIME_BOUND)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
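These range wrappers (drop_begin, all_of, size) compose naturally; a small sketch over a plain vector:
  #include "llvm/ADT/STLExtras.h"
  #include <vector>
  std::vector<int> V = {1, 2, 3, 4};
  bool AllPos = llvm::all_of(V, [](int X) { return X > 0; }); // true
  auto Tail = llvm::drop_begin(V);  // range over {2, 3, 4}
  size_t N = llvm::size(Tail);      // 3 (random-access ranges only)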
void initializeMachineVerifierLegacyPassPass(PassRegistry &)
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
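A sketch of the check this enables, assuming MI is a MachineInstr in a function that has already run instruction selection:
  #include "llvm/CodeGen/TargetOpcodes.h"
  // Any surviving generic (G_*) opcode is a verifier error at this stage.
  if (llvm::isPreISelGenericOpcode(MI.getOpcode()))
    llvm::errs() << "generic instruction survived selection\n";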
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
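A sketch of calling this standalone entry point, assuming MF is the MachineFunction under inspection (useful from a debugger or an ad-hoc hook):
  llvm::verifyMachineFunction("# After MyPass", MF);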
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
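Together with set_subtract above, this supports simple set algebra over register sets; a self-contained sketch:
  #include "llvm/ADT/DenseSet.h"
  #include "llvm/ADT/SetOperations.h"
  llvm::DenseSet<unsigned> A = {1, 2, 3};
  llvm::DenseSet<unsigned> B = {3, 4};
  bool Changed = llvm::set_union(A, B); // A = {1, 2, 3, 4}; true
  llvm::set_subtract(A, B);             // A = {1, 2}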
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
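The two EH helpers are typically used together; a sketch assuming F is an llvm::Function:
  #include "llvm/IR/EHPersonalities.h"
  #include "llvm/IR/Function.h"
  if (F.hasPersonalityFn()) {
    llvm::EHPersonality Pers =
        llvm::classifyEHPersonality(F.getPersonalityFn());
    bool Scoped = llvm::isScopedEHPersonality(Pers); // scope-style EH?
  }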
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
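Both printers return a Printable adapter that streams directly into a raw_ostream; a sketch assuming Reg, TRI, and MBB come from the function being verified:
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/TargetRegisterInfo.h"
  llvm::errs() << "bad operand " << llvm::printReg(Reg, TRI) << " in "
               << llvm::printMBBReference(MBB) << '\n';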
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:370
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
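A self-contained sketch of the LaneBitmask queries above (PrintLaneMask is the printer listed earlier):
  #include "llvm/MC/LaneBitmask.h"
  using llvm::LaneBitmask;
  LaneBitmask Live = LaneBitmask::getNone();
  Live |= LaneBitmask(0x3);                        // low two lanes live
  bool AnyLive = Live.any();                       // true
  bool AllLive = (Live == LaneBitmask::getAll());  // false in general
  llvm::errs() << llvm::PrintLaneMask(Live) << '\n';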
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:78
Pair of physical register and lane mask.