LLVM 23.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseSet.h"
29#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/StringRef.h"
34#include "llvm/ADT/Twine.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
67#include "llvm/IR/Function.h"
68#include "llvm/IR/InlineAsm.h"
71#include "llvm/MC/LaneBitmask.h"
72#include "llvm/MC/MCAsmInfo.h"
73#include "llvm/MC/MCDwarf.h"
74#include "llvm/MC/MCInstrDesc.h"
77#include "llvm/Pass.h"
82#include "llvm/Support/ModRef.h"
83#include "llvm/Support/Mutex.h"
86#include <algorithm>
87#include <cassert>
88#include <cstddef>
89#include <cstdint>
90#include <iterator>
91#include <string>
92#include <utility>
93
94using namespace llvm;
95
96namespace {
97
98/// Used by the ReportedErrors class to guarantee that only one error is
99/// reported at one time.
100static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;
101
102struct MachineVerifier {
  // New pass manager entry point: cached analyses are queried via MFAM.
103 MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
104 raw_ostream *OS, bool AbortOnError = true)
105 : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
106 ReportedErrs(AbortOnError) {}
107
  // Legacy pass manager entry point: cached analyses are queried via PASS.
108 MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
109 bool AbortOnError = true)
110 : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
111 ReportedErrs(AbortOnError) {}
112
  // Standalone entry point with explicitly supplied analyses (used by the
  // MachineFunction::verify overloads defined later in this file).
113 MachineVerifier(const char *b, LiveVariables *LiveVars,
114 LiveIntervals *LiveInts, LiveStacks *LiveStks,
115 SlotIndexes *Indexes, raw_ostream *OS,
116 bool AbortOnError = true)
117 : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
118 LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
119 ReportedErrs(AbortOnError) {}
120
121 /// \returns true if no problems were found.
122 bool verify(const MachineFunction &MF);
123
  // At most one of MFAM / PASS is non-null, depending on which constructor
  // ran; both are null for the standalone entry point.
124 MachineFunctionAnalysisManager *MFAM = nullptr;
125 Pass *const PASS = nullptr;
126 raw_ostream &OS;
127 const char *Banner;
128 const MachineFunction *MF = nullptr;
129 const TargetMachine *TM = nullptr;
130 const TargetInstrInfo *TII = nullptr;
131 const TargetRegisterInfo *TRI = nullptr;
132 const MachineRegisterInfo *MRI = nullptr;
133 const RegisterBankInfo *RBI = nullptr;
134
135 // Avoid querying the MachineFunctionProperties for each operand.
136 bool isFunctionRegBankSelected = false;
137 bool isFunctionSelected = false;
138 bool isFunctionTracksDebugUserValues = false;
139
140 using RegVector = SmallVector<Register, 16>;
141 using RegMaskVector = SmallVector<const uint32_t *, 4>;
142 using RegSet = DenseSet<Register>;
143 using RegMap = DenseMap<Register, const MachineInstr *>;
144 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
145
  // Per-block scratch state; reset in visitMachineBasicBlockBefore().
146 const MachineInstr *FirstNonPHI = nullptr;
147 const MachineInstr *FirstTerminator = nullptr;
148 BlockSet FunctionBlocks;
149
150 BitVector regsReserved;
151 RegSet regsLive;
152 RegVector regsDefined, regsDead, regsKilled;
153 RegMaskVector regMasks;
154
155 SlotIndex lastIndex;
156
157 // Add Reg and any sub-registers to RV
158 void addRegWithSubRegs(RegVector &RV, Register Reg) {
159 RV.push_back(Reg);
160 if (Reg.isPhysical())
161 append_range(RV, TRI->subregs(Reg.asMCReg()));
162 }
163
164 struct BBInfo {
165 // Is this MBB reachable from the MF entry point?
166 bool reachable = false;
167
168 // Vregs that must be live in because they are used without being
169 // defined. Map value is the user. vregsLiveIn doesn't include regs
170 // that only are used by PHI nodes.
171 RegMap vregsLiveIn;
172
173 // Regs killed in MBB. They may be defined again, and will then be in both
174 // regsKilled and regsLiveOut.
175 RegSet regsKilled;
176
177 // Regs defined in MBB and live out. Note that vregs passing through may
178 // be live out without being mentioned here.
179 RegSet regsLiveOut;
180
181 // Vregs that pass through MBB untouched. This set is disjoint from
182 // regsKilled and regsLiveOut.
183 RegSet vregsPassed;
184
185 // Vregs that must pass through MBB because they are needed by a successor
186 // block. This set is disjoint from regsLiveOut.
187 RegSet vregsRequired;
188
189 // Set versions of block's predecessor and successor lists.
190 BlockSet Preds, Succs;
191
192 BBInfo() = default;
193
194 // Add register to vregsRequired if it belongs there. Return true if
195 // anything changed.
196 bool addRequired(Register Reg) {
197 if (!Reg.isVirtual())
198 return false;
199 if (regsLiveOut.count(Reg))
200 return false;
201 return vregsRequired.insert(Reg).second;
202 }
203
204 // Same for a full set.
205 bool addRequired(const RegSet &RS) {
206 bool Changed = false;
207 for (Register Reg : RS)
208 Changed |= addRequired(Reg);
209 return Changed;
210 }
211
212 // Same for a full map.
213 bool addRequired(const RegMap &RM) {
214 bool Changed = false;
215 for (const auto &I : RM)
216 Changed |= addRequired(I.first);
217 return Changed;
218 }
219
220 // Live-out registers are either in regsLiveOut or vregsPassed.
221 bool isLiveOut(Register Reg) const {
222 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
223 }
224 };
225
226 // Extra register info per MBB.
227 DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;
228
  /// \returns true if Reg is in the reserved set computed for this function.
229 bool isReserved(Register Reg) {
230 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
231 }
232
  /// \returns true if Reg is a physical register that is allocatable and not
  /// reserved.
233 bool isAllocatable(Register Reg) const {
234 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
235 !regsReserved.test(Reg.id());
236 }
237
238 // Analysis information if available
239 LiveVariables *LiveVars = nullptr;
240 LiveIntervals *LiveInts = nullptr;
241 LiveStacks *LiveStks = nullptr;
242 SlotIndexes *Indexes = nullptr;
243
244 /// A class to track the number of reported error and to guarantee that only
245 /// one error is reported at one time.
246 class ReportedErrors {
247 unsigned NumReported = 0;
248 bool AbortOnError;
249
250 public:
251 /// \param AbortOnError -- If set, abort after printing the first error.
252 ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}
253
254 ~ReportedErrors() {
255 if (!hasError())
256 return;
257 if (AbortOnError)
258 report_fatal_error("Found " + Twine(NumReported) +
259 " machine code errors.");
260 // Since we haven't aborted, release the lock to allow other threads to
261 // report errors.
262 ReportedErrorsLock->unlock();
263 }
264
265 /// Increment the number of reported errors.
266 /// \returns true if this is the first reported error.
267 bool increment() {
268 // If this is the first error this thread has encountered, grab the lock
269 // to prevent other threads from reporting errors at the same time.
270 // Otherwise we assume we already have the lock.
271 if (!hasError())
272 ReportedErrorsLock->lock();
273 ++NumReported;
274 return NumReported == 1;
275 }
276
277 /// \returns true if an error was reported.
278 bool hasError() { return NumReported; }
279 };
280 ReportedErrors ReportedErrs;
281
282 // This is calculated only when trying to verify convergence control tokens.
283 // Similar to the LLVM IR verifier, we calculate this locally instead of
284 // relying on the pass manager.
285 MachineDominatorTree DT;
286
287 void visitMachineFunctionBefore();
288 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
289 void visitMachineBundleBefore(const MachineInstr *MI);
290
291 /// Verify that all of \p MI's virtual register operands are scalars.
292 /// \returns True if all virtual register operands are scalar. False
293 /// otherwise.
294 bool verifyAllRegOpsScalar(const MachineInstr &MI,
295 const MachineRegisterInfo &MRI);
296 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
297
298 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
299 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
300 void verifyPreISelGenericInstruction(const MachineInstr *MI);
301
302 void visitMachineInstrBefore(const MachineInstr *MI);
303 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
304 void visitMachineBundleAfter(const MachineInstr *MI);
305 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
306 void visitMachineFunctionAfter();
307
308 void report(const char *msg, const MachineFunction *MF);
309 void report(const char *msg, const MachineBasicBlock *MBB);
310 void report(const char *msg, const MachineInstr *MI);
311 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
312 LLT MOVRegType = LLT{});
313 void report(const Twine &Msg, const MachineInstr *MI);
314
315 void report_context(const LiveInterval &LI) const;
316 void report_context(const LiveRange &LR, VirtRegOrUnit VRegOrUnit,
317 LaneBitmask LaneMask) const;
318 void report_context(const LiveRange::Segment &S) const;
319 void report_context(const VNInfo &VNI) const;
320 void report_context(SlotIndex Pos) const;
321 void report_context(MCPhysReg PhysReg) const;
322 void report_context_liverange(const LiveRange &LR) const;
323 void report_context_lanemask(LaneBitmask LaneMask) const;
324 void report_context_vreg(Register VReg) const;
325 void report_context_vreg_regunit(VirtRegOrUnit VRegOrUnit) const;
326
327 void verifyInlineAsm(const MachineInstr *MI);
328
329 void checkLiveness(const MachineOperand *MO, unsigned MONum);
330 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
331 SlotIndex UseIdx, const LiveRange &LR,
332 VirtRegOrUnit VRegOrUnit,
333 LaneBitmask LaneMask = LaneBitmask::getNone());
334 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
335 SlotIndex DefIdx, const LiveRange &LR,
336 VirtRegOrUnit VRegOrUnit, bool SubRangeCheck = false,
337 LaneBitmask LaneMask = LaneBitmask::getNone());
338
339 void markReachable(const MachineBasicBlock *MBB);
340 void calcRegsPassed();
341 void checkPHIOps(const MachineBasicBlock &MBB);
342
343 void calcRegsRequired();
344 void verifyLiveVariables();
345 void verifyLiveIntervals();
346 void verifyLiveInterval(const LiveInterval &);
347 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, VirtRegOrUnit,
348 LaneBitmask);
349 void verifyLiveRangeSegment(const LiveRange &,
350 const LiveRange::const_iterator I, VirtRegOrUnit,
351 LaneBitmask);
352 void verifyLiveRange(const LiveRange &, VirtRegOrUnit,
353 LaneBitmask LaneMask = LaneBitmask::getNone());
354
355 void verifyStackFrame();
356 /// Check that the stack protector is the top-most object in the stack.
357 void verifyStackProtector();
358
359 void verifySlotIndexes() const;
360 void verifyProperties(const MachineFunction &MF);
361};
362
363struct MachineVerifierLegacyPass : public MachineFunctionPass {
364 static char ID; // Pass ID, replacement for typeid
365
366 const std::string Banner;
367
368 MachineVerifierLegacyPass(std::string banner = std::string())
369 : MachineFunctionPass(ID), Banner(std::move(banner)) {}
370
371 void getAnalysisUsage(AnalysisUsage &AU) const override {
372 AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
373 AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
374 AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
375 AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
376 AU.setPreservesAll();
378 }
379
380 bool runOnMachineFunction(MachineFunction &MF) override {
381 // Skip functions that have known verification problems.
382 // FIXME: Remove this mechanism when all problematic passes have been
383 // fixed.
384 if (MF.getProperties().hasFailsVerification())
385 return false;
386
387 MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
388 return false;
389 }
390};
391
392} // end anonymous namespace
393
// NOTE(review): the signature of this definition was lost in the extraction
// (original lines 394-396 are missing). The body uses MF, MFAM, and Banner
// and returns PreservedAnalyses, so this is presumably
// MachineVerifierPass::run(MachineFunction &MF,
// MachineFunctionAnalysisManager &MFAM) -- confirm against the header before
// building.
397 // Skip functions that have known verification problems.
398 // FIXME: Remove this mechanism when all problematic passes have been
399 // fixed.
400 if (MF.getProperties().hasFailsVerification())
401 return PreservedAnalyses::all();
402 MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
403 return PreservedAnalyses::all();
404}
405
// Pass ID for the legacy wrapper.
406char MachineVerifierLegacyPass::ID = 0;
407
408INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
409 "Verify generated machine code", false, false)
410
// NOTE(review): the signature line of the following factory function (original
// line 411) was lost in the extraction. Given the body, this is presumably
// llvm::createMachineVerifierPass(const std::string &Banner) -- confirm
// against the header before building.
412 return new MachineVerifierLegacyPass(Banner);
413}
414
415void llvm::verifyMachineFunction(const std::string &Banner,
416 const MachineFunction &MF) {
417 // TODO: Use MFAM after porting below analyses.
418 // LiveVariables *LiveVars;
419 // LiveIntervals *LiveInts;
420 // LiveStacks *LiveStks;
421 // SlotIndexes *Indexes;
422 MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
423}
424
425bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
426 bool AbortOnError) const {
427 return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
428}
429
// NOTE(review): the first signature line of this overload (original line 430)
// was lost in the extraction. The body forwards MFAM, so this is presumably
// the MachineFunction::verify(MachineFunctionAnalysisManager &MFAM, ...)
// overload -- confirm against MachineFunction.h.
431 const char *Banner, raw_ostream *OS,
432 bool AbortOnError) const {
433 return MachineVerifier(MFAM, Banner, OS, AbortOnError).verify(*this);
434}
435
// NOTE(review): the first signature line of this overload (original line 436)
// was lost in the extraction. The body passes LiveInts and Indexes through,
// so this is presumably the
// MachineFunction::verify(LiveIntervals *, SlotIndexes *, ...) overload --
// confirm against MachineFunction.h.
437 const char *Banner, raw_ostream *OS,
438 bool AbortOnError) const {
439 return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
440 /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
441 .verify(*this);
442}
443
444void MachineVerifier::verifySlotIndexes() const {
445 if (Indexes == nullptr)
446 return;
447
448 // Ensure the IdxMBB list is sorted by slot indexes.
451 E = Indexes->MBBIndexEnd(); I != E; ++I) {
452 assert(!Last.isValid() || I->first > Last);
453 Last = I->first;
454 }
455}
456
457void MachineVerifier::verifyProperties(const MachineFunction &MF) {
458 // If a pass has introduced virtual registers without clearing the
459 // NoVRegs property (or set it without allocating the vregs)
460 // then report an error.
461 if (MF.getProperties().hasNoVRegs() && MRI->getNumVirtRegs())
462 report("Function has NoVRegs property but there are VReg operands", &MF);
463}
464
465bool MachineVerifier::verify(const MachineFunction &MF) {
466 this->MF = &MF;
467 TM = &MF.getTarget();
470 RBI = MF.getSubtarget().getRegBankInfo();
471 MRI = &MF.getRegInfo();
472
473 const MachineFunctionProperties &Props = MF.getProperties();
474 const bool isFunctionFailedISel = Props.hasFailedISel();
475
476 // If we're mid-GlobalISel and we already triggered the fallback path then
477 // it's expected that the MIR is somewhat broken but that's ok since we'll
478 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
479 if (isFunctionFailedISel)
480 return true;
481
482 isFunctionRegBankSelected = Props.hasRegBankSelected();
483 isFunctionSelected = Props.hasSelected();
484 isFunctionTracksDebugUserValues = Props.hasTracksDebugUserValues();
485
486 if (PASS) {
487 auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
488 LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
489 // We don't want to verify LiveVariables if LiveIntervals is available.
490 auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
491 if (!LiveInts)
492 LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
493 auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
494 LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
495 auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
496 Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
497 }
498 if (MFAM) {
499 MachineFunction &Func = const_cast<MachineFunction &>(MF);
500 LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
501 if (!LiveInts)
502 LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
503 // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
504 Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
505 }
506
507 verifySlotIndexes();
508
509 verifyProperties(MF);
510
511 visitMachineFunctionBefore();
512 for (const MachineBasicBlock &MBB : MF) {
513 visitMachineBasicBlockBefore(&MBB);
514 // Keep track of the current bundle header.
515 const MachineInstr *CurBundle = nullptr;
516 // Do we expect the next instruction to be part of the same bundle?
517 bool InBundle = false;
518
519 for (const MachineInstr &MI : MBB.instrs()) {
520 if (MI.getParent() != &MBB) {
521 report("Bad instruction parent pointer", &MBB);
522 OS << "Instruction: " << MI;
523 continue;
524 }
525
526 // Check for consistent bundle flags.
527 if (InBundle && !MI.isBundledWithPred())
528 report("Missing BundledPred flag, "
529 "BundledSucc was set on predecessor",
530 &MI);
531 if (!InBundle && MI.isBundledWithPred())
532 report("BundledPred flag is set, "
533 "but BundledSucc not set on predecessor",
534 &MI);
535
536 // Is this a bundle header?
537 if (!MI.isInsideBundle()) {
538 if (CurBundle)
539 visitMachineBundleAfter(CurBundle);
540 CurBundle = &MI;
541 visitMachineBundleBefore(CurBundle);
542 } else if (!CurBundle)
543 report("No bundle header", &MI);
544 visitMachineInstrBefore(&MI);
545 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
546 const MachineOperand &Op = MI.getOperand(I);
547 if (Op.getParent() != &MI) {
548 // Make sure to use correct addOperand / removeOperand / ChangeTo
549 // functions when replacing operands of a MachineInstr.
550 report("Instruction has operand with wrong parent set", &MI);
551 }
552
553 visitMachineOperand(&Op, I);
554 }
555
556 // Was this the last bundled instruction?
557 InBundle = MI.isBundledWithSucc();
558 }
559 if (CurBundle)
560 visitMachineBundleAfter(CurBundle);
561 if (InBundle)
562 report("BundledSucc flag set on last instruction in block", &MBB.back());
563 visitMachineBasicBlockAfter(&MBB);
564 }
565 visitMachineFunctionAfter();
566
567 // Clean up.
568 regsLive.clear();
569 regsDefined.clear();
570 regsDead.clear();
571 regsKilled.clear();
572 regMasks.clear();
573 MBBInfoMap.clear();
574
575 return !ReportedErrs.hasError();
576}
577
578void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
579 assert(MF);
580 OS << '\n';
581 if (ReportedErrs.increment()) {
582 if (Banner)
583 OS << "# " << Banner << '\n';
584
585 if (LiveInts != nullptr)
586 LiveInts->print(OS);
587 else
588 MF->print(OS, Indexes);
589 }
590
591 OS << "*** Bad machine code: " << msg << " ***\n"
592 << "- function: " << MF->getName() << '\n';
593}
594
595void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
596 assert(MBB);
597 report(msg, MBB->getParent());
598 OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
599 << " (" << (const void *)MBB << ')';
600 if (Indexes)
601 OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
602 << Indexes->getMBBEndIdx(MBB) << ')';
603 OS << '\n';
604}
605
606void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
607 assert(MI);
608 report(msg, MI->getParent());
609 OS << "- instruction: ";
610 if (Indexes && Indexes->hasIndex(*MI))
611 OS << Indexes->getInstructionIndex(*MI) << '\t';
612 MI->print(OS, /*IsStandalone=*/true);
613}
614
615void MachineVerifier::report(const char *msg, const MachineOperand *MO,
616 unsigned MONum, LLT MOVRegType) {
617 assert(MO);
618 report(msg, MO->getParent());
619 OS << "- operand " << MONum << ": ";
620 MO->print(OS, MOVRegType, TRI);
621 OS << '\n';
622}
623
624void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
625 report(Msg.str().c_str(), MI);
626}
627
/// Print the slot index at which the problem was detected.
628void MachineVerifier::report_context(SlotIndex Pos) const {
629 OS << "- at: " << Pos << '\n';
630}
631
/// Print the live interval involved in the last reported error.
632void MachineVerifier::report_context(const LiveInterval &LI) const {
633 OS << "- interval: " << LI << '\n';
634}
635
/// Print live-range, vreg/regunit, and (when non-empty) lane-mask context
/// lines for the last reported error.
636void MachineVerifier::report_context(const LiveRange &LR,
637 VirtRegOrUnit VRegOrUnit,
638 LaneBitmask LaneMask) const {
639 report_context_liverange(LR);
640 report_context_vreg_regunit(VRegOrUnit);
641 if (LaneMask.any())
642 report_context_lanemask(LaneMask);
643}
644
/// Print the live-range segment involved in the last reported error.
645void MachineVerifier::report_context(const LiveRange::Segment &S) const {
646 OS << "- segment: " << S << '\n';
647}
648
/// Print the value number (and its def index) involved in the last error.
649void MachineVerifier::report_context(const VNInfo &VNI) const {
650 OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
651}
652
/// Print the live range involved in the last reported error.
653void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
654 OS << "- liverange: " << LR << '\n';
655}
656
/// Print the physical register involved in the last reported error.
657void MachineVerifier::report_context(MCPhysReg PReg) const {
658 OS << "- p. register: " << printReg(PReg, TRI) << '\n';
659}
660
/// Print the virtual register involved in the last reported error.
661void MachineVerifier::report_context_vreg(Register VReg) const {
662 OS << "- v. register: " << printReg(VReg, TRI) << '\n';
663}
664
/// Print either a virtual-register or a register-unit context line, depending
/// on what \p VRegOrUnit wraps.
665void MachineVerifier::report_context_vreg_regunit(
666 VirtRegOrUnit VRegOrUnit) const {
667 if (VRegOrUnit.isVirtualReg()) {
668 report_context_vreg(VRegOrUnit.asVirtualReg());
669 } else {
670 OS << "- regunit: " << printRegUnit(VRegOrUnit.asMCRegUnit(), TRI)
671 << '\n';
672 }
673}
674
/// Print the lane mask involved in the last reported error.
675void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
676 OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
677}
678
679void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
680 BBInfo &MInfo = MBBInfoMap[MBB];
681 if (!MInfo.reachable) {
682 MInfo.reachable = true;
683 for (const MachineBasicBlock *Succ : MBB->successors())
684 markReachable(Succ);
685 }
686}
687
688void MachineVerifier::visitMachineFunctionBefore() {
689 lastIndex = SlotIndex();
690 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
691 : TRI->getReservedRegs(*MF);
692
693 if (!MF->empty())
694 markReachable(&MF->front());
695
696 // Build a set of the basic blocks in the function.
697 FunctionBlocks.clear();
698 for (const auto &MBB : *MF) {
699 FunctionBlocks.insert(&MBB);
700 BBInfo &MInfo = MBBInfoMap[&MBB];
701
702 MInfo.Preds.insert_range(MBB.predecessors());
703 if (MInfo.Preds.size() != MBB.pred_size())
704 report("MBB has duplicate entries in its predecessor list.", &MBB);
705
706 MInfo.Succs.insert_range(MBB.successors());
707 if (MInfo.Succs.size() != MBB.succ_size())
708 report("MBB has duplicate entries in its successor list.", &MBB);
709 }
710
711 // Check that the register use lists are sane.
712 MRI->verifyUseLists();
713
714 if (!MF->empty()) {
715 verifyStackFrame();
716 verifyStackProtector();
717 }
718}
719
/// Per-block checks run before visiting the block's instructions: live-in
/// sanity, CFG consistency, landing-pad successor counts, and agreement
/// between analyzeBranch's answers and the actual CFG. Also seeds regsLive
/// from the block's live-ins and the function's pristine registers.
///
/// NOTE(review): six lines of this function (orig. 731, 741, 748, 777, 784,
/// 865) were lost in the documentation extraction; each gap is marked below.
/// The declarations of LandingPadSuccs, Cond, and MBBI are among the missing
/// lines, which is why they appear undeclared in this text.
720void
721MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
722 FirstTerminator = nullptr;
723 FirstNonPHI = nullptr;
724
725 if (!MF->getProperties().hasNoPHIs() && MRI->tracksLiveness()) {
726 // If this block has allocatable physical registers live-in, check that
727 // it is an entry block or landing pad.
728 for (const auto &LI : MBB->liveins()) {
729 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
730 MBB->getIterator() != MBB->getParent()->begin() &&
// NOTE(review): a condition line (orig. 731) is missing here.
732 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
733 "inlineasm-br-indirect-target.",
734 MBB);
735 report_context(LI.PhysReg);
736 }
737 }
738 }
739
740 if (MBB->isIRBlockAddressTaken()) {
// NOTE(review): a condition line (orig. 741) is missing here.
742 report("ir-block-address-taken is associated with basic block not used by "
743 "a blockaddress.",
744 MBB);
745 }
746
747 // Count the number of landing pad successors.
// NOTE(review): the declaration of LandingPadSuccs (orig. 748) is missing.
749 for (const auto *succ : MBB->successors()) {
750 if (succ->isEHPad())
751 LandingPadSuccs.insert(succ);
752 if (!FunctionBlocks.count(succ))
753 report("MBB has successor that isn't part of the function.", MBB);
754 if (!MBBInfoMap[succ].Preds.count(MBB)) {
755 report("Inconsistent CFG", MBB);
756 OS << "MBB is not in the predecessor list of the successor "
757 << printMBBReference(*succ) << ".\n";
758 }
759 }
760
761 // Check the predecessor list.
762 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
763 if (!FunctionBlocks.count(Pred))
764 report("MBB has predecessor that isn't part of the function.", MBB);
765 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
766 report("Inconsistent CFG", MBB);
767 OS << "MBB is not in the successor list of the predecessor "
768 << printMBBReference(*Pred) << ".\n";
769 }
770 }
771
772 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
773 const BasicBlock *BB = MBB->getBasicBlock();
774 const Function &F = MF->getFunction();
775 if (LandingPadSuccs.size() > 1 &&
776 !(AsmInfo &&
// NOTE(review): a condition line (orig. 777) is missing here.
778 BB && isa<SwitchInst>(BB->getTerminator())) &&
779 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
780 report("MBB has more than one landing pad successor", MBB);
781
782 // Call analyzeBranch. If it succeeds, there several more conditions to check.
783 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
// NOTE(review): the declaration of Cond (orig. 784) is missing here.
785 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
786 Cond)) {
787 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
788 // check whether its answers match up with reality.
789 if (!TBB && !FBB) {
790 // Block falls through to its successor.
791 if (!MBB->empty() && MBB->back().isBarrier() &&
792 !TII->isPredicated(MBB->back())) {
793 report("MBB exits via unconditional fall-through but ends with a "
794 "barrier instruction!", MBB);
795 }
796 if (!Cond.empty()) {
797 report("MBB exits via unconditional fall-through but has a condition!",
798 MBB);
799 }
800 } else if (TBB && !FBB && Cond.empty()) {
801 // Block unconditionally branches somewhere.
802 if (MBB->empty()) {
803 report("MBB exits via unconditional branch but doesn't contain "
804 "any instructions!", MBB);
805 } else if (!MBB->back().isBarrier()) {
806 report("MBB exits via unconditional branch but doesn't end with a "
807 "barrier instruction!", MBB);
808 } else if (!MBB->back().isTerminator()) {
809 report("MBB exits via unconditional branch but the branch isn't a "
810 "terminator instruction!", MBB);
811 }
812 } else if (TBB && !FBB && !Cond.empty()) {
813 // Block conditionally branches somewhere, otherwise falls through.
814 if (MBB->empty()) {
815 report("MBB exits via conditional branch/fall-through but doesn't "
816 "contain any instructions!", MBB);
817 } else if (MBB->back().isBarrier()) {
818 report("MBB exits via conditional branch/fall-through but ends with a "
819 "barrier instruction!", MBB);
820 } else if (!MBB->back().isTerminator()) {
821 report("MBB exits via conditional branch/fall-through but the branch "
822 "isn't a terminator instruction!", MBB);
823 }
824 } else if (TBB && FBB) {
825 // Block conditionally branches somewhere, otherwise branches
826 // somewhere else.
827 if (MBB->empty()) {
828 report("MBB exits via conditional branch/branch but doesn't "
829 "contain any instructions!", MBB);
830 } else if (!MBB->back().isBarrier()) {
831 report("MBB exits via conditional branch/branch but doesn't end with a "
832 "barrier instruction!", MBB);
833 } else if (!MBB->back().isTerminator()) {
834 report("MBB exits via conditional branch/branch but the branch "
835 "isn't a terminator instruction!", MBB);
836 }
837 if (Cond.empty()) {
838 report("MBB exits via conditional branch/branch but there's no "
839 "condition!", MBB);
840 }
841 } else {
842 report("analyzeBranch returned invalid data!", MBB);
843 }
844
845 // Now check that the successors match up with the answers reported by
846 // analyzeBranch.
847 if (TBB && !MBB->isSuccessor(TBB))
848 report("MBB exits via jump or conditional branch, but its target isn't a "
849 "CFG successor!",
850 MBB);
851 if (FBB && !MBB->isSuccessor(FBB))
852 report("MBB exits via conditional branch, but its target isn't a CFG "
853 "successor!",
854 MBB);
855
856 // There might be a fallthrough to the next block if there's either no
857 // unconditional true branch, or if there's a condition, and one of the
858 // branches is missing.
859 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
860
861 // A conditional fallthrough must be an actual CFG successor, not
862 // unreachable. (Conversely, an unconditional fallthrough might not really
863 // be a successor, because the block might end in unreachable.)
864 if (!Cond.empty() && !FBB) {
// NOTE(review): the declaration of MBBI (orig. 865) is missing here.
866 if (MBBI == MF->end()) {
867 report("MBB conditionally falls through out of function!", MBB);
868 } else if (!MBB->isSuccessor(&*MBBI))
869 report("MBB exits via conditional branch/fall-through but the CFG "
870 "successors don't match the actual successors!",
871 MBB);
872 }
873
874 // Verify that there aren't any extra un-accounted-for successors.
875 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
876 // If this successor is one of the branch targets, it's okay.
877 if (SuccMBB == TBB || SuccMBB == FBB)
878 continue;
879 // If we might have a fallthrough, and the successor is the fallthrough
880 // block, that's also ok.
881 if (Fallthrough && SuccMBB == MBB->getNextNode())
882 continue;
883 // Also accept successors which are for exception-handling or might be
884 // inlineasm_br targets.
885 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
886 continue;
887 report("MBB has unexpected successors which are not branch targets, "
888 "fallthrough, EHPads, or inlineasm_br targets.",
889 MBB);
890 }
891 }
892
893 regsLive.clear();
894 if (MRI->tracksLiveness()) {
895 for (const auto &LI : MBB->liveins()) {
896 if (!LI.PhysReg.isPhysical()) {
897 report("MBB live-in list contains non-physical register", MBB);
898 continue;
899 }
900 regsLive.insert_range(TRI->subregs_inclusive(LI.PhysReg));
901 }
902 }
903
904 const MachineFrameInfo &MFI = MF->getFrameInfo();
905 BitVector PR = MFI.getPristineRegs(*MF);
906 for (unsigned I : PR.set_bits())
907 regsLive.insert_range(TRI->subregs_inclusive(I));
908
909 regsKilled.clear();
910 regsDefined.clear();
911
912 if (Indexes)
913 lastIndex = Indexes->getMBBStartIdx(MBB);
914}
915
916// This function gets called for all bundle headers, including normal
917// stand-alone unbundled instructions.
918void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
919 if (Indexes && Indexes->hasIndex(*MI)) {
920 SlotIndex idx = Indexes->getInstructionIndex(*MI);
921 if (!(idx > lastIndex)) {
922 report("Instruction index out of order", MI);
923 OS << "Last instruction was at " << lastIndex << '\n';
924 }
925 lastIndex = idx;
926 }
927
928 // Ensure non-terminators don't follow terminators.
929 if (MI->isTerminator()) {
930 if (!FirstTerminator)
931 FirstTerminator = MI;
932 } else if (FirstTerminator) {
933 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
934 // precede non-terminators.
935 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
936 report("Non-terminator instruction after the first terminator", MI);
937 OS << "First terminator was:\t" << *FirstTerminator;
938 }
939 }
940}
941
942// The operands on an INLINEASM instruction must follow a template.
943// Verify that the flag operands make sense.
/// NOTE(review): this definition is truncated at the end of the extracted
/// text -- the INLINEASM_BR operand loop below is not closed here.
944void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
945 // The first two operands on INLINEASM are the asm string and global flags.
946 if (MI->getNumOperands() < 2) {
947 report("Too few operands on inline asm", MI);
948 return;
949 }
950 if (!MI->getOperand(0).isSymbol())
951 report("Asm string must be an external symbol", MI);
952 if (!MI->getOperand(1).isImm())
953 report("Asm flags must be an immediate", MI);
954 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
955 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
956 // and Extra_IsConvergent = 32, Extra_MayUnwind = 64.
957 if (!isUInt<7>(MI->getOperand(1).getImm()))
958 report("Unknown asm flags", &MI->getOperand(1), 1)
959
960 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
961
962 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
963 unsigned NumOps;
// Walk the operand groups: each group starts with an immediate flag word
// that encodes how many register operands follow it.
964 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
965 const MachineOperand &MO = MI->getOperand(OpNo);
966 // There may be implicit ops after the fixed operands.
967 if (!MO.isImm())
968 break;
969 const InlineAsm::Flag F(MO.getImm());
970 NumOps = 1 + F.getNumOperandRegisters();
971 }
972
973 if (OpNo > MI->getNumOperands())
974 report("Missing operands in last group", MI);
975
976 // An optional MDNode follows the groups.
977 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
978 ++OpNo;
979
980 // All trailing operands must be implicit registers.
981 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
982 const MachineOperand &MO = MI->getOperand(OpNo);
983 if (!MO.isReg() || !MO.isImplicit())
984 report("Expected implicit register after groups", &MO, OpNo);
985 }
986
987 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
988 const MachineBasicBlock *MBB = MI->getParent();
989
990 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
991 i != e; ++i) {
992 const MachineOperand &MO = MI->getOperand(i);
993
994 if (!MO.isMBB())
995 continue;
996
997 // Check the successor & predecessor lists look ok, assume they are
998 // not. Find the indirect target without going through the successors.
999 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
1000 if (!IndirectTargetMBB) {
1001 report("INLINEASM_BR indirect target does not exist", &MO, i);
1002 break;
1003 }
1004
1005 if (!MBB->isSuccessor(IndirectTargetMBB))
1006 report("INLINEASM_BR indirect target missing from successor list", &MO,
1007 i);
1008
1009 if (!IndirectTargetMBB->isPredecessor(MBB))
1010 report("INLINEASM_BR indirect target predecessor list missing parent",
1011 &MO, i);
1012 }
1013 }
1014}
1015
1016bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
1017 const MachineRegisterInfo &MRI) {
1018 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
1019 if (!Op.isReg())
1020 return false;
1021 const auto Reg = Op.getReg();
1022 if (Reg.isPhysical())
1023 return false;
1024 return !MRI.getType(Reg).isScalar();
1025 }))
1026 return true;
1027 report("All register operands must have scalar types", &MI);
1028 return false;
1029}
1030
1031/// Check that types are consistent when two operands need to have the same
1032/// number of vector elements.
1033/// \return true if the types are valid.
1034bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
1035 const MachineInstr *MI) {
1036 if (Ty0.isVector() != Ty1.isVector()) {
1037 report("operand types must be all-vector or all-scalar", MI);
1038 // Generally we try to report as many issues as possible at once, but in
1039 // this case it's not clear what should we be comparing the size of the
1040 // scalar with: the size of the whole vector or its lane. Instead of
1041 // making an arbitrary choice and emitting not so helpful message, let's
1042 // avoid the extra noise and stop here.
1043 return false;
1044 }
1045
1046 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
1047 report("operand types must preserve number of vector elements", MI);
1048 return false;
1049 }
1050
1051 return true;
1052}
1053
// Check that the side-effect flavor of a G_INTRINSIC* opcode matches the
// memory effects declared on the intrinsic itself. Returns false if a
// mismatch was reported, true otherwise.
bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // The plain (non-_W_SIDE_EFFECTS) opcodes claim the intrinsic does not
  // access memory.
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  // Only check IDs within the known intrinsic range; 0 means not an
  // intrinsic.
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): the declaration that initializes `Attrs` (presumably an
    // Intrinsic::getAttributes(...) call whose argument list continues on the
    // next line) appears to be missing from this copy of the source — confirm
    // against upstream before relying on this function compiling.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    // The declared effects say the intrinsic may touch memory iff it is not
    // marked as not accessing memory.
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}
1077
// Check that the convergence flavor of a G_INTRINSIC* opcode matches the
// `convergent` attribute declared on the intrinsic itself. Returns false if
// a mismatch was reported, true otherwise.
bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // The non-_CONVERGENT opcodes claim the intrinsic is not convergent.
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  // Only check IDs within the known intrinsic range; 0 means not an
  // intrinsic.
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): the declaration that initializes `Attrs` (presumably an
    // Intrinsic::getAttributes(...) call whose argument list continues on the
    // next line) appears to be missing from this copy of the source — confirm
    // against upstream before relying on this function compiling.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasAttribute(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
1102
1103void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1104 if (isFunctionSelected)
1105 report("Unexpected generic instruction in a Selected function", MI);
1106
1107 const MCInstrDesc &MCID = MI->getDesc();
1108 unsigned NumOps = MI->getNumOperands();
1109
1110 // Branches must reference a basic block if they are not indirect
1111 if (MI->isBranch() && !MI->isIndirectBranch()) {
1112 bool HasMBB = false;
1113 for (const MachineOperand &Op : MI->operands()) {
1114 if (Op.isMBB()) {
1115 HasMBB = true;
1116 break;
1117 }
1118 }
1119
1120 if (!HasMBB) {
1121 report("Branch instruction is missing a basic block operand or "
1122 "isIndirectBranch property",
1123 MI);
1124 }
1125 }
1126
1127 // Check types.
1129 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1130 I != E; ++I) {
1131 if (!MCID.operands()[I].isGenericType())
1132 continue;
1133 // Generic instructions specify type equality constraints between some of
1134 // their operands. Make sure these are consistent.
1135 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1136 Types.resize(std::max(TypeIdx + 1, Types.size()));
1137
1138 const MachineOperand *MO = &MI->getOperand(I);
1139 if (!MO->isReg()) {
1140 report("generic instruction must use register operands", MI);
1141 continue;
1142 }
1143
1144 LLT OpTy = MRI->getType(MO->getReg());
1145 // Don't report a type mismatch if there is no actual mismatch, only a
1146 // type missing, to reduce noise:
1147 if (OpTy.isValid()) {
1148 // Only the first valid type for a type index will be printed: don't
1149 // overwrite it later so it's always clear which type was expected:
1150 if (!Types[TypeIdx].isValid())
1151 Types[TypeIdx] = OpTy;
1152 else if (Types[TypeIdx] != OpTy)
1153 report("Type mismatch in generic instruction", MO, I, OpTy);
1154 } else {
1155 // Generic instructions must have types attached to their operands.
1156 report("Generic instruction is missing a virtual register type", MO, I);
1157 }
1158 }
1159
1160 // Generic opcodes must not have physical register operands.
1161 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1162 const MachineOperand *MO = &MI->getOperand(I);
1163 if (MO->isReg() && MO->getReg().isPhysical())
1164 report("Generic instruction cannot have physical register", MO, I);
1165 }
1166
1167 // Avoid out of bounds in checks below. This was already reported earlier.
1168 if (MI->getNumOperands() < MCID.getNumOperands())
1169 return;
1170
1172 if (!TII->verifyInstruction(*MI, ErrorInfo))
1173 report(ErrorInfo.data(), MI);
1174
1175 // Verify properties of various specific instruction types
1176 unsigned Opc = MI->getOpcode();
1177 switch (Opc) {
1178 case TargetOpcode::G_ASSERT_SEXT:
1179 case TargetOpcode::G_ASSERT_ZEXT: {
1180 std::string OpcName =
1181 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1182 if (!MI->getOperand(2).isImm()) {
1183 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1184 break;
1185 }
1186
1187 Register Dst = MI->getOperand(0).getReg();
1188 Register Src = MI->getOperand(1).getReg();
1189 LLT SrcTy = MRI->getType(Src);
1190 int64_t Imm = MI->getOperand(2).getImm();
1191 if (Imm <= 0) {
1192 report(Twine(OpcName, " size must be >= 1"), MI);
1193 break;
1194 }
1195
1196 if (Imm >= SrcTy.getScalarSizeInBits()) {
1197 report(Twine(OpcName, " size must be less than source bit width"), MI);
1198 break;
1199 }
1200
1201 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1202 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1203
1204 // Allow only the source bank to be set.
1205 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1206 report(Twine(OpcName, " cannot change register bank"), MI);
1207 break;
1208 }
1209
1210 // Don't allow a class change. Do allow member class->regbank.
1211 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1212 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1213 report(
1214 Twine(OpcName, " source and destination register classes must match"),
1215 MI);
1216 break;
1217 }
1218
1219 break;
1220 }
1221
1222 case TargetOpcode::G_CONSTANT:
1223 case TargetOpcode::G_FCONSTANT: {
1224 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1225 if (DstTy.isVector())
1226 report("Instruction cannot use a vector result type", MI);
1227
1228 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1229 if (!MI->getOperand(1).isCImm()) {
1230 report("G_CONSTANT operand must be cimm", MI);
1231 break;
1232 }
1233
1234 const ConstantInt *CI = MI->getOperand(1).getCImm();
1235 if (CI->getBitWidth() != DstTy.getSizeInBits())
1236 report("inconsistent constant size", MI);
1237 } else {
1238 if (!MI->getOperand(1).isFPImm()) {
1239 report("G_FCONSTANT operand must be fpimm", MI);
1240 break;
1241 }
1242 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1243
1245 DstTy.getSizeInBits()) {
1246 report("inconsistent constant size", MI);
1247 }
1248 }
1249
1250 break;
1251 }
1252 case TargetOpcode::G_LOAD:
1253 case TargetOpcode::G_STORE:
1254 case TargetOpcode::G_ZEXTLOAD:
1255 case TargetOpcode::G_SEXTLOAD: {
1256 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1257 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1258 if (!PtrTy.isPointer())
1259 report("Generic memory instruction must access a pointer", MI);
1260
1261 // Generic loads and stores must have a single MachineMemOperand
1262 // describing that access.
1263 if (!MI->hasOneMemOperand()) {
1264 report("Generic instruction accessing memory must have one mem operand",
1265 MI);
1266 } else {
1267 const MachineMemOperand &MMO = **MI->memoperands_begin();
1268 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1269 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1271 ValTy.getSizeInBits()))
1272 report("Generic extload must have a narrower memory type", MI);
1273 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1275 ValTy.getSizeInBytes()))
1276 report("load memory size cannot exceed result size", MI);
1277
1278 if (MMO.getRanges()) {
1279 ConstantInt *i =
1281 const LLT RangeTy = LLT::scalar(i->getIntegerType()->getBitWidth());
1282 const LLT MemTy = MMO.getMemoryType();
1283 if (MemTy.getScalarType() != RangeTy ||
1284 ValTy.isScalar() != MemTy.isScalar() ||
1285 (ValTy.isVector() &&
1286 ValTy.getNumElements() != MemTy.getNumElements())) {
1287 report("range is incompatible with the result type", MI);
1288 }
1289 }
1290 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1292 MMO.getSize().getValue()))
1293 report("store memory size cannot exceed value size", MI);
1294 }
1295
1296 const AtomicOrdering Order = MMO.getSuccessOrdering();
1297 if (Opc == TargetOpcode::G_STORE) {
1298 if (Order == AtomicOrdering::Acquire ||
1300 report("atomic store cannot use acquire ordering", MI);
1301
1302 } else {
1303 if (Order == AtomicOrdering::Release ||
1305 report("atomic load cannot use release ordering", MI);
1306 }
1307 }
1308
1309 break;
1310 }
1311 case TargetOpcode::G_PHI: {
1312 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1313 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1314 [this, &DstTy](const MachineOperand &MO) {
1315 if (!MO.isReg())
1316 return true;
1317 LLT Ty = MRI->getType(MO.getReg());
1318 if (!Ty.isValid() || (Ty != DstTy))
1319 return false;
1320 return true;
1321 }))
1322 report("Generic Instruction G_PHI has operands with incompatible/missing "
1323 "types",
1324 MI);
1325 break;
1326 }
1327 case TargetOpcode::G_BITCAST: {
1328 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1329 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1330 if (!DstTy.isValid() || !SrcTy.isValid())
1331 break;
1332
1333 if (SrcTy.isPointer() != DstTy.isPointer())
1334 report("bitcast cannot convert between pointers and other types", MI);
1335
1336 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1337 report("bitcast sizes must match", MI);
1338
1339 bool SameType = SrcTy.getKind() == DstTy.getKind();
1340 if (SameType && SrcTy.isPointerOrPointerVector())
1341 SameType &= SrcTy.getAddressSpace() == DstTy.getAddressSpace();
1342
1343 SameType &= SrcTy.getScalarSizeInBits() == DstTy.getScalarSizeInBits();
1344
1345 if (SameType && SrcTy.isVector())
1346 SameType &= SrcTy.getElementCount() == DstTy.getElementCount();
1347
1348 if (SameType)
1349 report("bitcast must change the type", MI);
1350
1351 break;
1352 }
1353 case TargetOpcode::G_INTTOPTR:
1354 case TargetOpcode::G_PTRTOINT:
1355 case TargetOpcode::G_ADDRSPACE_CAST: {
1356 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1357 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1358 if (!DstTy.isValid() || !SrcTy.isValid())
1359 break;
1360
1361 verifyVectorElementMatch(DstTy, SrcTy, MI);
1362
1363 DstTy = DstTy.getScalarType();
1364 SrcTy = SrcTy.getScalarType();
1365
1366 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1367 if (!DstTy.isPointer())
1368 report("inttoptr result type must be a pointer", MI);
1369 if (SrcTy.isPointer())
1370 report("inttoptr source type must not be a pointer", MI);
1371 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1372 if (!SrcTy.isPointer())
1373 report("ptrtoint source type must be a pointer", MI);
1374 if (DstTy.isPointer())
1375 report("ptrtoint result type must not be a pointer", MI);
1376 } else {
1377 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1378 if (!SrcTy.isPointer() || !DstTy.isPointer())
1379 report("addrspacecast types must be pointers", MI);
1380 else {
1381 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1382 report("addrspacecast must convert different address spaces", MI);
1383 }
1384 }
1385
1386 break;
1387 }
1388 case TargetOpcode::G_PTR_ADD: {
1389 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1390 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1391 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1392 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1393 break;
1394
1395 if (!PtrTy.isPointerOrPointerVector())
1396 report("gep first operand must be a pointer", MI);
1397
1398 if (OffsetTy.isPointerOrPointerVector())
1399 report("gep offset operand must not be a pointer", MI);
1400
1401 if (PtrTy.isPointerOrPointerVector()) {
1402 const DataLayout &DL = MF->getDataLayout();
1403 unsigned AS = PtrTy.getAddressSpace();
1404 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1405 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1406 report("gep offset operand must match index size for address space",
1407 MI);
1408 }
1409 }
1410
1411 // TODO: Is the offset allowed to be a scalar with a vector?
1412 break;
1413 }
1414 case TargetOpcode::G_PTRMASK: {
1415 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1416 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1417 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1418 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1419 break;
1420
1421 if (!DstTy.isPointerOrPointerVector())
1422 report("ptrmask result type must be a pointer", MI);
1423
1424 if (!MaskTy.getScalarType().isScalar())
1425 report("ptrmask mask type must be an integer", MI);
1426
1427 verifyVectorElementMatch(DstTy, MaskTy, MI);
1428 break;
1429 }
1430 case TargetOpcode::G_SEXT:
1431 case TargetOpcode::G_ZEXT:
1432 case TargetOpcode::G_ANYEXT:
1433 case TargetOpcode::G_TRUNC:
1434 case TargetOpcode::G_TRUNC_SSAT_S:
1435 case TargetOpcode::G_TRUNC_SSAT_U:
1436 case TargetOpcode::G_TRUNC_USAT_U:
1437 case TargetOpcode::G_FPEXT:
1438 case TargetOpcode::G_FPTRUNC: {
1439 // Number of operands and presense of types is already checked (and
1440 // reported in case of any issues), so no need to report them again. As
1441 // we're trying to report as many issues as possible at once, however, the
1442 // instructions aren't guaranteed to have the right number of operands or
1443 // types attached to them at this point
1444 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1445 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1446 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1447 if (!DstTy.isValid() || !SrcTy.isValid())
1448 break;
1449
1451 report("Generic extend/truncate can not operate on pointers", MI);
1452
1453 verifyVectorElementMatch(DstTy, SrcTy, MI);
1454
1455 unsigned DstSize = DstTy.getScalarSizeInBits();
1456 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1457 switch (MI->getOpcode()) {
1458 default:
1459 if (DstSize <= SrcSize)
1460 report("Generic extend has destination type no larger than source", MI);
1461 break;
1462 case TargetOpcode::G_TRUNC:
1463 case TargetOpcode::G_TRUNC_SSAT_S:
1464 case TargetOpcode::G_TRUNC_SSAT_U:
1465 case TargetOpcode::G_TRUNC_USAT_U:
1466 case TargetOpcode::G_FPTRUNC:
1467 if (DstSize >= SrcSize)
1468 report("Generic truncate has destination type no smaller than source",
1469 MI);
1470 break;
1471 }
1472 break;
1473 }
1474 case TargetOpcode::G_SELECT: {
1475 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1476 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1477 if (!SelTy.isValid() || !CondTy.isValid())
1478 break;
1479
1480 // Scalar condition select on a vector is valid.
1481 if (CondTy.isVector())
1482 verifyVectorElementMatch(SelTy, CondTy, MI);
1483 break;
1484 }
1485 case TargetOpcode::G_MERGE_VALUES: {
1486 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1487 // e.g. s2N = MERGE sN, sN
1488 // Merging multiple scalars into a vector is not allowed, should use
1489 // G_BUILD_VECTOR for that.
1490 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1491 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1492 if (DstTy.isVector() || SrcTy.isVector())
1493 report("G_MERGE_VALUES cannot operate on vectors", MI);
1494
1495 const unsigned NumOps = MI->getNumOperands();
1496 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1497 report("G_MERGE_VALUES result size is inconsistent", MI);
1498
1499 for (unsigned I = 2; I != NumOps; ++I) {
1500 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1501 report("G_MERGE_VALUES source types do not match", MI);
1502 }
1503
1504 break;
1505 }
1506 case TargetOpcode::G_UNMERGE_VALUES: {
1507 unsigned NumDsts = MI->getNumOperands() - 1;
1508 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1509 for (unsigned i = 1; i < NumDsts; ++i) {
1510 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1511 report("G_UNMERGE_VALUES destination types do not match", MI);
1512 break;
1513 }
1514 }
1515
1516 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1517 if (DstTy.isVector()) {
1518 // This case is the converse of G_CONCAT_VECTORS.
1519 if (!SrcTy.isVector() ||
1520 (SrcTy.getScalarType() != DstTy.getScalarType() &&
1521 !SrcTy.isPointerVector()) ||
1522 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1523 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1524 report("G_UNMERGE_VALUES source operand does not match vector "
1525 "destination operands",
1526 MI);
1527 } else if (SrcTy.isVector()) {
1528 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1529 // mismatched types as long as the total size matches:
1530 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1531 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1532 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1533 "destination operands",
1534 MI);
1535 } else {
1536 // This case is the converse of G_MERGE_VALUES.
1537 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1538 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1539 "destination operands",
1540 MI);
1541 }
1542 }
1543 break;
1544 }
1545 case TargetOpcode::G_BUILD_VECTOR: {
1546 // Source types must be scalars, dest type a vector. Total size of scalars
1547 // must match the dest vector size.
1548 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1549 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1550 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1551 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1552 break;
1553 }
1554
1555 if (DstTy.getElementType() != SrcEltTy)
1556 report("G_BUILD_VECTOR result element type must match source type", MI);
1557
1558 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1559 report("G_BUILD_VECTOR must have an operand for each element", MI);
1560
1561 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1562 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1563 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1564
1565 break;
1566 }
1567 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1568 // Source types must be scalars, dest type a vector. Scalar types must be
1569 // larger than the dest vector elt type, as this is a truncating operation.
1570 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1571 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1572 if (!DstTy.isVector() || SrcEltTy.isVector())
1573 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1574 MI);
1575 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1576 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1577 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1578 MI);
1579 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1580 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1581 "dest elt type",
1582 MI);
1583 break;
1584 }
1585 case TargetOpcode::G_CONCAT_VECTORS: {
1586 // Source types should be vectors, and total size should match the dest
1587 // vector size.
1588 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1589 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1590 if (!DstTy.isVector() || !SrcTy.isVector())
1591 report("G_CONCAT_VECTOR requires vector source and destination operands",
1592 MI);
1593
1594 if (MI->getNumOperands() < 3)
1595 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1596
1597 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1598 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1599 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1600 if (DstTy.getElementCount() !=
1601 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1602 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1603 break;
1604 }
1605 case TargetOpcode::G_ICMP:
1606 case TargetOpcode::G_FCMP: {
1607 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1608 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1609
1610 if ((DstTy.isVector() != SrcTy.isVector()) ||
1611 (DstTy.isVector() &&
1612 DstTy.getElementCount() != SrcTy.getElementCount()))
1613 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1614
1615 break;
1616 }
1617 case TargetOpcode::G_SCMP:
1618 case TargetOpcode::G_UCMP: {
1619 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1620 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1621
1622 if (SrcTy.isPointerOrPointerVector()) {
1623 report("Generic scmp/ucmp does not support pointers as operands", MI);
1624 break;
1625 }
1626
1627 if (DstTy.isPointerOrPointerVector()) {
1628 report("Generic scmp/ucmp does not support pointers as a result", MI);
1629 break;
1630 }
1631
1632 if (DstTy.getScalarSizeInBits() < 2) {
1633 report("Result type must be at least 2 bits wide", MI);
1634 break;
1635 }
1636
1637 if ((DstTy.isVector() != SrcTy.isVector()) ||
1638 (DstTy.isVector() &&
1639 DstTy.getElementCount() != SrcTy.getElementCount())) {
1640 report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1641 break;
1642 }
1643
1644 break;
1645 }
1646 case TargetOpcode::G_EXTRACT: {
1647 const MachineOperand &SrcOp = MI->getOperand(1);
1648 if (!SrcOp.isReg()) {
1649 report("extract source must be a register", MI);
1650 break;
1651 }
1652
1653 const MachineOperand &OffsetOp = MI->getOperand(2);
1654 if (!OffsetOp.isImm()) {
1655 report("extract offset must be a constant", MI);
1656 break;
1657 }
1658
1659 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1660 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1661 if (SrcSize == DstSize)
1662 report("extract source must be larger than result", MI);
1663
1664 if (DstSize + OffsetOp.getImm() > SrcSize)
1665 report("extract reads past end of register", MI);
1666 break;
1667 }
1668 case TargetOpcode::G_INSERT: {
1669 const MachineOperand &SrcOp = MI->getOperand(2);
1670 if (!SrcOp.isReg()) {
1671 report("insert source must be a register", MI);
1672 break;
1673 }
1674
1675 const MachineOperand &OffsetOp = MI->getOperand(3);
1676 if (!OffsetOp.isImm()) {
1677 report("insert offset must be a constant", MI);
1678 break;
1679 }
1680
1681 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1682 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1683
1684 if (DstSize <= SrcSize)
1685 report("inserted size must be smaller than total register", MI);
1686
1687 if (SrcSize + OffsetOp.getImm() > DstSize)
1688 report("insert writes past end of register", MI);
1689
1690 break;
1691 }
1692 case TargetOpcode::G_JUMP_TABLE: {
1693 if (!MI->getOperand(1).isJTI())
1694 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1695 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1696 if (!DstTy.isPointer())
1697 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1698 break;
1699 }
1700 case TargetOpcode::G_BRJT: {
1701 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1702 report("G_BRJT src operand 0 must be a pointer type", MI);
1703
1704 if (!MI->getOperand(1).isJTI())
1705 report("G_BRJT src operand 1 must be a jump table index", MI);
1706
1707 const auto &IdxOp = MI->getOperand(2);
1708 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1709 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1710 break;
1711 }
1712 case TargetOpcode::G_INTRINSIC:
1713 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1714 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1715 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1716 // TODO: Should verify number of def and use operands, but the current
1717 // interface requires passing in IR types for mangling.
1718 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1719 if (!IntrIDOp.isIntrinsicID()) {
1720 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1721 break;
1722 }
1723
1724 if (!verifyGIntrinsicSideEffects(MI))
1725 break;
1726 if (!verifyGIntrinsicConvergence(MI))
1727 break;
1728
1729 break;
1730 }
1731 case TargetOpcode::G_SEXT_INREG: {
1732 if (!MI->getOperand(2).isImm()) {
1733 report("G_SEXT_INREG expects an immediate operand #2", MI);
1734 break;
1735 }
1736
1737 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1738 int64_t Imm = MI->getOperand(2).getImm();
1739 if (Imm <= 0)
1740 report("G_SEXT_INREG size must be >= 1", MI);
1741 if (Imm >= SrcTy.getScalarSizeInBits())
1742 report("G_SEXT_INREG size must be less than source bit width", MI);
1743 break;
1744 }
1745 case TargetOpcode::G_BSWAP: {
1746 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1747 if (DstTy.getScalarSizeInBits() % 16 != 0)
1748 report("G_BSWAP size must be a multiple of 16 bits", MI);
1749 break;
1750 }
1751 case TargetOpcode::G_VSCALE: {
1752 if (!MI->getOperand(1).isCImm()) {
1753 report("G_VSCALE operand must be cimm", MI);
1754 break;
1755 }
1756 if (MI->getOperand(1).getCImm()->isZero()) {
1757 report("G_VSCALE immediate cannot be zero", MI);
1758 break;
1759 }
1760 break;
1761 }
1762 case TargetOpcode::G_STEP_VECTOR: {
1763 if (!MI->getOperand(1).isCImm()) {
1764 report("operand must be cimm", MI);
1765 break;
1766 }
1767
1768 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1769 report("step must be > 0", MI);
1770 break;
1771 }
1772
1773 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1774 if (!DstTy.isScalableVector()) {
1775 report("Destination type must be a scalable vector", MI);
1776 break;
1777 }
1778
1779 // <vscale x 2 x p0>
1780 if (!DstTy.getElementType().isScalar()) {
1781 report("Destination element type must be scalar", MI);
1782 break;
1783 }
1784
1785 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1787 report("step bitwidth differs from result type element bitwidth", MI);
1788 break;
1789 }
1790 break;
1791 }
1792 case TargetOpcode::G_INSERT_SUBVECTOR: {
1793 const MachineOperand &Src0Op = MI->getOperand(1);
1794 if (!Src0Op.isReg()) {
1795 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1796 break;
1797 }
1798
1799 const MachineOperand &Src1Op = MI->getOperand(2);
1800 if (!Src1Op.isReg()) {
1801 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1802 break;
1803 }
1804
1805 const MachineOperand &IndexOp = MI->getOperand(3);
1806 if (!IndexOp.isImm()) {
1807 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1808 break;
1809 }
1810
1811 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1812 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1813
1814 if (!DstTy.isVector()) {
1815 report("Destination type must be a vector", MI);
1816 break;
1817 }
1818
1819 if (!Src1Ty.isVector()) {
1820 report("Second source must be a vector", MI);
1821 break;
1822 }
1823
1824 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1825 report("Element type of vectors must be the same", MI);
1826 break;
1827 }
1828
1829 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1830 report("Vector types must both be fixed or both be scalable", MI);
1831 break;
1832 }
1833
1835 DstTy.getElementCount())) {
1836 report("Second source must be smaller than destination vector", MI);
1837 break;
1838 }
1839
1840 uint64_t Idx = IndexOp.getImm();
1841 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1842 if (IndexOp.getImm() % Src1MinLen != 0) {
1843 report("Index must be a multiple of the second source vector's "
1844 "minimum vector length",
1845 MI);
1846 break;
1847 }
1848
1849 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1850 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1851 report("Subvector type and index must not cause insert to overrun the "
1852 "vector being inserted into",
1853 MI);
1854 break;
1855 }
1856
1857 break;
1858 }
1859 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1860 const MachineOperand &SrcOp = MI->getOperand(1);
1861 if (!SrcOp.isReg()) {
1862 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1863 break;
1864 }
1865
1866 const MachineOperand &IndexOp = MI->getOperand(2);
1867 if (!IndexOp.isImm()) {
1868 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1869 break;
1870 }
1871
1872 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1873 LLT SrcTy = MRI->getType(SrcOp.getReg());
1874
1875 if (!DstTy.isVector()) {
1876 report("Destination type must be a vector", MI);
1877 break;
1878 }
1879
1880 if (!SrcTy.isVector()) {
1881 report("Source must be a vector", MI);
1882 break;
1883 }
1884
1885 if (DstTy.getElementType() != SrcTy.getElementType()) {
1886 report("Element type of vectors must be the same", MI);
1887 break;
1888 }
1889
1890 if (SrcTy.isScalable() != DstTy.isScalable()) {
1891 report("Vector types must both be fixed or both be scalable", MI);
1892 break;
1893 }
1894
1896 SrcTy.getElementCount())) {
1897 report("Destination vector must be smaller than source vector", MI);
1898 break;
1899 }
1900
1901 uint64_t Idx = IndexOp.getImm();
1902 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1903 if (Idx % DstMinLen != 0) {
1904 report("Index must be a multiple of the destination vector's minimum "
1905 "vector length",
1906 MI);
1907 break;
1908 }
1909
1910 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1911 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1912 report("Destination type and index must not cause extract to overrun the "
1913 "source vector",
1914 MI);
1915 break;
1916 }
1917
1918 break;
1919 }
1920 case TargetOpcode::G_SHUFFLE_VECTOR: {
1921 const MachineOperand &MaskOp = MI->getOperand(3);
1922 if (!MaskOp.isShuffleMask()) {
1923 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1924 break;
1925 }
1926
1927 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1928 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1929 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1930
1931 if (Src0Ty != Src1Ty)
1932 report("Source operands must be the same type", MI);
1933
1934 if (Src0Ty.getScalarType() != DstTy.getScalarType()) {
1935 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1936 break;
1937 }
1938 if (!Src0Ty.isVector()) {
1939 report("G_SHUFFLE_VECTOR must have vector src", MI);
1940 break;
1941 }
1942 if (!DstTy.isVector()) {
1943 report("G_SHUFFLE_VECTOR must have vector dst", MI);
1944 break;
1945 }
1946
1947 // Don't check that all operands are vector because scalars are used in
1948 // place of 1 element vectors.
1949 int SrcNumElts = Src0Ty.getNumElements();
1950 int DstNumElts = DstTy.getNumElements();
1951
1952 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1953
1954 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1955 report("Wrong result type for shufflemask", MI);
1956
1957 for (int Idx : MaskIdxes) {
1958 if (Idx < 0)
1959 continue;
1960
1961 if (Idx >= 2 * SrcNumElts)
1962 report("Out of bounds shuffle index", MI);
1963 }
1964
1965 break;
1966 }
1967
1968 case TargetOpcode::G_SPLAT_VECTOR: {
1969 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1970 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1971
1972 if (!DstTy.isScalableVector()) {
1973 report("Destination type must be a scalable vector", MI);
1974 break;
1975 }
1976
1977 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1978 report("Source type must be a scalar or pointer", MI);
1979 break;
1980 }
1981
1983 SrcTy.getSizeInBits())) {
1984 report("Element type of the destination must be the same size or smaller "
1985 "than the source type",
1986 MI);
1987 break;
1988 }
1989
1990 break;
1991 }
1992 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1993 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1994 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1995 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1996
1997 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1998 report("Destination type must be a scalar or pointer", MI);
1999 break;
2000 }
2001
2002 if (!SrcTy.isVector()) {
2003 report("First source must be a vector", MI);
2004 break;
2005 }
2006
2007 auto TLI = MF->getSubtarget().getTargetLowering();
2008 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
2009 report("Index type must match VectorIdxTy", MI);
2010 break;
2011 }
2012
2013 break;
2014 }
2015 case TargetOpcode::G_INSERT_VECTOR_ELT: {
2016 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2017 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
2018 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
2019 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
2020
2021 if (!DstTy.isVector()) {
2022 report("Destination type must be a vector", MI);
2023 break;
2024 }
2025
2026 if (VecTy != DstTy) {
2027 report("Destination type and vector type must match", MI);
2028 break;
2029 }
2030
2031 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
2032 report("Inserted element must be a scalar or pointer", MI);
2033 break;
2034 }
2035
2036 auto TLI = MF->getSubtarget().getTargetLowering();
2037 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
2038 report("Index type must match VectorIdxTy", MI);
2039 break;
2040 }
2041
2042 break;
2043 }
2044 case TargetOpcode::G_DYN_STACKALLOC: {
2045 const MachineOperand &DstOp = MI->getOperand(0);
2046 const MachineOperand &AllocOp = MI->getOperand(1);
2047 const MachineOperand &AlignOp = MI->getOperand(2);
2048
2049 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2050 report("dst operand 0 must be a pointer type", MI);
2051 break;
2052 }
2053
2054 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2055 report("src operand 1 must be a scalar reg type", MI);
2056 break;
2057 }
2058
2059 if (!AlignOp.isImm()) {
2060 report("src operand 2 must be an immediate type", MI);
2061 break;
2062 }
2063 break;
2064 }
2065 case TargetOpcode::G_MEMCPY_INLINE:
2066 case TargetOpcode::G_MEMCPY:
2067 case TargetOpcode::G_MEMMOVE: {
2068 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2069 if (MMOs.size() != 2) {
2070 report("memcpy/memmove must have 2 memory operands", MI);
2071 break;
2072 }
2073
2074 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2075 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2076 report("wrong memory operand types", MI);
2077 break;
2078 }
2079
2080 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2081 report("inconsistent memory operand sizes", MI);
2082
2083 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2084 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2085
2086 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2087 report("memory instruction operand must be a pointer", MI);
2088 break;
2089 }
2090
2091 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2092 report("inconsistent store address space", MI);
2093 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2094 report("inconsistent load address space", MI);
2095
2096 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2097 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2098 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2099
2100 break;
2101 }
2102 case TargetOpcode::G_BZERO:
2103 case TargetOpcode::G_MEMSET: {
2104 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2105 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2106 if (MMOs.size() != 1) {
2107 report(Twine(Name, " must have 1 memory operand"), MI);
2108 break;
2109 }
2110
2111 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2112 report(Twine(Name, " memory operand must be a store"), MI);
2113 break;
2114 }
2115
2116 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2117 if (!DstPtrTy.isPointer()) {
2118 report(Twine(Name, " operand must be a pointer"), MI);
2119 break;
2120 }
2121
2122 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2123 report("inconsistent " + Twine(Name, " address space"), MI);
2124
2125 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2126 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2127 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2128
2129 break;
2130 }
2131 case TargetOpcode::G_UBSANTRAP: {
2132 const MachineOperand &KindOp = MI->getOperand(0);
2133 if (!MI->getOperand(0).isImm()) {
2134 report("Crash kind must be an immediate", &KindOp, 0);
2135 break;
2136 }
2137 int64_t Kind = MI->getOperand(0).getImm();
2138 if (!isInt<8>(Kind))
2139 report("Crash kind must be 8 bit wide", &KindOp, 0);
2140 break;
2141 }
2142 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2143 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2144 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2145 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2146 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2147 if (!DstTy.isScalar())
2148 report("Vector reduction requires a scalar destination type", MI);
2149 if (!Src1Ty.isScalar())
2150 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2151 if (!Src2Ty.isVector())
2152 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2153 break;
2154 }
2155 case TargetOpcode::G_VECREDUCE_FADD:
2156 case TargetOpcode::G_VECREDUCE_FMUL:
2157 case TargetOpcode::G_VECREDUCE_FMAX:
2158 case TargetOpcode::G_VECREDUCE_FMIN:
2159 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2160 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2161 case TargetOpcode::G_VECREDUCE_ADD:
2162 case TargetOpcode::G_VECREDUCE_MUL:
2163 case TargetOpcode::G_VECREDUCE_AND:
2164 case TargetOpcode::G_VECREDUCE_OR:
2165 case TargetOpcode::G_VECREDUCE_XOR:
2166 case TargetOpcode::G_VECREDUCE_SMAX:
2167 case TargetOpcode::G_VECREDUCE_SMIN:
2168 case TargetOpcode::G_VECREDUCE_UMAX:
2169 case TargetOpcode::G_VECREDUCE_UMIN: {
2170 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2171 if (!DstTy.isScalar())
2172 report("Vector reduction requires a scalar destination type", MI);
2173 break;
2174 }
2175
2176 case TargetOpcode::G_SBFX:
2177 case TargetOpcode::G_UBFX: {
2178 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2179 if (DstTy.isVector()) {
2180 report("Bitfield extraction is not supported on vectors", MI);
2181 break;
2182 }
2183 break;
2184 }
2185 case TargetOpcode::G_SHL:
2186 case TargetOpcode::G_LSHR:
2187 case TargetOpcode::G_ASHR:
2188 case TargetOpcode::G_ROTR:
2189 case TargetOpcode::G_ROTL: {
2190 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2191 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2192 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2193 report("Shifts and rotates require operands to be either all scalars or "
2194 "all vectors",
2195 MI);
2196 break;
2197 }
2198 break;
2199 }
2200 case TargetOpcode::G_LLROUND:
2201 case TargetOpcode::G_LROUND: {
2202 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2203 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2204 if (!DstTy.isValid() || !SrcTy.isValid())
2205 break;
2206 if (SrcTy.isPointer() || DstTy.isPointer()) {
2207 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2208 report(Twine(Op, " operand must not be a pointer type"), MI);
2209 } else if (SrcTy.isScalar()) {
2210 verifyAllRegOpsScalar(*MI, *MRI);
2211 break;
2212 } else if (SrcTy.isVector()) {
2213 verifyVectorElementMatch(SrcTy, DstTy, MI);
2214 break;
2215 }
2216 break;
2217 }
2218 case TargetOpcode::G_IS_FPCLASS: {
2219 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2220 LLT DestEltTy = DestTy.getScalarType();
2221 if (!DestEltTy.isScalar()) {
2222 report("Destination must be a scalar or vector of scalars", MI);
2223 break;
2224 }
2225 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2226 LLT SrcEltTy = SrcTy.getScalarType();
2227 if (!SrcEltTy.isScalar()) {
2228 report("Source must be a scalar or vector of scalars", MI);
2229 break;
2230 }
2231 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2232 break;
2233 const MachineOperand &TestMO = MI->getOperand(2);
2234 if (!TestMO.isImm()) {
2235 report("floating-point class set (operand 2) must be an immediate", MI);
2236 break;
2237 }
2238 int64_t Test = TestMO.getImm();
2240 report("Incorrect floating-point class set (operand 2)", MI);
2241 break;
2242 }
2243 break;
2244 }
2245 case TargetOpcode::G_PREFETCH: {
2246 const MachineOperand &AddrOp = MI->getOperand(0);
2247 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2248 report("addr operand must be a pointer", &AddrOp, 0);
2249 break;
2250 }
2251 const MachineOperand &RWOp = MI->getOperand(1);
2252 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2253 report("rw operand must be an immediate 0-1", &RWOp, 1);
2254 break;
2255 }
2256 const MachineOperand &LocalityOp = MI->getOperand(2);
2257 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2258 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2259 break;
2260 }
2261 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2262 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2263 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2264 break;
2265 }
2266 break;
2267 }
2268 case TargetOpcode::G_ASSERT_ALIGN: {
2269 if (MI->getOperand(2).getImm() < 1)
2270 report("alignment immediate must be >= 1", MI);
2271 break;
2272 }
2273 case TargetOpcode::G_CONSTANT_POOL: {
2274 if (!MI->getOperand(1).isCPI())
2275 report("Src operand 1 must be a constant pool index", MI);
2276 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2277 report("Dst operand 0 must be a pointer", MI);
2278 break;
2279 }
2280 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2281 const MachineOperand &AddrOp = MI->getOperand(1);
2282 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2283 report("addr operand must be a pointer", &AddrOp, 1);
2284 break;
2285 }
2286 case TargetOpcode::G_SMIN:
2287 case TargetOpcode::G_SMAX:
2288 case TargetOpcode::G_UMIN:
2289 case TargetOpcode::G_UMAX: {
2290 const LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2291 if (DstTy.isPointerOrPointerVector())
2292 report("Generic smin/smax/umin/umax does not support pointer operands",
2293 MI);
2294 break;
2295 }
2296 default:
2297 break;
2298 }
2299}
2300
2301void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2302 const MCInstrDesc &MCID = MI->getDesc();
2303 if (MI->getNumOperands() < MCID.getNumOperands()) {
2304 report("Too few operands", MI);
2305 OS << MCID.getNumOperands() << " operands expected, but "
2306 << MI->getNumOperands() << " given.\n";
2307 }
2308
2309 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2310 report("NoConvergent flag expected only on convergent instructions.", MI);
2311
2312 if (MI->isPHI()) {
2313 if (MF->getProperties().hasNoPHIs())
2314 report("Found PHI instruction with NoPHIs property set", MI);
2315
2316 if (FirstNonPHI)
2317 report("Found PHI instruction after non-PHI", MI);
2318 } else if (FirstNonPHI == nullptr)
2319 FirstNonPHI = MI;
2320
2321 // Check the tied operands.
2322 if (MI->isInlineAsm())
2323 verifyInlineAsm(MI);
2324
2325 // Check that unspillable terminators define a reg and have at most one use.
2326 if (TII->isUnspillableTerminator(MI)) {
2327 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2328 report("Unspillable Terminator does not define a reg", MI);
2329 Register Def = MI->getOperand(0).getReg();
2330 if (Def.isVirtual() && !MF->getProperties().hasNoPHIs() &&
2331 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2332 report("Unspillable Terminator expected to have at most one use!", MI);
2333 }
2334
2335 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2336 // DBG_VALUEs: these are convenient to use in tests, but should never get
2337 // generated.
2338 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2339 if (!MI->getDebugLoc())
2340 report("Missing DebugLoc for debug instruction", MI);
2341
2342 // Meta instructions should never be the subject of debug value tracking,
2343 // they don't create a value in the output program at all.
2344 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2345 report("Metadata instruction should not have a value tracking number", MI);
2346
2347 // Check the MachineMemOperands for basic consistency.
2348 for (MachineMemOperand *Op : MI->memoperands()) {
2349 if (Op->isLoad() && !MI->mayLoad())
2350 report("Missing mayLoad flag", MI);
2351 if (Op->isStore() && !MI->mayStore())
2352 report("Missing mayStore flag", MI);
2353 }
2354
2355 // Debug values must not have a slot index.
2356 // Other instructions must have one, unless they are inside a bundle.
2357 if (LiveInts) {
2358 bool mapped = !LiveInts->isNotInMIMap(*MI);
2359 if (MI->isDebugOrPseudoInstr()) {
2360 if (mapped)
2361 report("Debug instruction has a slot index", MI);
2362 } else if (MI->isInsideBundle()) {
2363 if (mapped)
2364 report("Instruction inside bundle has a slot index", MI);
2365 } else {
2366 if (!mapped)
2367 report("Missing slot index", MI);
2368 }
2369 }
2370
2371 unsigned Opc = MCID.getOpcode();
2373 verifyPreISelGenericInstruction(MI);
2374 return;
2375 }
2376
2378 if (!TII->verifyInstruction(*MI, ErrorInfo))
2379 report(ErrorInfo.data(), MI);
2380
2381 // Verify properties of various specific instruction types
2382 switch (MI->getOpcode()) {
2383 case TargetOpcode::COPY: {
2384 const MachineOperand &DstOp = MI->getOperand(0);
2385 const MachineOperand &SrcOp = MI->getOperand(1);
2386 const Register SrcReg = SrcOp.getReg();
2387 const Register DstReg = DstOp.getReg();
2388
2389 LLT DstTy = MRI->getType(DstReg);
2390 LLT SrcTy = MRI->getType(SrcReg);
2391 if (SrcTy.isValid() && DstTy.isValid()) {
2392 // If both types are valid, check that the types are the same.
2393 if (SrcTy != DstTy) {
2394 report("Copy Instruction is illegal with mismatching types", MI);
2395 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2396 }
2397
2398 break;
2399 }
2400
2401 if (!SrcTy.isValid() && !DstTy.isValid())
2402 break;
2403
2404 // If we have only one valid type, this is likely a copy between a virtual
2405 // and physical register.
2406 TypeSize SrcSize = TypeSize::getZero();
2407 TypeSize DstSize = TypeSize::getZero();
2408 if (SrcReg.isPhysical() && DstTy.isValid()) {
2409 const TargetRegisterClass *SrcRC =
2410 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2411 if (!SrcRC)
2412 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2413 } else {
2414 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2415 }
2416
2417 if (DstReg.isPhysical() && SrcTy.isValid()) {
2418 const TargetRegisterClass *DstRC =
2419 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2420 if (!DstRC)
2421 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2422 } else {
2423 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2424 }
2425
2426 // The next two checks allow COPY between physical and virtual registers,
2427 // when the virtual register has a scalable size and the physical register
2428 // has a fixed size. These checks allow COPY between *potentially*
2429 // mismatched sizes. However, once RegisterBankSelection occurs,
2430 // MachineVerifier should be able to resolve a fixed size for the scalable
2431 // vector, and at that point this function will know for sure whether the
2432 // sizes are mismatched and correctly report a size mismatch.
2433 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2434 !SrcSize.isScalable())
2435 break;
2436 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2437 !DstSize.isScalable())
2438 break;
2439
2440 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2441 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2442 report("Copy Instruction is illegal with mismatching sizes", MI);
2443 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2444 }
2445 }
2446 break;
2447 }
2448 case TargetOpcode::COPY_LANEMASK: {
2449 const MachineOperand &DstOp = MI->getOperand(0);
2450 const MachineOperand &SrcOp = MI->getOperand(1);
2451 const MachineOperand &LaneMaskOp = MI->getOperand(2);
2452 const Register SrcReg = SrcOp.getReg();
2453 const LaneBitmask LaneMask = LaneMaskOp.getLaneMask();
2454 LaneBitmask SrcMaxLaneMask = LaneBitmask::getAll();
2455
2456 if (DstOp.getSubReg())
2457 report("COPY_LANEMASK must not use a subregister index", &DstOp, 0);
2458
2459 if (SrcOp.getSubReg())
2460 report("COPY_LANEMASK must not use a subregister index", &SrcOp, 1);
2461
2462 if (LaneMask.none())
2463 report("COPY_LANEMASK must read at least one lane", MI);
2464
2465 if (SrcReg.isPhysical()) {
2466 const TargetRegisterClass *SrcRC = TRI->getMinimalPhysRegClass(SrcReg);
2467 if (SrcRC)
2468 SrcMaxLaneMask = SrcRC->getLaneMask();
2469 } else {
2470 SrcMaxLaneMask = MRI->getMaxLaneMaskForVReg(SrcReg);
2471 }
2472
2473 // COPY_LANEMASK should be used only for partial copy. For full
2474 // copy, one should strictly use the COPY instruction.
2475 if (SrcMaxLaneMask == LaneMask)
2476 report("COPY_LANEMASK cannot be used to do full copy", MI);
2477
2478 // If LaneMask is greater than the SrcMaxLaneMask, it implies
2479 // COPY_LANEMASK is attempting to read from the lanes that
2480 // don't exists in the source register.
2481 if (SrcMaxLaneMask < LaneMask)
2482 report("COPY_LANEMASK attempts to read from the lanes that "
2483 "don't exist in the source register",
2484 MI);
2485
2486 break;
2487 }
2488 case TargetOpcode::STATEPOINT: {
2489 StatepointOpers SO(MI);
2490 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2491 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2492 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2493 report("meta operands to STATEPOINT not constant!", MI);
2494 break;
2495 }
2496
2497 auto VerifyStackMapConstant = [&](unsigned Offset) {
2498 if (Offset >= MI->getNumOperands()) {
2499 report("stack map constant to STATEPOINT is out of range!", MI);
2500 return;
2501 }
2502 if (!MI->getOperand(Offset - 1).isImm() ||
2503 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2504 !MI->getOperand(Offset).isImm())
2505 report("stack map constant to STATEPOINT not well formed!", MI);
2506 };
2507 VerifyStackMapConstant(SO.getCCIdx());
2508 VerifyStackMapConstant(SO.getFlagsIdx());
2509 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2510 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2511 VerifyStackMapConstant(SO.getNumAllocaIdx());
2512 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2513
2514 // Verify that all explicit statepoint defs are tied to gc operands as
2515 // they are expected to be a relocation of gc operands.
2516 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2517 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2518 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2519 unsigned UseOpIdx;
2520 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2521 report("STATEPOINT defs expected to be tied", MI);
2522 break;
2523 }
2524 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2525 report("STATEPOINT def tied to non-gc operand", MI);
2526 break;
2527 }
2528 }
2529
2530 // TODO: verify we have properly encoded deopt arguments
2531 } break;
2532 case TargetOpcode::INSERT_SUBREG: {
2533 unsigned InsertedSize;
2534 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2535 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2536 else
2537 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2538 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2539 if (SubRegSize < InsertedSize) {
2540 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2541 "size than the subreg it was inserted into", MI);
2542 break;
2543 }
2544 } break;
2545 case TargetOpcode::REG_SEQUENCE: {
2546 unsigned NumOps = MI->getNumOperands();
2547 if (!(NumOps & 1)) {
2548 report("Invalid number of operands for REG_SEQUENCE", MI);
2549 break;
2550 }
2551
2552 for (unsigned I = 1; I != NumOps; I += 2) {
2553 const MachineOperand &RegOp = MI->getOperand(I);
2554 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2555
2556 if (!RegOp.isReg())
2557 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2558
2559 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2560 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2561 report("Invalid subregister index operand for REG_SEQUENCE",
2562 &SubRegOp, I + 1);
2563 }
2564 }
2565
2566 Register DstReg = MI->getOperand(0).getReg();
2567 if (DstReg.isPhysical())
2568 report("REG_SEQUENCE does not support physical register results", MI);
2569
2570 if (MI->getOperand(0).getSubReg())
2571 report("Invalid subreg result for REG_SEQUENCE", MI);
2572
2573 break;
2574 }
2575 }
2576}
2577
2578void
2579MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2580 const MachineInstr *MI = MO->getParent();
2581 const MCInstrDesc &MCID = MI->getDesc();
2582 unsigned NumDefs = MCID.getNumDefs();
2583 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2584 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2585
2586 // The first MCID.NumDefs operands must be explicit register defines
2587 if (MONum < NumDefs) {
2588 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2589 if (!MO->isReg())
2590 report("Explicit definition must be a register", MO, MONum);
2591 else if (!MO->isDef() && !MCOI.isOptionalDef())
2592 report("Explicit definition marked as use", MO, MONum);
2593 else if (MO->isImplicit())
2594 report("Explicit definition marked as implicit", MO, MONum);
2595 } else if (MONum < MCID.getNumOperands()) {
2596 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2597 // Don't check if it's the last operand in a variadic instruction. See,
2598 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2599 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2600 if (!IsOptional) {
2601 if (MO->isReg()) {
2602 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2603 report("Explicit operand marked as def", MO, MONum);
2604 if (MO->isImplicit())
2605 report("Explicit operand marked as implicit", MO, MONum);
2606 }
2607
2608 // Check that an instruction has register operands only as expected.
2609 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2610 !MO->isReg() && !MO->isFI())
2611 report("Expected a register operand.", MO, MONum);
2612 if (MO->isReg()) {
2613 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2614 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2615 !TII->isPCRelRegisterOperandLegal(*MO)))
2616 report("Expected a non-register operand.", MO, MONum);
2617 }
2618 }
2619
2620 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2621 if (TiedTo != -1) {
2622 if (!MO->isReg())
2623 report("Tied use must be a register", MO, MONum);
2624 else if (!MO->isTied())
2625 report("Operand should be tied", MO, MONum);
2626 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2627 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2628 else if (MO->getReg().isPhysical()) {
2629 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2630 if (!MOTied.isReg())
2631 report("Tied counterpart must be a register", &MOTied, TiedTo);
2632 else if (MOTied.getReg().isPhysical() &&
2633 MO->getReg() != MOTied.getReg())
2634 report("Tied physical registers must match.", &MOTied, TiedTo);
2635 }
2636 } else if (MO->isReg() && MO->isTied())
2637 report("Explicit operand should not be tied", MO, MONum);
2638 } else if (!MI->isVariadic()) {
2639 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2640 if (!MO->isValidExcessOperand())
2641 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2642 }
2643
2644 // Verify earlyClobber def operand
2645 if (MCID.getOperandConstraint(MONum, MCOI::EARLY_CLOBBER) != -1) {
2646 if (!MO->isReg())
2647 report("Early clobber must be a register", MI);
2648 if (!MO->isEarlyClobber())
2649 report("Missing earlyClobber flag", MI);
2650 }
2651
2652 switch (MO->getType()) {
2654 // Verify debug flag on debug instructions. Check this first because reg0
2655 // indicates an undefined debug value.
2656 if (MI->isDebugInstr() && MO->isUse()) {
2657 if (!MO->isDebug())
2658 report("Register operand must be marked debug", MO, MONum);
2659 } else if (MO->isDebug()) {
2660 report("Register operand must not be marked debug", MO, MONum);
2661 }
2662
2663 const Register Reg = MO->getReg();
2664 if (!Reg)
2665 return;
2666 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2667 checkLiveness(MO, MONum);
2668
2669 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2670 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2671 report("Undef virtual register def operands require a subregister", MO, MONum);
2672
2673 // Verify the consistency of tied operands.
2674 if (MO->isTied()) {
2675 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2676 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2677 if (!OtherMO.isReg())
2678 report("Must be tied to a register", MO, MONum);
2679 if (!OtherMO.isTied())
2680 report("Missing tie flags on tied operand", MO, MONum);
2681 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2682 report("Inconsistent tie links", MO, MONum);
2683 if (MONum < MCID.getNumDefs()) {
2684 if (OtherIdx < MCID.getNumOperands()) {
2685 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2686 report("Explicit def tied to explicit use without tie constraint",
2687 MO, MONum);
2688 } else {
2689 if (!OtherMO.isImplicit())
2690 report("Explicit def should be tied to implicit use", MO, MONum);
2691 }
2692 }
2693 }
2694
2695 // Verify two-address constraints after the twoaddressinstruction pass.
2696 // Both twoaddressinstruction pass and phi-node-elimination pass call
2697 // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2698 // after twoaddressinstruction pass not after phi-node-elimination pass. So
2699 // we shouldn't use the IsSSA as the condition, we should based on
2700 // TiedOpsRewritten property to verify two-address constraints, this
2701 // property will be set in twoaddressinstruction pass.
2702 unsigned DefIdx;
2703 if (MF->getProperties().hasTiedOpsRewritten() && MO->isUse() &&
2704 MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2705 Reg != MI->getOperand(DefIdx).getReg())
2706 report("Two-address instruction operands must be identical", MO, MONum);
2707
2708 // Check register classes.
2709 unsigned SubIdx = MO->getSubReg();
2710
2711 if (Reg.isPhysical()) {
2712 if (SubIdx) {
2713 report("Illegal subregister index for physical register", MO, MONum);
2714 return;
2715 }
2716 if (MONum < MCID.getNumOperands()) {
2717 if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
2718 if (!DRC->contains(Reg)) {
2719 report("Illegal physical register for instruction", MO, MONum);
2720 OS << printReg(Reg, TRI) << " is not a "
2721 << TRI->getRegClassName(DRC) << " register.\n";
2722 }
2723 }
2724 }
2725 if (MO->isRenamable()) {
2726 if (MRI->isReserved(Reg)) {
2727 report("isRenamable set on reserved register", MO, MONum);
2728 return;
2729 }
2730 }
2731 } else {
2732 // Virtual register.
2733 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2734 if (!RC) {
2735 // This is a generic virtual register.
2736
2737 // Do not allow undef uses for generic virtual registers. This ensures
2738 // getVRegDef can never fail and return null on a generic register.
2739 //
2740 // FIXME: This restriction should probably be broadened to all SSA
2741 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2742 // run on the SSA function just before phi elimination.
2743 if (MO->isUndef())
2744 report("Generic virtual register use cannot be undef", MO, MONum);
2745
2746 // Debug value instruction is permitted to use undefined vregs.
2747 // This is a performance measure to skip the overhead of immediately
2748 // pruning unused debug operands. The final undef substitution occurs
2749 // when debug values are allocated in LDVImpl::handleDebugValue, so
2750 // these verifications always apply after this pass.
2751 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2752 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2753 // If we're post-Select, we can't have gvregs anymore.
2754 if (isFunctionSelected) {
2755 report("Generic virtual register invalid in a Selected function",
2756 MO, MONum);
2757 return;
2758 }
2759
2760 // The gvreg must have a type and it must not have a SubIdx.
2761 LLT Ty = MRI->getType(Reg);
2762 if (!Ty.isValid()) {
2763 report("Generic virtual register must have a valid type", MO,
2764 MONum);
2765 return;
2766 }
2767
2768 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2769 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2770
2771 // If we're post-RegBankSelect, the gvreg must have a bank.
2772 if (!RegBank && isFunctionRegBankSelected) {
2773 report("Generic virtual register must have a bank in a "
2774 "RegBankSelected function",
2775 MO, MONum);
2776 return;
2777 }
2778
2779 // Make sure the register fits into its register bank if any.
2780 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2781 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2782 report("Register bank is too small for virtual register", MO,
2783 MONum);
2784 OS << "Register bank " << RegBank->getName() << " too small("
2785 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2786 << Ty.getSizeInBits() << "-bits\n";
2787 return;
2788 }
2789 }
2790
2791 if (SubIdx) {
2792 report("Generic virtual register does not allow subregister index", MO,
2793 MONum);
2794 return;
2795 }
2796
2797 // If this is a target specific instruction and this operand
2798 // has register class constraint, the virtual register must
2799 // comply to it.
2800 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2801 MONum < MCID.getNumOperands() && TII->getRegClass(MCID, MONum)) {
2802 report("Virtual register does not match instruction constraint", MO,
2803 MONum);
2804 OS << "Expect register class "
2805 << TRI->getRegClassName(TII->getRegClass(MCID, MONum))
2806 << " but got nothing\n";
2807 return;
2808 }
2809
2810 break;
2811 }
2812 // Validate that SubIdx can be applied to the virtual register.
2813 if (!TRI->isSubRegValidForRegClass(RC, SubIdx)) {
2814 report("Invalid subregister index for virtual register", MO, MONum);
2815 OS << "Register class " << TRI->getRegClassName(RC)
2816 << " does not support subreg index "
2817 << TRI->getSubRegIndexName(SubIdx) << '\n';
2818 return;
2819 }
2820 if (MONum >= MCID.getNumOperands())
2821 break;
2822 const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum);
2823 if (!DRC)
2824 break;
2825
2826 // If SubIdx is used, verify that RC with SubIdx can be used for an
2827 // operand of class DRC. This is valid if for every register in RC, the
2828 // register obtained by applying SubIdx to it is in DRC.
2829 if (SubIdx && TRI->getMatchingSuperRegClass(RC, DRC, SubIdx) != RC) {
2830 report("Illegal virtual register for instruction", MO, MONum);
2831 OS << TRI->getRegClassName(RC) << "." << TRI->getSubRegIndexName(SubIdx)
2832 << " cannot be used for " << TRI->getRegClassName(DRC)
2833 << " operands.";
2834 }
2835
2836 // If no SubIdx is used, verify that RC is a sub-class of DRC.
2837 if (!SubIdx && !RC->hasSuperClassEq(DRC)) {
2838 report("Illegal virtual register for instruction", MO, MONum);
2839 OS << "Expected a " << TRI->getRegClassName(DRC)
2840 << " register, but got a " << TRI->getRegClassName(RC)
2841 << " register\n";
2842 }
2843 }
2844 break;
2845 }
2846
2848 regMasks.push_back(MO->getRegMask());
2849 break;
2850
2852 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2853 report("PHI operand is not in the CFG", MO, MONum);
2854 break;
2855
2857 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2858 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2859 int FI = MO->getIndex();
2860 LiveInterval &LI = LiveStks->getInterval(FI);
2861 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2862
2863 bool MayStore = MI->mayStore();
2864 bool MayLoad = MI->mayLoad();
2865 // For a memory-to-memory move, we need to check if the frame
2866 // index is used for storing or loading, by inspecting the
2867 // memory operands.
2868 if (MayStore && MayLoad) {
2869 for (const MachineMemOperand *MMO : MI->memoperands()) {
2871 MMO->getPseudoValue());
2872 if (!Value || Value->getFrameIndex() != FI)
2873 continue;
2874
2875 if (MMO->isStore())
2876 MayLoad = false;
2877 else
2878 MayStore = false;
2879 break;
2880 }
2881 if (MayLoad == MayStore)
2882 report("Missing fixed stack memoperand.", MI);
2883 }
2884 if (MayLoad && !LI.liveAt(Idx.getRegSlot(true))) {
2885 report("Instruction loads from dead spill slot", MO, MONum);
2886 OS << "Live stack: " << LI << '\n';
2887 }
2888 if (MayStore && !LI.liveAt(Idx.getRegSlot())) {
2889 report("Instruction stores to dead spill slot", MO, MONum);
2890 OS << "Live stack: " << LI << '\n';
2891 }
2892 }
2893 break;
2894
2896 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2897 report("CFI instruction has invalid index", MO, MONum);
2898 break;
2899
2900 default:
2901 break;
2902 }
2903}
2904
2905void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2906 unsigned MONum, SlotIndex UseIdx,
2907 const LiveRange &LR,
2908 VirtRegOrUnit VRegOrUnit,
2909 LaneBitmask LaneMask) {
2910 const MachineInstr *MI = MO->getParent();
2911
2912 if (!LR.verify()) {
2913 report("invalid live range", MO, MONum);
2914 report_context_liverange(LR);
2915 report_context_vreg_regunit(VRegOrUnit);
2916 report_context(UseIdx);
2917 return;
2918 }
2919
2920 LiveQueryResult LRQ = LR.Query(UseIdx);
2921 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2922 // Check if we have a segment at the use, note however that we only need one
2923 // live subregister range, the others may be dead.
2924 if (!HasValue && LaneMask.none()) {
2925 report("No live segment at use", MO, MONum);
2926 report_context_liverange(LR);
2927 report_context_vreg_regunit(VRegOrUnit);
2928 report_context(UseIdx);
2929 }
2930 if (MO->isKill() && !LRQ.isKill()) {
2931 report("Live range continues after kill flag", MO, MONum);
2932 report_context_liverange(LR);
2933 report_context_vreg_regunit(VRegOrUnit);
2934 if (LaneMask.any())
2935 report_context_lanemask(LaneMask);
2936 report_context(UseIdx);
2937 }
2938}
2939
/// Verify that live range \p LR of \p VRegOrUnit has a value number whose
/// definition slot is consistent with operand \p MO (operand number \p MONum)
/// defining the register at slot \p DefIdx, and that a dead flag on \p MO
/// agrees with the range actually ending here. \p SubRangeCheck is set when
/// \p LR is a subregister lane range covering \p LaneMask.
2940void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2941 unsigned MONum, SlotIndex DefIdx,
2942 const LiveRange &LR,
2943 VirtRegOrUnit VRegOrUnit,
2944 bool SubRangeCheck,
2945 LaneBitmask LaneMask) {
 // Structural validity of the range. Note: unlike checkLivenessAtUse there
 // is no early return here, so the remaining checks still run even when the
 // range is malformed.
2946 if (!LR.verify()) {
2947 report("invalid live range", MO, MONum);
2948 report_context_liverange(LR);
2949 report_context_vreg_regunit(VRegOrUnit);
2950 if (LaneMask.any())
2951 report_context_lanemask(LaneMask);
2952 report_context(DefIdx);
2953 }
2954
 // A value number must be live at the def slot, and its def index must match
 // this operand's def slot (with the early-clobber tolerance explained in
 // the comment below).
2955 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2956 // The LR can correspond to the whole reg and its def slot is not obliged
2957 // to be the same as the MO' def slot. E.g. when we check here "normal"
2958 // subreg MO but there is other EC subreg MO in the same instruction so the
2959 // whole reg has EC def slot and differs from the currently checked MO' def
2960 // slot. For example:
2961 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2962 // Check that there is an early-clobber def of the same superregister
2963 // somewhere is performed in visitMachineFunctionAfter()
2964 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2965 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2966 (VNI->def != DefIdx &&
2967 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2968 report("Inconsistent valno->def", MO, MONum);
2969 report_context_liverange(LR);
2970 report_context_vreg_regunit(VRegOrUnit);
2971 if (LaneMask.any())
2972 report_context_lanemask(LaneMask);
2973 report_context(*VNI);
2974 report_context(DefIdx);
2975 }
2976 } else {
2977 report("No live segment at def", MO, MONum);
2978 report_context_liverange(LR);
2979 report_context_vreg_regunit(VRegOrUnit);
2980 if (LaneMask.any())
2981 report_context_lanemask(LaneMask);
2982 report_context(DefIdx);
2983 }
2984 // Check that, if the dead def flag is present, LiveInts agree.
2985 if (MO->isDead()) {
2986 LiveQueryResult LRQ = LR.Query(DefIdx);
2987 if (!LRQ.isDeadDef()) {
2988 assert(VRegOrUnit.isVirtualReg() && "Expecting a virtual register.");
2989 // A dead subreg def only tells us that the specific subreg is dead. There
2990 // could be other non-dead defs of other subregs, or we could have other
2991 // parts of the register being live through the instruction. So unless we
2992 // are checking liveness for a subrange it is ok for the live range to
2993 // continue, given that we have a dead def of a subregister.
2994 if (SubRangeCheck || MO->getSubReg() == 0) {
2995 report("Live range continues after dead def flag", MO, MONum);
2996 report_context_liverange(LR);
2997 report_context_vreg_regunit(VRegOrUnit);
2998 if (LaneMask.any())
2999 report_context_lanemask(LaneMask);
3000 }
3001 }
3002 }
3003}
3004
/// Check liveness-related properties of register operand \p MO (operand
/// number \p MONum of its parent instruction): kill flags against
/// LiveVariables, use/def slots against LiveIntervals (including subregister
/// lane subranges), the running regsLive set, and SSA single-def/no-subreg
/// rules.
3005void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
3006 const MachineInstr *MI = MO->getParent();
3007 const Register Reg = MO->getReg();
3008 const unsigned SubRegIdx = MO->getSubReg();
3009
 // Look up the main live interval for virtual registers up front; it is used
 // by both the use and the def checks below.
3010 const LiveInterval *LI = nullptr;
3011 if (LiveInts && Reg.isVirtual()) {
3012 if (LiveInts->hasInterval(Reg)) {
3013 LI = &LiveInts->getInterval(Reg);
3014 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
 // NOTE(review): the final clause of this condition (original line 3015,
 // presumably a !LI->hasSubRanges() test, judging by the report text) is
 // missing from this copy — restore it from upstream.
3016 report("Live interval for subreg operand has no subranges", MO, MONum);
3017 } else {
3018 report("Virtual register has no live interval", MO, MONum);
3019 }
3020 }
3021
3022 // Both use and def operands can read a register.
3023 if (MO->readsReg()) {
3024 if (MO->isKill())
3025 addRegWithSubRegs(regsKilled, Reg);
3026
3027 // Check that LiveVars knows this kill (unless we are inside a bundle, in
3028 // which case we have already checked that LiveVars knows any kills on the
3029 // bundle header instead).
3030 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
3031 !MI->isBundledWithPred()) {
 // NOTE(review): the declaration of VI (original line 3032, a
 // LiveVariables VarInfo fetched for Reg, judging by the use below) is
 // missing from this copy — restore it from upstream.
3033 if (!is_contained(VI.Kills, MI))
3034 report("Kill missing from LiveVariables", MO, MONum);
3035 }
3036
3037 // Check LiveInts liveness and kill.
3038 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3039 SlotIndex UseIdx;
3040 if (MI->isPHI()) {
3041 // PHI use occurs on the edge, so check for live out here instead.
3042 UseIdx = LiveInts->getMBBEndIdx(
3043 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
3044 } else {
3045 UseIdx = LiveInts->getInstructionIndex(*MI);
3046 }
3047 // Check the cached regunit intervals.
3048 if (Reg.isPhysical() && !isReserved(Reg)) {
3049 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
3050 if (MRI->isReservedRegUnit(Unit))
3051 continue;
3052 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
3053 checkLivenessAtUse(MO, MONum, UseIdx, *LR, VirtRegOrUnit(Unit));
3054 }
3055 }
3056
3057 if (Reg.isVirtual()) {
3058 // This is a virtual register interval.
3059 checkLivenessAtUse(MO, MONum, UseIdx, *LI, VirtRegOrUnit(Reg));
3060
 // For a subreg use, each overlapping lane subrange is checked
 // individually, and at least one covered lane must be live-in.
3061 if (LI->hasSubRanges() && !MO->isDef()) {
3062 LaneBitmask MOMask = SubRegIdx != 0
3063 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3064 : MRI->getMaxLaneMaskForVReg(Reg);
3065 LaneBitmask LiveInMask;
3066 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3067 if ((MOMask & SR.LaneMask).none())
3068 continue;
3069 checkLivenessAtUse(MO, MONum, UseIdx, SR, VirtRegOrUnit(Reg),
3070 SR.LaneMask);
3071 LiveQueryResult LRQ = SR.Query(UseIdx);
3072 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
3073 LiveInMask |= SR.LaneMask;
3074 }
3075 // At least parts of the register has to be live at the use.
3076 if ((LiveInMask & MOMask).none()) {
3077 report("No live subrange at use", MO, MONum);
3078 report_context(*LI);
3079 report_context(UseIdx);
3080 }
3081 // For PHIs all lanes should be live
3082 if (MI->isPHI() && LiveInMask != MOMask) {
3083 report("Not all lanes of PHI source live at use", MO, MONum);
3084 report_context(*LI);
3085 report_context(UseIdx);
3086 }
3087 }
3088 }
3089 }
3090
3091 // Use of a dead register.
3092 if (!regsLive.count(Reg)) {
3093 if (Reg.isPhysical()) {
3094 // Reserved registers may be used even when 'dead'.
3095 bool Bad = !isReserved(Reg);
3096 // We are fine if just any subregister has a defined value.
3097 if (Bad) {
3098
3099 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
3100 if (regsLive.count(SubReg)) {
3101 Bad = false;
3102 break;
3103 }
3104 }
3105 }
3106 // If there is an additional implicit-use of a super register we stop
3107 // here. By definition we are fine if the super register is not
3108 // (completely) dead, if the complete super register is dead we will
3109 // get a report for its operand.
3110 if (Bad) {
3111 for (const MachineOperand &MOP : MI->uses()) {
3112 if (!MOP.isReg() || !MOP.isImplicit())
3113 continue;
3114
3115 if (!MOP.getReg().isPhysical())
3116 continue;
3117
 // MOP is a super register of Reg iff every regunit of Reg is also
 // a regunit of MOP.
3118 if (MOP.getReg() != Reg &&
3119 all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
3120 return llvm::is_contained(TRI->regunits(MOP.getReg()),
3121 RegUnit);
3122 }))
3123 Bad = false;
3124 }
3125 }
3126 if (Bad)
3127 report("Using an undefined physical register", MO, MONum);
3128 } else if (MRI->def_empty(Reg)) {
3129 report("Reading virtual register without a def", MO, MONum);
3130 } else {
3131 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3132 // We don't know which virtual registers are live in, so only complain
3133 // if vreg was killed in this MBB. Otherwise keep track of vregs that
3134 // must be live in. PHI instructions are handled separately.
3135 if (MInfo.regsKilled.count(Reg))
3136 report("Using a killed virtual register", MO, MONum);
3137 else if (!MI->isPHI())
3138 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
3139 }
3140 }
3141 }
3142
3143 if (MO->isDef()) {
3144 // Register defined.
3145 // TODO: verify that earlyclobber ops are not used.
3146 if (MO->isDead())
3147 addRegWithSubRegs(regsDead, Reg);
3148 else
3149 addRegWithSubRegs(regsDefined, Reg);
3150
3151 // Verify SSA form.
3152 if (MRI->isSSA() && Reg.isVirtual()) {
3153 if (!MRI->hasOneDef(Reg))
3154 report("Multiple virtual register defs in SSA form", MO, MONum);
3155 if (MO->getSubReg())
3156 report("Subreg def in SSA form", MO, MONum);
3157 }
3158
3159 // Check LiveInts for a live segment, but only for virtual registers.
3160 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
 // Early-clobber defs occupy the early-clobber slot of the instruction.
3161 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3162 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3163
3164 if (Reg.isVirtual()) {
3165 checkLivenessAtDef(MO, MONum, DefIdx, *LI, VirtRegOrUnit(Reg));
3166
3167 if (LI->hasSubRanges()) {
3168 LaneBitmask MOMask = SubRegIdx != 0
3169 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3170 : MRI->getMaxLaneMaskForVReg(Reg);
3171 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3172 if ((SR.LaneMask & MOMask).none())
3173 continue;
3174 checkLivenessAtDef(MO, MONum, DefIdx, SR, VirtRegOrUnit(Reg), true,
3175 SR.LaneMask);
3176 }
3177 }
3178 }
3179 }
3180 }
3181}
3182
3183// This function gets called after visiting all instructions in a bundle. The
3184// argument points to the bundle header.
3185// Normal stand-alone instructions are also considered 'bundles', and this
3186// function is called for all of them.
3187void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3188 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
 // Fold the kills seen in this bundle into the block summary, then retire
 // the killed registers from the running live set.
3189 set_union(MInfo.regsKilled, regsKilled);
3190 set_subtract(regsLive, regsKilled); regsKilled.clear();
3191 // Kill any masked registers.
3192 while (!regMasks.empty()) {
3193 const uint32_t *Mask = regMasks.pop_back_val();
3194 for (Register Reg : regsLive)
3195 if (Reg.isPhysical() &&
 // NOTE(review): the clobber test (original line 3196, presumably a
 // MachineOperand::clobbersPhysReg(Mask, ...) call) is missing from this
 // copy — restore it from upstream.
3197 regsDead.push_back(Reg);
3198 }
 // Apply this bundle's deaths and definitions to the running live set.
3199 set_subtract(regsLive, regsDead); regsDead.clear();
3200 set_union(regsLive, regsDefined); regsDefined.clear();
3201}
3202
3203void
3204MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3205 MBBInfoMap[MBB].regsLiveOut = regsLive;
3206 regsLive.clear();
3207
3208 if (Indexes) {
3209 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3210 if (!(stop > lastIndex)) {
3211 report("Block ends before last instruction index", MBB);
3212 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3213 << '\n';
3214 }
3215 lastIndex = stop;
3216 }
3217}
3218
3219namespace {
3220// This implements a set of registers that serves as a filter: can filter other
3221// sets by passing through elements not in the filter and blocking those that
3222// are. Any filter implicitly includes the full set of physical registers upon
3223// creation, thus filtering them all out. The filter itself as a set only grows,
3224// and needs to be as efficient as possible.
3225struct VRegFilter {
3226 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3227 // no duplicates. Both virtual and physical registers are fine.
3228 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3229 SmallVector<Register, 0> VRegsBuffer;
3230 filterAndAdd(FromRegSet, VRegsBuffer);
3231 }
3232 // Filter \p FromRegSet through the filter and append passed elements into \p
3233 // ToVRegs. All elements appended are then added to the filter itself.
3234 // \returns true if anything changed.
3235 template <typename RegSetT>
3236 bool filterAndAdd(const RegSetT &FromRegSet,
3237 SmallVectorImpl<Register> &ToVRegs) {
3238 unsigned SparseUniverse = Sparse.size();
3239 unsigned NewSparseUniverse = SparseUniverse;
3240 unsigned NewDenseSize = Dense.size();
3241 size_t Begin = ToVRegs.size();
3242 for (Register Reg : FromRegSet) {
3243 if (!Reg.isVirtual())
3244 continue;
3245 unsigned Index = Reg.virtRegIndex();
3246 if (Index < SparseUniverseMax) {
3247 if (Index < SparseUniverse && Sparse.test(Index))
3248 continue;
3249 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3250 } else {
3251 if (Dense.count(Reg))
3252 continue;
3253 ++NewDenseSize;
3254 }
3255 ToVRegs.push_back(Reg);
3256 }
3257 size_t End = ToVRegs.size();
3258 if (Begin == End)
3259 return false;
3260 // Reserving space in sets once performs better than doing so continuously
3261 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3262 // tuned all the way down) and double iteration (the second one is over a
3263 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3264 Sparse.resize(NewSparseUniverse);
3265 Dense.reserve(NewDenseSize);
3266 for (unsigned I = Begin; I < End; ++I) {
3267 Register Reg = ToVRegs[I];
3268 unsigned Index = Reg.virtRegIndex();
3269 if (Index < SparseUniverseMax)
3270 Sparse.set(Index);
3271 else
3272 Dense.insert(Reg);
3273 }
3274 return true;
3275 }
3276
3277private:
3278 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3279 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
3280 // are tracked by Dense. The only purpose of the threshold and the Dense set
3281 // is to have a reasonably growing memory usage in pathological cases (large
3282 // number of very sparse VRegFilter instances live at the same time). In
3283 // practice even in the worst-by-execution time cases having all elements
3284 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3285 // space efficient than if tracked by Dense. The threshold is set to keep the
3286 // worst-case memory usage within 2x of figures determined empirically for
3287 // "all Dense" scenario in such worst-by-execution-time cases.
3288 BitVector Sparse;
3289 DenseSet<Register> Dense;
3290};
3291
3292// Implements both a transfer function and a (binary, in-place) join operator
3293// for a dataflow over register sets with set union join and filtering transfer
3294// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3295// Maintains out_b as its state, allowing for O(n) iteration over it at any
3296// time, where n is the size of the set (as opposed to O(U) where U is the
3297// universe). filter_b implicitly contains all physical registers at all times.
3298class FilteringVRegSet {
3299 VRegFilter Filter;
 // NOTE(review): the member holding the set contents (original line 3300 —
 // judging by the uses below, a SmallVector<Register, 0> named VRegs) is
 // missing from this copy; restore it from upstream.
3301
3302public:
3303 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3304 // Both virtual and physical registers are fine.
3305 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3306 Filter.add(RS);
3307 }
3308 // Passes \p RS through the filter_b (transfer function) and adds what's left
3309 // to itself (out_b).
3310 template <typename RegSetT> bool add(const RegSetT &RS) {
3311 // Double-duty the Filter: to maintain VRegs a set (and the join operation
3312 // a set union) just add everything being added here to the Filter as well.
3313 return Filter.filterAndAdd(RS, VRegs);
3314 }
3315 using const_iterator = decltype(VRegs)::const_iterator;
3316 const_iterator begin() const { return VRegs.begin(); }
3317 const_iterator end() const { return VRegs.end(); }
3318 size_t size() const { return VRegs.size(); }
3319};
3320} // namespace
3321
3322// Calculate the largest possible vregsPassed sets. These are the registers that
3323// can pass through an MBB live, but may not be live every time. It is assumed
3324// that all vregsPassed sets are empty before the call.
3325void MachineVerifier::calcRegsPassed() {
3326 if (MF->empty())
3327 // ReversePostOrderTraversal doesn't handle empty functions.
3328 return;
3329
 // Single pass over the blocks; a register passes through a block if it is
 // live out of some reachable predecessor (or passed through one) and is
 // neither killed nor redefined/live-out here (those are in the filter).
3330 for (const MachineBasicBlock *MB :
 // NOTE(review): the traversal expression (original line 3331) is missing
 // from this copy; per the comment above it is presumably a
 // ReversePostOrderTraversal over MF — restore it from upstream.
3332 FilteringVRegSet VRegs;
3333 BBInfo &Info = MBBInfoMap[MB];
3334 assert(Info.reachable);
3335
3336 VRegs.addToFilter(Info.regsKilled);
3337 VRegs.addToFilter(Info.regsLiveOut);
3338 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3339 const BBInfo &PredInfo = MBBInfoMap[Pred];
3340 if (!PredInfo.reachable)
3341 continue;
3342
3343 VRegs.add(PredInfo.regsLiveOut);
3344 VRegs.add(PredInfo.vregsPassed);
3345 }
3346 Info.vregsPassed.reserve(VRegs.size());
3347 Info.vregsPassed.insert_range(VRegs);
3348 }
3349}
3350
3351// Calculate the set of virtual registers that must be passed through each basic
3352// block in order to satisfy the requirements of successor blocks. This is very
3353// similar to calcRegsPassed, only backwards.
3354void MachineVerifier::calcRegsRequired() {
3355 // First push live-in regs to predecessors' vregsRequired.
 // NOTE(review): the declaration of the `todo` worklist (original line 3356
 // — judging by the insert/erase/begin uses below, a set of basic blocks)
 // is missing from this copy; restore it from upstream.
3357 for (const auto &MBB : *MF) {
3358 BBInfo &MInfo = MBBInfoMap[&MBB];
3359 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3360 BBInfo &PInfo = MBBInfoMap[Pred];
3361 if (PInfo.addRequired(MInfo.vregsLiveIn))
3362 todo.insert(Pred);
3363 }
3364
3365 // Handle the PHI node.
3366 for (const MachineInstr &MI : MBB.phis()) {
3367 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3368 // Skip those Operands which are undef regs or not regs.
3369 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3370 continue;
3371
3372 // Get register and predecessor for one PHI edge.
3373 Register Reg = MI.getOperand(i).getReg();
3374 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3375
3376 BBInfo &PInfo = MBBInfoMap[Pred];
3377 if (PInfo.addRequired(Reg))
3378 todo.insert(Pred);
3379 }
3380 }
3381 }
3382
3383 // Iteratively push vregsRequired to predecessors. This will converge to the
3384 // same final state regardless of DenseSet iteration order.
3385 while (!todo.empty()) {
3386 const MachineBasicBlock *MBB = *todo.begin();
3387 todo.erase(MBB);
3388 BBInfo &MInfo = MBBInfoMap[MBB];
3389 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
 // Self-loops cannot add anything new; skip them to guarantee progress.
3390 if (Pred == MBB)
3391 continue;
3392 BBInfo &SInfo = MBBInfoMap[Pred];
3393 if (SInfo.addRequired(MInfo.vregsRequired))
3394 todo.insert(Pred);
3395 }
3396 }
3397}
3398
3399// Check PHI instructions at the beginning of MBB. It is assumed that
3400// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3401void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3402 BBInfo &MInfo = MBBInfoMap[&MBB];
3403
 // NOTE(review): the declaration of `seen` (original line 3404 — judging by
 // the clear/insert/count uses below, a small set of predecessor blocks) is
 // missing from this copy; restore it from upstream.
3405 for (const MachineInstr &Phi : MBB) {
 // PHIs are grouped at the top of the block; stop at the first non-PHI.
3406 if (!Phi.isPHI())
3407 break;
3408 seen.clear();
3409
 // Operand 0 must be a plain virtual-register def.
3410 const MachineOperand &MODef = Phi.getOperand(0);
3411 if (!MODef.isReg() || !MODef.isDef()) {
3412 report("Expected first PHI operand to be a register def", &MODef, 0);
3413 continue;
3414 }
3415 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3416 MODef.isEarlyClobber() || MODef.isDebug())
3417 report("Unexpected flag on PHI operand", &MODef, 0);
3418 Register DefReg = MODef.getReg();
3419 if (!DefReg.isVirtual())
3420 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3421
 // Remaining operands come in (register, predecessor-block) pairs.
3422 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3423 const MachineOperand &MO0 = Phi.getOperand(I);
3424 if (!MO0.isReg()) {
3425 report("Expected PHI operand to be a register", &MO0, I);
3426 continue;
3427 }
3428 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3429 MO0.isDebug() || MO0.isTied())
3430 report("Unexpected flag on PHI operand", &MO0, I);
3431
3432 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3433 if (!MO1.isMBB()) {
3434 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3435 continue;
3436 }
3437
3438 const MachineBasicBlock &Pre = *MO1.getMBB();
3439 if (!Pre.isSuccessor(&MBB)) {
3440 report("PHI input is not a predecessor block", &MO1, I + 1);
3441 continue;
3442 }
3443
 // Liveness of the incoming value is only meaningful on reachable
 // blocks (dataflow facts were only computed for those).
3444 if (MInfo.reachable) {
3445 seen.insert(&Pre);
3446 BBInfo &PrInfo = MBBInfoMap[&Pre];
3447 if (!MO0.isUndef() && PrInfo.reachable &&
3448 !PrInfo.isLiveOut(MO0.getReg()))
3449 report("PHI operand is not live-out from predecessor", &MO0, I);
3450 }
3451 }
3452
3453 // Did we see all predecessors?
3454 if (MInfo.reachable) {
3455 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3456 if (!seen.count(Pred)) {
3457 report("Missing PHI operand", &Phi);
3458 OS << printMBBReference(*Pred)
3459 << " is a predecessor according to the CFG.\n";
3460 }
3461 }
3462 }
3463 }
3464}
3465
/// Run the convergence-control verifier over every block and instruction of
/// \p MF, reporting failures through \p FailureCB. The dominator tree is
/// only (re)computed when convergence tokens were actually seen, since the
/// final verify step needs it.
/// NOTE(review): two lines appear to be missing from this copy — the
/// declaration line carrying the function name and first parameters
/// (original line 3467, presumably including the MachineFunction and
/// dominator-tree arguments used below) and the declaration of the verifier
/// object `CV` (original line 3470). Restore them from upstream.
3466static void
3468 std::function<void(const Twine &Message)> FailureCB,
3469 raw_ostream &OS) {
3471 CV.initialize(&OS, FailureCB, MF);
3472
3473 for (const auto &MBB : MF) {
3474 CV.visit(MBB);
3475 for (const auto &MI : MBB.instrs())
3476 CV.visit(MI);
3477 }
3478
 // Only pay for dominator-tree recomputation when tokens are present.
3479 if (CV.sawTokens()) {
3480 DT.recalculate(const_cast<MachineFunction &>(MF));
3481 CV.verify(DT);
3482 }
3483}
3484
/// Whole-function checks run after all blocks have been visited: convergence
/// control, PHI operands, dataflow consistency (vregsPassed/vregsRequired),
/// LiveVariables/LiveIntervals verification, physical live-in lists, call
/// site info, and duplicate debug value-tracking numbers.
3485void MachineVerifier::visitMachineFunctionAfter() {
3486 auto FailureCB = [this](const Twine &Message) {
3487 report(Message.str().c_str(), MF);
3488 };
3489 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3490
3491 calcRegsPassed();
3492
3493 for (const MachineBasicBlock &MBB : *MF)
3494 checkPHIOps(MBB);
3495
3496 // Now check liveness info if available
3497 calcRegsRequired();
3498
3499 // Check for killed virtual registers that should be live out.
3500 for (const auto &MBB : *MF) {
3501 BBInfo &MInfo = MBBInfoMap[&MBB];
3502 for (Register VReg : MInfo.vregsRequired)
3503 if (MInfo.regsKilled.count(VReg)) {
3504 report("Virtual register killed in block, but needed live out.", &MBB);
3505 OS << "Virtual register " << printReg(VReg)
3506 << " is used after the block.\n";
3507 }
3508 }
3509
 // Any register still required at the entry block has a use that no def
 // dominates.
3510 if (!MF->empty()) {
3511 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3512 for (Register VReg : MInfo.vregsRequired) {
3513 report("Virtual register defs don't dominate all uses.", MF);
3514 report_context_vreg(VReg);
3515 }
3516 }
3517
3518 if (LiveVars)
3519 verifyLiveVariables();
3520 if (LiveInts)
3521 verifyLiveIntervals();
3522
3523 // Check live-in list of each MBB. If a register is live into MBB, check
3524 // that the register is in regsLiveOut of each predecessor block. Since
3525 // this must come from a definition in the predecessor or its live-in
3526 // list, this will catch a live-through case where the predecessor does not
3527 // have the register in its live-in list. This currently only checks
3528 // registers that have no aliases, are not allocatable and are not
3529 // reserved, which could mean a condition code register for instance.
3530 if (MRI->tracksLiveness())
3531 for (const auto &MBB : *MF)
 // NOTE(review): the inner loop header (original line 3532) is missing
 // from this copy; judging by the PhysReg use below it presumably
 // iterates MBB's live-in entries — restore it from upstream.
3533 MCRegister LiveInReg = P.PhysReg;
3534 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3535 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3536 continue;
3537 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3538 BBInfo &PInfo = MBBInfoMap[Pred];
3539 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3540 report("Live in register not found to be live out from predecessor.",
3541 &MBB);
3542 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3543 << printMBBReference(*Pred) << '\n';
3544 }
3545 }
3546 }
3547
3548 for (auto CSInfo : MF->getCallSitesInfo())
3549 if (!CSInfo.first->isCall())
3550 report("Call site info referencing instruction that is not call", MF);
3551
3552 // If there's debug-info, check that we don't have any duplicate value
3553 // tracking numbers.
3554 if (MF->getFunction().getSubprogram()) {
3555 DenseSet<unsigned> SeenNumbers;
3556 for (const auto &MBB : *MF) {
3557 for (const auto &MI : MBB) {
3558 if (auto Num = MI.peekDebugInstrNum()) {
3559 auto Result = SeenNumbers.insert((unsigned)Num);
3560 if (!Result.second)
3561 report("Instruction has a duplicated value tracking number", &MI);
3562 }
3563 }
3564 }
3565 }
3566}
3567
/// Cross-check LiveVariables' AliveBlocks against the verifier's own
/// vregsRequired dataflow for every virtual register: the two must agree on
/// exactly which blocks each register lives through.
3568void MachineVerifier::verifyLiveVariables() {
3569 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3570 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
 // NOTE(review): two lines (original 3571-3572) are missing from this copy
 // — judging by the uses below, the derivation of `Reg` from index I and
 // the LiveVariables VarInfo `VI` for it; restore them from upstream.
3573 for (const auto &MBB : *MF) {
3574 BBInfo &MInfo = MBBInfoMap[&MBB];
3575
3576 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3577 if (MInfo.vregsRequired.count(Reg)) {
3578 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3579 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3580 OS << "Virtual register " << printReg(Reg)
3581 << " must be live through the block.\n";
3582 }
3583 } else {
3584 if (VI.AliveBlocks.test(MBB.getNumber())) {
3585 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3586 OS << "Virtual register " << printReg(Reg)
3587 << " is not needed live through the block.\n";
3588 }
3589 }
3590 }
3591 }
3592}
3593
/// Verify every virtual-register live interval known to LiveIntervals, plus
/// all cached regunit live ranges. Registers with remaining uses/defs must
/// have an interval; unused leftovers from spilling/splitting are skipped.
3594void MachineVerifier::verifyLiveIntervals() {
3595 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3596 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
 // NOTE(review): the derivation of `Reg` from index I (original line 3597)
 // is missing from this copy — restore it from upstream.
3598
3599 // Spilling and splitting may leave unused registers around. Skip them.
3600 if (MRI->reg_nodbg_empty(Reg))
3601 continue;
3602
3603 if (!LiveInts->hasInterval(Reg)) {
3604 report("Missing live interval for virtual register", MF);
3605 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3606 continue;
3607 }
3608
3609 const LiveInterval &LI = LiveInts->getInterval(Reg);
3610 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3611 verifyLiveInterval(LI);
3612 }
3613
3614 // Verify all the cached regunit intervals.
3615 for (MCRegUnit Unit : TRI->regunits())
3616 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
3617 verifyLiveRange(*LR, VirtRegOrUnit(Unit));
3618}
3619
3620void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3621 const VNInfo *VNI,
3622 VirtRegOrUnit VRegOrUnit,
3623 LaneBitmask LaneMask) {
3624 if (VNI->isUnused())
3625 return;
3626
3627 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3628
3629 if (!DefVNI) {
3630 report("Value not live at VNInfo def and not marked unused", MF);
3631 report_context(LR, VRegOrUnit, LaneMask);
3632 report_context(*VNI);
3633 return;
3634 }
3635
3636 if (DefVNI != VNI) {
3637 report("Live segment at def has different VNInfo", MF);
3638 report_context(LR, VRegOrUnit, LaneMask);
3639 report_context(*VNI);
3640 return;
3641 }
3642
3643 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3644 if (!MBB) {
3645 report("Invalid VNInfo definition index", MF);
3646 report_context(LR, VRegOrUnit, LaneMask);
3647 report_context(*VNI);
3648 return;
3649 }
3650
3651 if (VNI->isPHIDef()) {
3652 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3653 report("PHIDef VNInfo is not defined at MBB start", MBB);
3654 report_context(LR, VRegOrUnit, LaneMask);
3655 report_context(*VNI);
3656 }
3657 return;
3658 }
3659
3660 // Non-PHI def.
3661 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3662 if (!MI) {
3663 report("No instruction at VNInfo def index", MBB);
3664 report_context(LR, VRegOrUnit, LaneMask);
3665 report_context(*VNI);
3666 return;
3667 }
3668
3669 bool hasDef = false;
3670 bool isEarlyClobber = false;
3671 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3672 if (!MOI->isReg() || !MOI->isDef())
3673 continue;
3674 if (VRegOrUnit.isVirtualReg()) {
3675 if (MOI->getReg() != VRegOrUnit.asVirtualReg())
3676 continue;
3677 } else {
3678 if (!MOI->getReg().isPhysical() ||
3679 !TRI->hasRegUnit(MOI->getReg(), VRegOrUnit.asMCRegUnit()))
3680 continue;
3681 }
3682 if (LaneMask.any() &&
3683 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3684 continue;
3685 hasDef = true;
3686 if (MOI->isEarlyClobber())
3687 isEarlyClobber = true;
3688 }
3689
3690 if (!hasDef) {
3691 report("Defining instruction does not modify register", MI);
3692 report_context(LR, VRegOrUnit, LaneMask);
3693 report_context(*VNI);
3694 }
3695
3696 // Early clobber defs begin at USE slots, but other defs must begin at
3697 // DEF slots.
3698 if (isEarlyClobber) {
3699 if (!VNI->def.isEarlyClobber()) {
3700 report("Early clobber def must be at an early-clobber slot", MBB);
3701 report_context(LR, VRegOrUnit, LaneMask);
3702 report_context(*VNI);
3703 }
3704 } else if (!VNI->def.isRegister()) {
3705 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3706 report_context(LR, VRegOrUnit, LaneMask);
3707 report_context(*VNI);
3708 }
3709}
3710
3711void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3713 VirtRegOrUnit VRegOrUnit,
3714 LaneBitmask LaneMask) {
3715 const LiveRange::Segment &S = *I;
3716 const VNInfo *VNI = S.valno;
3717 assert(VNI && "Live segment has no valno");
3718
3719 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3720 report("Foreign valno in live segment", MF);
3721 report_context(LR, VRegOrUnit, LaneMask);
3722 report_context(S);
3723 report_context(*VNI);
3724 }
3725
3726 if (VNI->isUnused()) {
3727 report("Live segment valno is marked unused", MF);
3728 report_context(LR, VRegOrUnit, LaneMask);
3729 report_context(S);
3730 }
3731
3732 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3733 if (!MBB) {
3734 report("Bad start of live segment, no basic block", MF);
3735 report_context(LR, VRegOrUnit, LaneMask);
3736 report_context(S);
3737 return;
3738 }
3739 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3740 if (S.start != MBBStartIdx && S.start != VNI->def) {
3741 report("Live segment must begin at MBB entry or valno def", MBB);
3742 report_context(LR, VRegOrUnit, LaneMask);
3743 report_context(S);
3744 }
3745
3746 const MachineBasicBlock *EndMBB =
3747 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3748 if (!EndMBB) {
3749 report("Bad end of live segment, no basic block", MF);
3750 report_context(LR, VRegOrUnit, LaneMask);
3751 report_context(S);
3752 return;
3753 }
3754
3755 // Checks for non-live-out segments.
3756 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3757 // RegUnit intervals are allowed dead phis.
3758 if (!VRegOrUnit.isVirtualReg() && VNI->isPHIDef() && S.start == VNI->def &&
3759 S.end == VNI->def.getDeadSlot())
3760 return;
3761
3762 // The live segment is ending inside EndMBB
3763 const MachineInstr *MI =
3764 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3765 if (!MI) {
3766 report("Live segment doesn't end at a valid instruction", EndMBB);
3767 report_context(LR, VRegOrUnit, LaneMask);
3768 report_context(S);
3769 return;
3770 }
3771
3772 // The block slot must refer to a basic block boundary.
3773 if (S.end.isBlock()) {
3774 report("Live segment ends at B slot of an instruction", EndMBB);
3775 report_context(LR, VRegOrUnit, LaneMask);
3776 report_context(S);
3777 }
3778
3779 if (S.end.isDead()) {
3780 // Segment ends on the dead slot.
3781 // That means there must be a dead def.
3782 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3783 report("Live segment ending at dead slot spans instructions", EndMBB);
3784 report_context(LR, VRegOrUnit, LaneMask);
3785 report_context(S);
3786 }
3787 }
3788
3789 // After tied operands are rewritten, a live segment can only end at an
3790 // early-clobber slot if it is being redefined by an early-clobber def.
3791 // TODO: Before tied operands are rewritten, a live segment can only end at
3792 // an early-clobber slot if the last use is tied to an early-clobber def.
3793 if (MF->getProperties().hasTiedOpsRewritten() && S.end.isEarlyClobber()) {
3794 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3795 report("Live segment ending at early clobber slot must be "
3796 "redefined by an EC def in the same instruction",
3797 EndMBB);
3798 report_context(LR, VRegOrUnit, LaneMask);
3799 report_context(S);
3800 }
3801 }
3802
3803 // The following checks only apply to virtual registers. Physreg liveness
3804 // is too weird to check.
3805 if (VRegOrUnit.isVirtualReg()) {
3806 // A live segment can end with either a redefinition, a kill flag on a
3807 // use, or a dead flag on a def.
3808 bool hasRead = false;
3809 bool hasSubRegDef = false;
3810 bool hasDeadDef = false;
3811 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3812 if (!MOI->isReg() || MOI->getReg() != VRegOrUnit.asVirtualReg())
3813 continue;
3814 unsigned Sub = MOI->getSubReg();
3815 LaneBitmask SLM =
3816 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3817 if (MOI->isDef()) {
3818 if (Sub != 0) {
3819 hasSubRegDef = true;
3820 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3821 // mask for subregister defs. Read-undef defs will be handled by
3822 // readsReg below.
3823 SLM = ~SLM;
3824 }
3825 if (MOI->isDead())
3826 hasDeadDef = true;
3827 }
3828 if (LaneMask.any() && (LaneMask & SLM).none())
3829 continue;
3830 if (MOI->readsReg())
3831 hasRead = true;
3832 }
3833 if (S.end.isDead()) {
3834 // Make sure that the corresponding machine operand for a "dead" live
3835 // range has the dead flag. We cannot perform this check for subregister
3836 // liveranges as partially dead values are allowed.
3837 if (LaneMask.none() && !hasDeadDef) {
3838 report(
3839 "Instruction ending live segment on dead slot has no dead flag",
3840 MI);
3841 report_context(LR, VRegOrUnit, LaneMask);
3842 report_context(S);
3843 }
3844 } else {
3845 if (!hasRead) {
3846 // When tracking subregister liveness, the main range must start new
3847 // values on partial register writes, even if there is no read.
3848 if (!MRI->shouldTrackSubRegLiveness(VRegOrUnit.asVirtualReg()) ||
3849 LaneMask.any() || !hasSubRegDef) {
3850 report("Instruction ending live segment doesn't read the register",
3851 MI);
3852 report_context(LR, VRegOrUnit, LaneMask);
3853 report_context(S);
3854 }
3855 }
3856 }
3857 }
3858 }
3859
3860 // Now check all the basic blocks in this live segment.
3862 // Is this live segment the beginning of a non-PHIDef VN?
3863 if (S.start == VNI->def && !VNI->isPHIDef()) {
3864 // Not live-in to any blocks.
3865 if (MBB == EndMBB)
3866 return;
3867 // Skip this block.
3868 ++MFI;
3869 }
3870
3872 if (LaneMask.any()) {
3873 LiveInterval &OwnerLI = LiveInts->getInterval(VRegOrUnit.asVirtualReg());
3874 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3875 }
3876
3877 while (true) {
3878 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3879 // We don't know how to track physregs into a landing pad.
3880 if (!VRegOrUnit.isVirtualReg() && MFI->isEHPad()) {
3881 if (&*MFI == EndMBB)
3882 break;
3883 ++MFI;
3884 continue;
3885 }
3886
3887 // Is VNI a PHI-def in the current block?
3888 bool IsPHI = VNI->isPHIDef() &&
3889 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3890
3891 // Check that VNI is live-out of all predecessors.
3892 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3893 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3894 // Predecessor of landing pad live-out on last call.
3895 if (MFI->isEHPad()) {
3896 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3897 if (MI.isCall()) {
3898 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3899 break;
3900 }
3901 }
3902 }
3903 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3904
3905 // All predecessors must have a live-out value. However for a phi
3906 // instruction with subregister intervals
3907 // only one of the subregisters (not necessarily the current one) needs to
3908 // be defined.
3909 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3910 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3911 continue;
3912 report("Register not marked live out of predecessor", Pred);
3913 report_context(LR, VRegOrUnit, LaneMask);
3914 report_context(*VNI);
3915 OS << " live into " << printMBBReference(*MFI) << '@'
3916 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3917 << '\n';
3918 continue;
3919 }
3920
3921 // Only PHI-defs can take different predecessor values.
3922 if (!IsPHI && PVNI != VNI) {
3923 report("Different value live out of predecessor", Pred);
3924 report_context(LR, VRegOrUnit, LaneMask);
3925 OS << "Valno #" << PVNI->id << " live out of "
3926 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3927 << " live into " << printMBBReference(*MFI) << '@'
3928 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3929 }
3930 }
3931 if (&*MFI == EndMBB)
3932 break;
3933 ++MFI;
3934 }
3935}
3936
3937void MachineVerifier::verifyLiveRange(const LiveRange &LR,
3938 VirtRegOrUnit VRegOrUnit,
3939 LaneBitmask LaneMask) {
3940 for (const VNInfo *VNI : LR.valnos)
3941 verifyLiveRangeValue(LR, VNI, VRegOrUnit, LaneMask);
3942
3943 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3944 verifyLiveRangeSegment(LR, I, VRegOrUnit, LaneMask);
3945}
3946
3947void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3948 Register Reg = LI.reg();
3949 assert(Reg.isVirtual());
3950 verifyLiveRange(LI, VirtRegOrUnit(Reg));
3951
3952 if (LI.hasSubRanges()) {
3954 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3955 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3956 if ((Mask & SR.LaneMask).any()) {
3957 report("Lane masks of sub ranges overlap in live interval", MF);
3958 report_context(LI);
3959 }
3960 if ((SR.LaneMask & ~MaxMask).any()) {
3961 report("Subrange lanemask is invalid", MF);
3962 report_context(LI);
3963 }
3964 if (SR.empty()) {
3965 report("Subrange must not be empty", MF);
3966 report_context(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3967 }
3968 Mask |= SR.LaneMask;
3969 verifyLiveRange(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3970 if (!LI.covers(SR)) {
3971 report("A Subrange is not covered by the main range", MF);
3972 report_context(LI);
3973 }
3974 }
3975 }
3976
3977 // Check the LI only has one connected component.
3978 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3979 unsigned NumComp = ConEQ.Classify(LI);
3980 if (NumComp > 1) {
3981 report("Multiple connected components in live interval", MF);
3982 report_context(LI);
3983 for (unsigned comp = 0; comp != NumComp; ++comp) {
3984 OS << comp << ": valnos";
3985 for (const VNInfo *I : LI.valnos)
3986 if (comp == ConEQ.getEqClass(I))
3987 OS << ' ' << I->id;
3988 OS << '\n';
3989 }
3990 }
3991}
3992
namespace {

/// Stack-pointer state at the entry and exit of a basic block.
///
/// FrameSetup and FrameDestroy can have zero adjustment, so a single integer
/// cannot tell a FrameSetup from a FrameDestroy when the value is zero.
/// Pair each integer with a bool to capture the full stack state.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryAdj, int ExitAdj, bool SetupOnEntry,
                 bool SetupOnExit)
      : EntryValue(EntryAdj), ExitValue(ExitAdj), EntryIsSetup(SetupOnEntry),
        ExitIsSetup(SetupOnExit) {}

  // Stack adjustment at block entry/exit. Can be negative, which means we are
  // setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  // Whether a FrameSetup is pending (unmatched by a FrameDestroy) at block
  // entry/exit.
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};

} // end anonymous namespace
4013
4014/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
4015/// by a FrameDestroy <n>, stack adjustments are identical on all
4016/// CFG edges to a merge point, and frame is destroyed at end of a return block.
4017void MachineVerifier::verifyStackFrame() {
4018 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
4019 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
4020 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
4021 return;
4022
4024 SPState.resize(MF->getNumBlockIDs());
4026
4027 // Visit the MBBs in DFS order.
4028 for (df_ext_iterator<const MachineFunction *,
4030 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
4031 DFI != DFE; ++DFI) {
4032 const MachineBasicBlock *MBB = *DFI;
4033
4034 StackStateOfBB BBState;
4035 // Check the exit state of the DFS stack predecessor.
4036 if (DFI.getPathLength() >= 2) {
4037 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
4038 assert(Reachable.count(StackPred) &&
4039 "DFS stack predecessor is already visited.\n");
4040 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
4041 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
4042 BBState.ExitValue = BBState.EntryValue;
4043 BBState.ExitIsSetup = BBState.EntryIsSetup;
4044 }
4045
4046 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
4047 report("Call frame size on entry does not match value computed from "
4048 "predecessor",
4049 MBB);
4050 OS << "Call frame size on entry " << MBB->getCallFrameSize()
4051 << " does not match value computed from predecessor "
4052 << -BBState.EntryValue << '\n';
4053 }
4054
4055 // Update stack state by checking contents of MBB.
4056 for (const auto &I : *MBB) {
4057 if (I.getOpcode() == FrameSetupOpcode) {
4058 if (BBState.ExitIsSetup)
4059 report("FrameSetup is after another FrameSetup", &I);
4060 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4061 report("AdjustsStack not set in presence of a frame pseudo "
4062 "instruction.", &I);
4063 BBState.ExitValue -= TII->getFrameTotalSize(I);
4064 BBState.ExitIsSetup = true;
4065 }
4066
4067 if (I.getOpcode() == FrameDestroyOpcode) {
4068 int Size = TII->getFrameTotalSize(I);
4069 if (!BBState.ExitIsSetup)
4070 report("FrameDestroy is not after a FrameSetup", &I);
4071 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
4072 BBState.ExitValue;
4073 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
4074 report("FrameDestroy <n> is after FrameSetup <m>", &I);
4075 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
4076 << AbsSPAdj << ">.\n";
4077 }
4078 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4079 report("AdjustsStack not set in presence of a frame pseudo "
4080 "instruction.", &I);
4081 BBState.ExitValue += Size;
4082 BBState.ExitIsSetup = false;
4083 }
4084 }
4085 SPState[MBB->getNumber()] = BBState;
4086
4087 // Make sure the exit state of any predecessor is consistent with the entry
4088 // state.
4089 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4090 if (Reachable.count(Pred) &&
4091 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4092 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4093 report("The exit stack state of a predecessor is inconsistent.", MBB);
4094 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4095 << SPState[Pred->getNumber()].ExitValue << ", "
4096 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4097 << printMBBReference(*MBB) << " has entry state ("
4098 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4099 }
4100 }
4101
4102 // Make sure the entry state of any successor is consistent with the exit
4103 // state.
4104 for (const MachineBasicBlock *Succ : MBB->successors()) {
4105 if (Reachable.count(Succ) &&
4106 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4107 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4108 report("The entry stack state of a successor is inconsistent.", MBB);
4109 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4110 << SPState[Succ->getNumber()].EntryValue << ", "
4111 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4112 << printMBBReference(*MBB) << " has exit state ("
4113 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4114 }
4115 }
4116
4117 // Make sure a basic block with return ends with zero stack adjustment.
4118 if (!MBB->empty() && MBB->back().isReturn()) {
4119 if (BBState.ExitIsSetup)
4120 report("A return block ends with a FrameSetup.", MBB);
4121 if (BBState.ExitValue)
4122 report("A return block ends with a nonzero stack adjustment.", MBB);
4123 }
4124 }
4125}
4126
4127void MachineVerifier::verifyStackProtector() {
4128 const MachineFrameInfo &MFI = MF->getFrameInfo();
4129 if (!MFI.hasStackProtectorIndex())
4130 return;
4131 // Only applicable when the offsets of frame objects have been determined,
4132 // which is indicated by a non-zero stack size.
4133 if (!MFI.getStackSize())
4134 return;
4135 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4136 bool StackGrowsDown =
4138 unsigned FI = MFI.getStackProtectorIndex();
4139 int64_t SPStart = MFI.getObjectOffset(FI);
4140 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4141 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4142 if (I == FI)
4143 continue;
4144 if (MFI.isDeadObjectIndex(I))
4145 continue;
4146 // FIXME: Skip non-default stack objects, as some targets may place them
4147 // above the stack protector. This is a workaround for the fact that
4148 // backends such as AArch64 may place SVE stack objects *above* the stack
4149 // protector.
4151 continue;
4152 // Skip variable-sized objects because they do not have a fixed offset.
4154 continue;
4155 // FIXME: Skip spill slots which may be allocated above the stack protector.
4156 // Ideally this would only skip callee-saved registers, but we don't have
4157 // that information here. For example, spill-slots used for scavenging are
4158 // not described in CalleeSavedInfo.
4159 if (MFI.isSpillSlotObjectIndex(I))
4160 continue;
4161 int64_t ObjStart = MFI.getObjectOffset(I);
4162 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
4163 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4164 report("Stack protector overlaps with another stack object", MF);
4165 break;
4166 }
4167 if ((StackGrowsDown && SPStart <= ObjStart) ||
4168 (!StackGrowsDown && SPStart >= ObjStart)) {
4169 report("Stack protector is not the top-most object on the stack", MF);
4170 break;
4171 }
4172 }
4173}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
This file declares the MIR specialization of the GenericConvergenceVerifier template.
Register Reg
Register const TargetRegisterInfo * TRI
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
Promote Memory to Register
Definition Mem2Reg.cpp:110
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
SI Optimize VGPR LiveRange
std::unordered_set< BasicBlock * > BlockSet
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:278
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM Basic Block Representation.
Definition BasicBlock.h:62
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:687
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
void clear()
clear - Removes all bits from the bitvector.
Definition BitVector.h:354
iterator_range< const_set_bits_iterator > set_bits() const
Definition BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition Constants.h:198
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition Error.h:354
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
const Function & getFunction() const
Definition Function.h:166
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
constexpr Kind getKind() const
LLT getScalarType() const
constexpr bool isPointerVector() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr ElementCount getElementCount() const
constexpr unsigned getAddressSpace() const
constexpr bool isPointerOrPointerVector() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
Register reg() const
bool hasSubRanges() const
Returns true if subregister liveness information is available.
iterator_range< subrange_iterator > subranges()
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
bool isDeadDef() const
Return true if this instruction has a dead def.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
bool isKill() const
Return true if the live-in value is killed by this instruction.
static LLVM_ABI bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Segments::const_iterator const_iterator
bool liveAt(SlotIndex index) const
LLVM_ABI bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
iterator begin()
VNInfoList valnos
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
ExceptionHandling getExceptionHandlingType() const
Definition MCAsmInfo.h:646
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
LLVM_ABI BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
LaneBitmask getLaneMask() const
unsigned getCFIIndex() const
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
use_nodbg_iterator use_nodbg_begin(Register RegNo) const
LLVM_ABI void verifyUseLists() const
Verify the use list of all registers.
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
static use_nodbg_iterator use_nodbg_end()
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
const BitVector & getReservedRegs() const
getReservedRegs - Returns a reference to the frozen set of reserved registers.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
bool reservedRegsFrozen() const
reservedRegsFrozen - Returns true after freezeReservedRegs() was called to ensure the set of reserved...
bool def_empty(Register RegNo) const
def_empty - Return true if there are no instructions defining the specified register (it may be live-...
bool reg_nodbg_empty(Register RegNo) const
reg_nodbg_empty - Return true if the only instructions using or defining Reg are Debug instructions.
const RegisterBank * getRegBankOrNull(Register Reg) const
Return the register bank of Reg, or null if Reg has not been assigned a register bank or has been ass...
bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const
Returns true if liveness for register class RC should be tracked at the subregister level.
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
LLVM_ABI bool isReservedRegUnit(MCRegUnit Unit) const
Returns true when the given register unit is considered reserved.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
LLVM_ABI LaneBitmask getMaxLaneMaskForVReg(Register Reg) const
Returns a mask covering all bits that can appear in lane masks of subregisters of the virtual registe...
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition Pass.cpp:140
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
const char * getName() const
Get a user friendly name of this register bank.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition Register.h:72
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition Register.h:87
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr unsigned id() const
Definition Register.h:100
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
SlotIndexes pass.
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
size_type size() const
Definition SmallPtrSet.h:99
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Register getReg() const
MI-level Statepoint operands.
Definition StackMaps.h:159
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
LaneBitmask getLaneMask() const
Returns the combination of all lane masks of register in this class.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
static constexpr TypeSize getZero()
Definition TypeSize.h:349
VNInfo - Value Number Information.
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
LLVM Value Representation.
Definition Value.h:75
Wrapper class representing a virtual register or register unit.
Definition Register.h:181
constexpr bool isVirtualReg() const
Definition Register.h:197
constexpr MCRegUnit asMCRegUnit() const
Definition Register.h:201
constexpr Register asVirtualReg() const
Definition Register.h:206
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
@ OPERAND_IMMEDIATE
Definition MCInstrDesc.h:61
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
iterator end() const
Definition BasicBlock.h:89
LLVM_ABI iterator begin() const
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ SjLj
setjmp/longjmp based exceptions
Definition CodeGen.h:56
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition LaneBitmask.h:92
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool isPreISelGenericOptimizationHint(unsigned Opcode)
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
LLVM_ABI void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
GenericConvergenceVerifier< MachineSSAContext > MachineConvergenceVerifier
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
LLVM_ABI raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
static constexpr LaneBitmask getAll()
Definition LaneBitmask.h:82
constexpr bool none() const
Definition LaneBitmask.h:52
constexpr bool any() const
Definition LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
VarInfo - This represents the regions where a virtual register is live in the program.
Pair of physical register and lane mask.