LLVM 19.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseSet.h"
29#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/StringRef.h"
34#include "llvm/ADT/Twine.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
67#include "llvm/IR/Function.h"
68#include "llvm/IR/InlineAsm.h"
71#include "llvm/MC/LaneBitmask.h"
72#include "llvm/MC/MCAsmInfo.h"
73#include "llvm/MC/MCDwarf.h"
74#include "llvm/MC/MCInstrDesc.h"
77#include "llvm/Pass.h"
81#include "llvm/Support/ModRef.h"
84#include <algorithm>
85#include <cassert>
86#include <cstddef>
87#include <cstdint>
88#include <iterator>
89#include <string>
90#include <utility>
91
92using namespace llvm;
93
94namespace {
95
// Core implementation of -verify-machineinstrs: walks a MachineFunction and
// reports structural errors (see the file header for the list of checks).
// NOTE(review): this listing is scraped from doxygen HTML; each line keeps its
// original source line number, and jumps in that numbering mean lines are
// missing from this capture. Confirm details against upstream before editing.
96 struct MachineVerifier {
97 MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b)
98 : MFAM(&MFAM), Banner(b) {}
99
100 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
101
102 MachineVerifier(const char *b, LiveVariables *LiveVars,
103 LiveIntervals *LiveInts, LiveStacks *LiveStks,
104 SlotIndexes *Indexes)
105 : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
106 Indexes(Indexes) {}
107
108 unsigned verify(const MachineFunction &MF);
109
110 MachineFunctionAnalysisManager *MFAM = nullptr;
111 Pass *const PASS = nullptr;
112 const char *Banner;
113 const MachineFunction *MF = nullptr;
114 const TargetMachine *TM = nullptr;
115 const TargetInstrInfo *TII = nullptr;
116 const TargetRegisterInfo *TRI = nullptr;
117 const MachineRegisterInfo *MRI = nullptr;
118 const RegisterBankInfo *RBI = nullptr;
119
120 unsigned foundErrors = 0;
121
122 // Avoid querying the MachineFunctionProperties for each operand.
123 bool isFunctionRegBankSelected = false;
124 bool isFunctionSelected = false;
125 bool isFunctionTracksDebugUserValues = false;
126
127 using RegVector = SmallVector<Register, 16>;
128 using RegMaskVector = SmallVector<const uint32_t *, 4>;
129 using RegSet = DenseSet<Register>;
// NOTE(review): orig. lines 130-131 are missing here; they presumably declare
// the RegMap and BlockSet aliases used below (vregsLiveIn, FunctionBlocks,
// Preds/Succs) — confirm against upstream.
132
133 const MachineInstr *FirstNonPHI = nullptr;
134 const MachineInstr *FirstTerminator = nullptr;
135 BlockSet FunctionBlocks;
136
137 BitVector regsReserved;
138 RegSet regsLive;
139 RegVector regsDefined, regsDead, regsKilled;
140 RegMaskVector regMasks;
141
142 SlotIndex lastIndex;
143
144 // Add Reg and any sub-registers to RV
145 void addRegWithSubRegs(RegVector &RV, Register Reg) {
146 RV.push_back(Reg);
147 if (Reg.isPhysical())
148 append_range(RV, TRI->subregs(Reg.asMCReg()));
149 }
150
151 struct BBInfo {
152 // Is this MBB reachable from the MF entry point?
153 bool reachable = false;
154
155 // Vregs that must be live in because they are used without being
156 // defined. Map value is the user. vregsLiveIn doesn't include regs
157 // that only are used by PHI nodes.
158 RegMap vregsLiveIn;
159
160 // Regs killed in MBB. They may be defined again, and will then be in both
161 // regsKilled and regsLiveOut.
162 RegSet regsKilled;
163
164 // Regs defined in MBB and live out. Note that vregs passing through may
165 // be live out without being mentioned here.
166 RegSet regsLiveOut;
167
168 // Vregs that pass through MBB untouched. This set is disjoint from
169 // regsKilled and regsLiveOut.
170 RegSet vregsPassed;
171
172 // Vregs that must pass through MBB because they are needed by a successor
173 // block. This set is disjoint from regsLiveOut.
174 RegSet vregsRequired;
175
176 // Set versions of block's predecessor and successor lists.
177 BlockSet Preds, Succs;
178
179 BBInfo() = default;
180
181 // Add register to vregsRequired if it belongs there. Return true if
182 // anything changed.
183 bool addRequired(Register Reg) {
184 if (!Reg.isVirtual())
185 return false;
186 if (regsLiveOut.count(Reg))
187 return false;
188 return vregsRequired.insert(Reg).second;
189 }
190
191 // Same for a full set.
192 bool addRequired(const RegSet &RS) {
193 bool Changed = false;
194 for (Register Reg : RS)
195 Changed |= addRequired(Reg);
196 return Changed;
197 }
198
199 // Same for a full map.
200 bool addRequired(const RegMap &RM) {
201 bool Changed = false;
202 for (const auto &I : RM)
203 Changed |= addRequired(I.first);
204 return Changed;
205 }
206
207 // Live-out registers are either in regsLiveOut or vregsPassed.
208 bool isLiveOut(Register Reg) const {
209 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
210 }
211 };
212
213 // Extra register info per MBB.
// NOTE(review): orig. line 214 — the per-MBB map declaration this comment
// describes (MBBInfoMap, used by markReachable/verify below) — is missing
// from this capture; confirm against upstream.
215
// Bounds-checked membership test against the reserved-register bit vector.
216 bool isReserved(Register Reg) {
217 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
218 }
219
// Allocatable = a known physical register, in an allocatable class, and not
// in the reserved set.
220 bool isAllocatable(Register Reg) const {
221 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
222 !regsReserved.test(Reg.id());
223 }
224
225 // Analysis information if available
226 LiveVariables *LiveVars = nullptr;
227 LiveIntervals *LiveInts = nullptr;
228 LiveStacks *LiveStks = nullptr;
229 SlotIndexes *Indexes = nullptr;
230
231 // This is calculated only when trying to verify convergence control tokens.
232 // Similar to the LLVM IR verifier, we calculate this locally instead of
233 // relying on the pass manager.
// NOTE(review): orig. line 234 — the declaration the comment above refers to
// — is missing from this capture; confirm against upstream.
235
236 void visitMachineFunctionBefore();
237 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
238 void visitMachineBundleBefore(const MachineInstr *MI);
239
240 /// Verify that all of \p MI's virtual register operands are scalars.
241 /// \returns True if all virtual register operands are scalar. False
242 /// otherwise.
243 bool verifyAllRegOpsScalar(const MachineInstr &MI,
244 const MachineRegisterInfo &MRI);
245 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
246
247 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
248 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
249 void verifyPreISelGenericInstruction(const MachineInstr *MI);
250
251 void visitMachineInstrBefore(const MachineInstr *MI);
252 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
253 void visitMachineBundleAfter(const MachineInstr *MI);
254 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
255 void visitMachineFunctionAfter();
256
257 void report(const char *msg, const MachineFunction *MF);
258 void report(const char *msg, const MachineBasicBlock *MBB);
259 void report(const char *msg, const MachineInstr *MI);
260 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
261 LLT MOVRegType = LLT{});
262 void report(const Twine &Msg, const MachineInstr *MI);
263
264 void report_context(const LiveInterval &LI) const;
265 void report_context(const LiveRange &LR, Register VRegUnit,
266 LaneBitmask LaneMask) const;
267 void report_context(const LiveRange::Segment &S) const;
268 void report_context(const VNInfo &VNI) const;
269 void report_context(SlotIndex Pos) const;
270 void report_context(MCPhysReg PhysReg) const;
271 void report_context_liverange(const LiveRange &LR) const;
272 void report_context_lanemask(LaneBitmask LaneMask) const;
273 void report_context_vreg(Register VReg) const;
274 void report_context_vreg_regunit(Register VRegOrUnit) const;
275
276 void verifyInlineAsm(const MachineInstr *MI);
277
278 void checkLiveness(const MachineOperand *MO, unsigned MONum);
279 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
280 SlotIndex UseIdx, const LiveRange &LR,
281 Register VRegOrUnit,
282 LaneBitmask LaneMask = LaneBitmask::getNone());
283 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
284 SlotIndex DefIdx, const LiveRange &LR,
285 Register VRegOrUnit, bool SubRangeCheck = false,
286 LaneBitmask LaneMask = LaneBitmask::getNone());
287
288 void markReachable(const MachineBasicBlock *MBB);
289 void calcRegsPassed();
290 void checkPHIOps(const MachineBasicBlock &MBB);
291
292 void calcRegsRequired();
293 void verifyLiveVariables();
294 void verifyLiveIntervals();
295 void verifyLiveInterval(const LiveInterval&);
296 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
298 void verifyLiveRangeSegment(const LiveRange &,
// NOTE(review): orig. lines 297 and 299-300 — the remaining parameters of the
// two declarations above — are missing from this capture; confirm upstream.
301 void verifyLiveRange(const LiveRange &, Register,
302 LaneBitmask LaneMask = LaneBitmask::getNone());
303
304 void verifyStackFrame();
305
306 void verifySlotIndexes() const;
307 void verifyProperties(const MachineFunction &MF);
308 };
309
// Legacy pass-manager wrapper: runs MachineVerifier on each function and
// aborts compilation via report_fatal_error if any errors are found.
310 struct MachineVerifierLegacyPass : public MachineFunctionPass {
311 static char ID; // Pass ID, replacement for typeid
312
313 const std::string Banner;
314
315 MachineVerifierLegacyPass(std::string banner = std::string())
316 : MachineFunctionPass(ID), Banner(std::move(banner)) {
// NOTE(review): orig. line 317 is missing here (typically the
// initialize...Pass(*PassRegistry::getPassRegistry()) call); confirm upstream.
318 }
319
// Declares this pass as analysis-preserving; it only reads the function.
320 void getAnalysisUsage(AnalysisUsage &AU) const override {
// NOTE(review): orig. lines 321-324 and 326 are missing here (presumably
// used-if-available declarations for the liveness analyses); confirm upstream.
325 AU.setPreservesAll();
327 }
328
329 bool runOnMachineFunction(MachineFunction &MF) override {
330 // Skip functions that have known verification problems.
331 // FIXME: Remove this mechanism when all problematic passes have been
332 // fixed.
333 if (MF.getProperties().hasProperty(
334 MachineFunctionProperties::Property::FailsVerification))
335 return false;
336
337 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
338 if (FoundErrors)
339 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
340 return false;
341 }
342 };
343
344} // end anonymous namespace
345
349 // Skip functions that have known verification problems.
350 // FIXME: Remove this mechanism when all problematic passes have been
351 // fixed.
352 if (MF.getProperties().hasProperty(
354 return PreservedAnalyses::all();
355 unsigned FoundErrors = MachineVerifier(MFAM, Banner.c_str()).verify(MF);
356 if (FoundErrors)
357 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
358 return PreservedAnalyses::all();
359}
360
// Legacy pass-manager registration: the pass's unique ID object and its
// command-line name ("machineverifier").
361char MachineVerifierLegacyPass::ID = 0;
362
363INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
364 "Verify generated machine code", false, false)
365
367 return new MachineVerifierLegacyPass(Banner);
368}
369
370void llvm::verifyMachineFunction(const std::string &Banner,
371 const MachineFunction &MF) {
372 // TODO: Use MFAM after porting below analyses.
373 // LiveVariables *LiveVars;
374 // LiveIntervals *LiveInts;
375 // LiveStacks *LiveStks;
376 // SlotIndexes *Indexes;
377 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
378 if (FoundErrors)
379 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
380}
381
382bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
383 const {
384 MachineFunction &MF = const_cast<MachineFunction&>(*this);
385 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
386 if (AbortOnErrors && FoundErrors)
387 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
388 return FoundErrors == 0;
389}
390
392 const char *Banner, bool AbortOnErrors) const {
393 MachineFunction &MF = const_cast<MachineFunction &>(*this);
394 unsigned FoundErrors =
395 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
396 if (AbortOnErrors && FoundErrors)
397 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
398 return FoundErrors == 0;
399}
400
// Sanity-check the SlotIndexes analysis itself: the per-MBB index list must
// be strictly increasing. No-op when no SlotIndexes analysis is attached.
401void MachineVerifier::verifySlotIndexes() const {
402 if (Indexes == nullptr)
403 return;
404
405 // Ensure the IdxMBB list is sorted by slot indexes.
// NOTE(review): orig. lines 406-407 are missing here (presumably the `Last`
// declaration and the opening of the loop over Indexes->MBBIndexBegin());
// confirm against upstream.
408 E = Indexes->MBBIndexEnd(); I != E; ++I) {
409 assert(!Last.isValid() || I->first > Last);
410 Last = I->first;
411 }
412}
413
// Cross-check MachineFunctionProperties against the actual function state.
414void MachineVerifier::verifyProperties(const MachineFunction &MF) {
415 // If a pass has introduced virtual registers without clearing the
416 // NoVRegs property (or set it without allocating the vregs)
417 // then report an error.
418 if (MF.getProperties().hasProperty(
// NOTE(review): orig. line 419 is missing here (presumably the
// Property::NoVRegs enumerator named by the comment above); confirm upstream.
420 MRI->getNumVirtRegs())
421 report("Function has NoVRegs property but there are VReg operands", &MF);
422}
423
// Main driver: caches target/analysis pointers, then walks every block,
// bundle, instruction and operand of MF, calling the visit* hooks.
// Returns the number of errors found.
424unsigned MachineVerifier::verify(const MachineFunction &MF) {
425 foundErrors = 0;
426
427 this->MF = &MF;
428 TM = &MF.getTarget();
// NOTE(review): orig. lines 429-430 are missing here (presumably the TII and
// TRI initializations — both members are used below); confirm upstream.
431 RBI = MF.getSubtarget().getRegBankInfo();
432 MRI = &MF.getRegInfo();
433
434 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
// NOTE(review): orig. line 435 is missing here (presumably the
// Property::FailedISel enumerator); confirm upstream.
436
437 // If we're mid-GlobalISel and we already triggered the fallback path then
438 // it's expected that the MIR is somewhat broken but that's ok since we'll
439 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
440 if (isFunctionFailedISel)
441 return foundErrors;
442
// Cache the property queries once so per-operand checks stay cheap.
443 isFunctionRegBankSelected = MF.getProperties().hasProperty(
445 isFunctionSelected = MF.getProperties().hasProperty(
447 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
// NOTE(review): orig. lines 444, 446 and 448 are missing here (the property
// enumerators for the three queries above); confirm upstream.
449
// Pull in whichever liveness analyses are available from the legacy pass
// manager (PASS) or the new-PM analysis manager (MFAM).
450 if (PASS) {
451 auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
452 LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
453 // We don't want to verify LiveVariables if LiveIntervals is available.
454 auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
455 if (!LiveInts)
456 LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
457 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
458 auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
459 Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
460 }
461 if (MFAM) {
462 MachineFunction &Func = const_cast<MachineFunction &>(MF);
463 LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
464 if (!LiveInts)
465 LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
466 // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
467 Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
468 }
469
470 verifySlotIndexes();
471
472 verifyProperties(MF);
473
474 visitMachineFunctionBefore();
475 for (const MachineBasicBlock &MBB : MF) {
476 visitMachineBasicBlockBefore(&MBB);
477 // Keep track of the current bundle header.
478 const MachineInstr *CurBundle = nullptr;
479 // Do we expect the next instruction to be part of the same bundle?
480 bool InBundle = false;
481
482 for (const MachineInstr &MI : MBB.instrs()) {
483 if (MI.getParent() != &MBB) {
484 report("Bad instruction parent pointer", &MBB);
485 errs() << "Instruction: " << MI;
486 continue;
487 }
488
489 // Check for consistent bundle flags.
490 if (InBundle && !MI.isBundledWithPred())
491 report("Missing BundledPred flag, "
492 "BundledSucc was set on predecessor",
493 &MI);
494 if (!InBundle && MI.isBundledWithPred())
495 report("BundledPred flag is set, "
496 "but BundledSucc not set on predecessor",
497 &MI);
498
499 // Is this a bundle header?
500 if (!MI.isInsideBundle()) {
501 if (CurBundle)
502 visitMachineBundleAfter(CurBundle);
503 CurBundle = &MI;
504 visitMachineBundleBefore(CurBundle);
505 } else if (!CurBundle)
506 report("No bundle header", &MI);
507 visitMachineInstrBefore(&MI);
508 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
509 const MachineOperand &Op = MI.getOperand(I);
510 if (Op.getParent() != &MI) {
511 // Make sure to use correct addOperand / removeOperand / ChangeTo
512 // functions when replacing operands of a MachineInstr.
513 report("Instruction has operand with wrong parent set", &MI);
514 }
515
516 visitMachineOperand(&Op, I);
517 }
518
519 // Was this the last bundled instruction?
520 InBundle = MI.isBundledWithSucc();
521 }
522 if (CurBundle)
523 visitMachineBundleAfter(CurBundle);
524 if (InBundle)
525 report("BundledSucc flag set on last instruction in block", &MBB.back());
526 visitMachineBasicBlockAfter(&MBB);
527 }
528 visitMachineFunctionAfter();
529
530 // Clean up.
531 regsLive.clear();
532 regsDefined.clear();
533 regsDead.clear();
534 regsKilled.clear();
535 regMasks.clear();
536 MBBInfoMap.clear();
537
538 return foundErrors;
539}
540
541void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
542 assert(MF);
543 errs() << '\n';
544 if (!foundErrors++) {
545 if (Banner)
546 errs() << "# " << Banner << '\n';
547 if (LiveInts != nullptr)
548 LiveInts->print(errs());
549 else
550 MF->print(errs(), Indexes);
551 }
552 errs() << "*** Bad machine code: " << msg << " ***\n"
553 << "- function: " << MF->getName() << "\n";
554}
555
556void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
557 assert(MBB);
558 report(msg, MBB->getParent());
559 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
560 << MBB->getName() << " (" << (const void *)MBB << ')';
561 if (Indexes)
562 errs() << " [" << Indexes->getMBBStartIdx(MBB)
563 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
564 errs() << '\n';
565}
566
567void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
568 assert(MI);
569 report(msg, MI->getParent());
570 errs() << "- instruction: ";
571 if (Indexes && Indexes->hasIndex(*MI))
572 errs() << Indexes->getInstructionIndex(*MI) << '\t';
573 MI->print(errs(), /*IsStandalone=*/true);
574}
575
576void MachineVerifier::report(const char *msg, const MachineOperand *MO,
577 unsigned MONum, LLT MOVRegType) {
578 assert(MO);
579 report(msg, MO->getParent());
580 errs() << "- operand " << MONum << ": ";
581 MO->print(errs(), MOVRegType, TRI);
582 errs() << "\n";
583}
584
585void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
586 report(Msg.str().c_str(), MI);
587}
588
589void MachineVerifier::report_context(SlotIndex Pos) const {
590 errs() << "- at: " << Pos << '\n';
591}
592
593void MachineVerifier::report_context(const LiveInterval &LI) const {
594 errs() << "- interval: " << LI << '\n';
595}
596
597void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
598 LaneBitmask LaneMask) const {
599 report_context_liverange(LR);
600 report_context_vreg_regunit(VRegUnit);
601 if (LaneMask.any())
602 report_context_lanemask(LaneMask);
603}
604
605void MachineVerifier::report_context(const LiveRange::Segment &S) const {
606 errs() << "- segment: " << S << '\n';
607}
608
609void MachineVerifier::report_context(const VNInfo &VNI) const {
610 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
611}
612
613void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
614 errs() << "- liverange: " << LR << '\n';
615}
616
617void MachineVerifier::report_context(MCPhysReg PReg) const {
618 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
619}
620
621void MachineVerifier::report_context_vreg(Register VReg) const {
622 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
623}
624
625void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
626 if (VRegOrUnit.isVirtual()) {
627 report_context_vreg(VRegOrUnit);
628 } else {
629 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
630 }
631}
632
633void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
634 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
635}
636
637void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
638 BBInfo &MInfo = MBBInfoMap[MBB];
639 if (!MInfo.reachable) {
640 MInfo.reachable = true;
641 for (const MachineBasicBlock *Succ : MBB->successors())
642 markReachable(Succ);
643 }
644}
645
646void MachineVerifier::visitMachineFunctionBefore() {
647 lastIndex = SlotIndex();
648 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
649 : TRI->getReservedRegs(*MF);
650
651 if (!MF->empty())
652 markReachable(&MF->front());
653
654 // Build a set of the basic blocks in the function.
655 FunctionBlocks.clear();
656 for (const auto &MBB : *MF) {
657 FunctionBlocks.insert(&MBB);
658 BBInfo &MInfo = MBBInfoMap[&MBB];
659
660 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
661 if (MInfo.Preds.size() != MBB.pred_size())
662 report("MBB has duplicate entries in its predecessor list.", &MBB);
663
664 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
665 if (MInfo.Succs.size() != MBB.succ_size())
666 report("MBB has duplicate entries in its successor list.", &MBB);
667 }
668
669 // Check that the register use lists are sane.
670 MRI->verifyUseLists();
671
672 if (!MF->empty())
673 verifyStackFrame();
674}
675
// Per-block checks: live-in sanity, CFG pred/succ consistency, landing-pad
// successor counts, and cross-validation of successors against the target's
// analyzeBranch answers. Also primes regsLive for the per-instruction walk.
676void
677MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
678 FirstTerminator = nullptr;
679 FirstNonPHI = nullptr;
680
681 if (!MF->getProperties().hasProperty(
682 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
683 // If this block has allocatable physical registers live-in, check that
684 // it is an entry block or landing pad.
685 for (const auto &LI : MBB->liveins()) {
686 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
687 MBB->getIterator() != MBB->getParent()->begin() &&
// NOTE(review): orig. line 688 is missing here (the final conjunct of this
// condition — per the message below, presumably the inlineasm-br indirect
// target check); confirm against upstream.
689 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
690 "inlineasm-br-indirect-target.",
691 MBB);
692 report_context(LI.PhysReg);
693 }
694 }
695 }
696
697 if (MBB->isIRBlockAddressTaken()) {
// NOTE(review): orig. line 698 is missing here (the condition guarding this
// report — presumably a check of the associated IR block's address-taken
// state); confirm against upstream.
699 report("ir-block-address-taken is associated with basic block not used by "
700 "a blockaddress.",
701 MBB);
702 }
703
704 // Count the number of landing pad successors.
// NOTE(review): orig. line 705 is missing here (the LandingPadSuccs set
// declaration used below); confirm against upstream.
706 for (const auto *succ : MBB->successors()) {
707 if (succ->isEHPad())
708 LandingPadSuccs.insert(succ);
709 if (!FunctionBlocks.count(succ))
710 report("MBB has successor that isn't part of the function.", MBB);
711 if (!MBBInfoMap[succ].Preds.count(MBB)) {
712 report("Inconsistent CFG", MBB);
713 errs() << "MBB is not in the predecessor list of the successor "
714 << printMBBReference(*succ) << ".\n";
715 }
716 }
717
718 // Check the predecessor list.
719 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
720 if (!FunctionBlocks.count(Pred))
721 report("MBB has predecessor that isn't part of the function.", MBB);
722 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
723 report("Inconsistent CFG", MBB);
724 errs() << "MBB is not in the successor list of the predecessor "
725 << printMBBReference(*Pred) << ".\n";
726 }
727 }
728
729 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
730 const BasicBlock *BB = MBB->getBasicBlock();
731 const Function &F = MF->getFunction();
732 if (LandingPadSuccs.size() > 1 &&
733 !(AsmInfo &&
// NOTE(review): orig. line 734 is missing here (a conjunct involving AsmInfo,
// presumably an exception-handling-style check); confirm against upstream.
735 BB && isa<SwitchInst>(BB->getTerminator())) &&
736 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
737 report("MBB has more than one landing pad successor", MBB);
738
739 // Call analyzeBranch. If it succeeds, there several more conditions to check.
740 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
// NOTE(review): orig. line 741 is missing here (the Cond operand vector
// declaration passed to analyzeBranch below); confirm against upstream.
742 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
743 Cond)) {
744 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
745 // check whether its answers match up with reality.
746 if (!TBB && !FBB) {
747 // Block falls through to its successor.
748 if (!MBB->empty() && MBB->back().isBarrier() &&
749 !TII->isPredicated(MBB->back())) {
750 report("MBB exits via unconditional fall-through but ends with a "
751 "barrier instruction!", MBB);
752 }
753 if (!Cond.empty()) {
754 report("MBB exits via unconditional fall-through but has a condition!",
755 MBB);
756 }
757 } else if (TBB && !FBB && Cond.empty()) {
758 // Block unconditionally branches somewhere.
759 if (MBB->empty()) {
760 report("MBB exits via unconditional branch but doesn't contain "
761 "any instructions!", MBB);
762 } else if (!MBB->back().isBarrier()) {
763 report("MBB exits via unconditional branch but doesn't end with a "
764 "barrier instruction!", MBB);
765 } else if (!MBB->back().isTerminator()) {
766 report("MBB exits via unconditional branch but the branch isn't a "
767 "terminator instruction!", MBB);
768 }
769 } else if (TBB && !FBB && !Cond.empty()) {
770 // Block conditionally branches somewhere, otherwise falls through.
771 if (MBB->empty()) {
772 report("MBB exits via conditional branch/fall-through but doesn't "
773 "contain any instructions!", MBB);
774 } else if (MBB->back().isBarrier()) {
775 report("MBB exits via conditional branch/fall-through but ends with a "
776 "barrier instruction!", MBB);
777 } else if (!MBB->back().isTerminator()) {
778 report("MBB exits via conditional branch/fall-through but the branch "
779 "isn't a terminator instruction!", MBB);
780 }
781 } else if (TBB && FBB) {
782 // Block conditionally branches somewhere, otherwise branches
783 // somewhere else.
784 if (MBB->empty()) {
785 report("MBB exits via conditional branch/branch but doesn't "
786 "contain any instructions!", MBB);
787 } else if (!MBB->back().isBarrier()) {
788 report("MBB exits via conditional branch/branch but doesn't end with a "
789 "barrier instruction!", MBB);
790 } else if (!MBB->back().isTerminator()) {
791 report("MBB exits via conditional branch/branch but the branch "
792 "isn't a terminator instruction!", MBB);
793 }
794 if (Cond.empty()) {
795 report("MBB exits via conditional branch/branch but there's no "
796 "condition!", MBB);
797 }
798 } else {
799 report("analyzeBranch returned invalid data!", MBB);
800 }
801
802 // Now check that the successors match up with the answers reported by
803 // analyzeBranch.
804 if (TBB && !MBB->isSuccessor(TBB))
805 report("MBB exits via jump or conditional branch, but its target isn't a "
806 "CFG successor!",
807 MBB);
808 if (FBB && !MBB->isSuccessor(FBB))
809 report("MBB exits via conditional branch, but its target isn't a CFG "
810 "successor!",
811 MBB);
812
813 // There might be a fallthrough to the next block if there's either no
814 // unconditional true branch, or if there's a condition, and one of the
815 // branches is missing.
816 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
817
818 // A conditional fallthrough must be an actual CFG successor, not
819 // unreachable. (Conversely, an unconditional fallthrough might not really
820 // be a successor, because the block might end in unreachable.)
821 if (!Cond.empty() && !FBB) {
// NOTE(review): orig. line 822 is missing here (the declaration of the MBBI
// iterator used below, presumably pointing at the next block); confirm
// against upstream.
823 if (MBBI == MF->end()) {
824 report("MBB conditionally falls through out of function!", MBB);
825 } else if (!MBB->isSuccessor(&*MBBI))
826 report("MBB exits via conditional branch/fall-through but the CFG "
827 "successors don't match the actual successors!",
828 MBB);
829 }
830
831 // Verify that there aren't any extra un-accounted-for successors.
832 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
833 // If this successor is one of the branch targets, it's okay.
834 if (SuccMBB == TBB || SuccMBB == FBB)
835 continue;
836 // If we might have a fallthrough, and the successor is the fallthrough
837 // block, that's also ok.
838 if (Fallthrough && SuccMBB == MBB->getNextNode())
839 continue;
840 // Also accept successors which are for exception-handling or might be
841 // inlineasm_br targets.
842 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
843 continue;
844 report("MBB has unexpected successors which are not branch targets, "
845 "fallthrough, EHPads, or inlineasm_br targets.",
846 MBB);
847 }
848 }
849
// Seed regsLive with the block's live-ins (including all sub-registers).
850 regsLive.clear();
851 if (MRI->tracksLiveness()) {
852 for (const auto &LI : MBB->liveins()) {
853 if (!Register::isPhysicalRegister(LI.PhysReg)) {
854 report("MBB live-in list contains non-physical register", MBB);
855 continue;
856 }
857 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
858 regsLive.insert(SubReg);
859 }
860 }
861
// Pristine (callee-saved, untouched) registers are also live throughout.
862 const MachineFrameInfo &MFI = MF->getFrameInfo();
863 BitVector PR = MFI.getPristineRegs(*MF);
864 for (unsigned I : PR.set_bits()) {
865 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
866 regsLive.insert(SubReg);
867 }
868
869 regsKilled.clear();
870 regsDefined.clear();
871
872 if (Indexes)
873 lastIndex = Indexes->getMBBStartIdx(MBB);
874}
875
876// This function gets called for all bundle headers, including normal
877// stand-alone unbundled instructions.
878void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
879 if (Indexes && Indexes->hasIndex(*MI)) {
880 SlotIndex idx = Indexes->getInstructionIndex(*MI);
881 if (!(idx > lastIndex)) {
882 report("Instruction index out of order", MI);
883 errs() << "Last instruction was at " << lastIndex << '\n';
884 }
885 lastIndex = idx;
886 }
887
888 // Ensure non-terminators don't follow terminators.
889 if (MI->isTerminator()) {
890 if (!FirstTerminator)
891 FirstTerminator = MI;
892 } else if (FirstTerminator) {
893 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
894 // precede non-terminators.
895 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
896 report("Non-terminator instruction after the first terminator", MI);
897 errs() << "First terminator was:\t" << *FirstTerminator;
898 }
899 }
900}
901
902// The operands on an INLINEASM instruction must follow a template.
903// Verify that the flag operands make sense.
904void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
905 // The first two operands on INLINEASM are the asm string and global flags.
906 if (MI->getNumOperands() < 2) {
907 report("Too few operands on inline asm", MI);
908 return;
909 }
910 if (!MI->getOperand(0).isSymbol())
911 report("Asm string must be an external symbol", MI);
912 if (!MI->getOperand(1).isImm())
913 report("Asm flags must be an immediate", MI);
914 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
915 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
916 // and Extra_IsConvergent = 32.
917 if (!isUInt<6>(MI->getOperand(1).getImm()))
918 report("Unknown asm flags", &MI->getOperand(1), 1);
919
920 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
921
// Walk the operand groups: each flag immediate leads a group of
// 1 (the flag itself) + getNumOperandRegisters() operands, so OpNo jumps
// whole groups at a time.
922 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
923 unsigned NumOps;
924 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
925 const MachineOperand &MO = MI->getOperand(OpNo);
926 // There may be implicit ops after the fixed operands.
927 if (!MO.isImm())
928 break;
929 const InlineAsm::Flag F(MO.getImm());
930 NumOps = 1 + F.getNumOperandRegisters();
931 }
932
// If OpNo overshot the operand count, the final group was truncated.
933 if (OpNo > MI->getNumOperands())
934 report("Missing operands in last group", MI);
935
936 // An optional MDNode follows the groups.
937 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
938 ++OpNo;
939
940 // All trailing operands must be implicit registers.
941 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
942 const MachineOperand &MO = MI->getOperand(OpNo);
943 if (!MO.isReg() || !MO.isImplicit())
944 report("Expected implicit register after groups", &MO, OpNo);
945 }
946
// INLINEASM_BR additionally names basic-block targets; each must be wired
// into the CFG in both directions.
947 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
948 const MachineBasicBlock *MBB = MI->getParent();
949
950 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
951 i != e; ++i) {
952 const MachineOperand &MO = MI->getOperand(i);
953
954 if (!MO.isMBB())
955 continue;
956
957 // Check the successor & predecessor lists look ok, assume they are
958 // not. Find the indirect target without going through the successors.
959 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
960 if (!IndirectTargetMBB) {
961 report("INLINEASM_BR indirect target does not exist", &MO, i);
962 break;
963 }
964
965 if (!MBB->isSuccessor(IndirectTargetMBB))
966 report("INLINEASM_BR indirect target missing from successor list", &MO,
967 i);
968
969 if (!IndirectTargetMBB->isPredecessor(MBB))
970 report("INLINEASM_BR indirect target predecessor list missing parent",
971 &MO, i);
972 }
973 }
974}
975
976bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
977 const MachineRegisterInfo &MRI) {
978 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
979 if (!Op.isReg())
980 return false;
981 const auto Reg = Op.getReg();
982 if (Reg.isPhysical())
983 return false;
984 return !MRI.getType(Reg).isScalar();
985 }))
986 return true;
987 report("All register operands must have scalar types", &MI);
988 return false;
989}
990
991/// Check that types are consistent when two operands need to have the same
992/// number of vector elements.
993/// \return true if the types are valid.
994bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
995 const MachineInstr *MI) {
996 if (Ty0.isVector() != Ty1.isVector()) {
997 report("operand types must be all-vector or all-scalar", MI);
998 // Generally we try to report as many issues as possible at once, but in
999 // this case it's not clear what should we be comparing the size of the
1000 // scalar with: the size of the whole vector or its lane. Instead of
1001 // making an arbitrary choice and emitting not so helpful message, let's
1002 // avoid the extra noise and stop here.
1003 return false;
1004 }
1005
1006 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
1007 report("operand types must preserve number of vector elements", MI);
1008 return false;
1009 }
1010
1011 return true;
1012}
1013
1014 // Verify that the chosen G_INTRINSIC* opcode is consistent with the memory
1014 // effects declared for the called intrinsic: the no-side-effect opcodes
1014 // (G_INTRINSIC, G_INTRINSIC_CONVERGENT) must only be used with intrinsics
1014 // that do not access memory, and the *_W_SIDE_EFFECTS opcodes must only be
1014 // used with intrinsics that do. Returns false (after reporting) on mismatch.
1014 bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
1015 auto Opcode = MI->getOpcode();
1016 // These two opcode variants assert the call has no side effects.
1016 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
1017 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
1018 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1019 // Only known (in-range, non-zero) intrinsic IDs have declared attributes
1019 // to check against; target-unknown IDs are left alone.
1019 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1021 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1022 // The declaration "has side effects" iff it may access memory.
1022 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1023 if (NoSideEffects && DeclHasSideEffects) {
1024 report(Twine(TII->getName(Opcode),
1025 " used with intrinsic that accesses memory"),
1026 MI);
1027 return false;
1028 }
1029 if (!NoSideEffects && !DeclHasSideEffects) {
1030 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
1031 return false;
1032 }
1033 }
1034
1035 return true;
1036 }
1037
1038 // Verify that the chosen G_INTRINSIC* opcode is consistent with the
1038 // convergence attribute declared for the called intrinsic: non-convergent
1038 // opcodes (G_INTRINSIC, G_INTRINSIC_W_SIDE_EFFECTS) must not be used with a
1038 // convergent intrinsic, and the *_CONVERGENT opcodes must only be used with
1038 // one. Returns false (after reporting) on mismatch.
1038 bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1039 auto Opcode = MI->getOpcode();
1040 // These two opcode variants assert the call is not convergent.
1040 bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1041 Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1042 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1043 // Only known (in-range, non-zero) intrinsic IDs carry declared attributes
1043 // to check against.
1043 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1045 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1046 bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1047 if (NotConvergent && DeclIsConvergent) {
1048 report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1049 MI);
1050 return false;
1051 }
1052 if (!NotConvergent && !DeclIsConvergent) {
1053 report(
1054 Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1055 MI);
1056 return false;
1057 }
1058 }
1059
1060 return true;
1061 }
1062
1063void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1064 if (isFunctionSelected)
1065 report("Unexpected generic instruction in a Selected function", MI);
1066
1067 const MCInstrDesc &MCID = MI->getDesc();
1068 unsigned NumOps = MI->getNumOperands();
1069
1070 // Branches must reference a basic block if they are not indirect
1071 if (MI->isBranch() && !MI->isIndirectBranch()) {
1072 bool HasMBB = false;
1073 for (const MachineOperand &Op : MI->operands()) {
1074 if (Op.isMBB()) {
1075 HasMBB = true;
1076 break;
1077 }
1078 }
1079
1080 if (!HasMBB) {
1081 report("Branch instruction is missing a basic block operand or "
1082 "isIndirectBranch property",
1083 MI);
1084 }
1085 }
1086
1087 // Check types.
1089 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1090 I != E; ++I) {
1091 if (!MCID.operands()[I].isGenericType())
1092 continue;
1093 // Generic instructions specify type equality constraints between some of
1094 // their operands. Make sure these are consistent.
1095 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1096 Types.resize(std::max(TypeIdx + 1, Types.size()));
1097
1098 const MachineOperand *MO = &MI->getOperand(I);
1099 if (!MO->isReg()) {
1100 report("generic instruction must use register operands", MI);
1101 continue;
1102 }
1103
1104 LLT OpTy = MRI->getType(MO->getReg());
1105 // Don't report a type mismatch if there is no actual mismatch, only a
1106 // type missing, to reduce noise:
1107 if (OpTy.isValid()) {
1108 // Only the first valid type for a type index will be printed: don't
1109 // overwrite it later so it's always clear which type was expected:
1110 if (!Types[TypeIdx].isValid())
1111 Types[TypeIdx] = OpTy;
1112 else if (Types[TypeIdx] != OpTy)
1113 report("Type mismatch in generic instruction", MO, I, OpTy);
1114 } else {
1115 // Generic instructions must have types attached to their operands.
1116 report("Generic instruction is missing a virtual register type", MO, I);
1117 }
1118 }
1119
1120 // Generic opcodes must not have physical register operands.
1121 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1122 const MachineOperand *MO = &MI->getOperand(I);
1123 if (MO->isReg() && MO->getReg().isPhysical())
1124 report("Generic instruction cannot have physical register", MO, I);
1125 }
1126
1127 // Avoid out of bounds in checks below. This was already reported earlier.
1128 if (MI->getNumOperands() < MCID.getNumOperands())
1129 return;
1130
1132 if (!TII->verifyInstruction(*MI, ErrorInfo))
1133 report(ErrorInfo.data(), MI);
1134
1135 // Verify properties of various specific instruction types
1136 unsigned Opc = MI->getOpcode();
1137 switch (Opc) {
1138 case TargetOpcode::G_ASSERT_SEXT:
1139 case TargetOpcode::G_ASSERT_ZEXT: {
1140 std::string OpcName =
1141 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1142 if (!MI->getOperand(2).isImm()) {
1143 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1144 break;
1145 }
1146
1147 Register Dst = MI->getOperand(0).getReg();
1148 Register Src = MI->getOperand(1).getReg();
1149 LLT SrcTy = MRI->getType(Src);
1150 int64_t Imm = MI->getOperand(2).getImm();
1151 if (Imm <= 0) {
1152 report(Twine(OpcName, " size must be >= 1"), MI);
1153 break;
1154 }
1155
1156 if (Imm >= SrcTy.getScalarSizeInBits()) {
1157 report(Twine(OpcName, " size must be less than source bit width"), MI);
1158 break;
1159 }
1160
1161 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1162 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1163
1164 // Allow only the source bank to be set.
1165 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1166 report(Twine(OpcName, " cannot change register bank"), MI);
1167 break;
1168 }
1169
1170 // Don't allow a class change. Do allow member class->regbank.
1171 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1172 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1173 report(
1174 Twine(OpcName, " source and destination register classes must match"),
1175 MI);
1176 break;
1177 }
1178
1179 break;
1180 }
1181
1182 case TargetOpcode::G_CONSTANT:
1183 case TargetOpcode::G_FCONSTANT: {
1184 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1185 if (DstTy.isVector())
1186 report("Instruction cannot use a vector result type", MI);
1187
1188 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1189 if (!MI->getOperand(1).isCImm()) {
1190 report("G_CONSTANT operand must be cimm", MI);
1191 break;
1192 }
1193
1194 const ConstantInt *CI = MI->getOperand(1).getCImm();
1195 if (CI->getBitWidth() != DstTy.getSizeInBits())
1196 report("inconsistent constant size", MI);
1197 } else {
1198 if (!MI->getOperand(1).isFPImm()) {
1199 report("G_FCONSTANT operand must be fpimm", MI);
1200 break;
1201 }
1202 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1203
1205 DstTy.getSizeInBits()) {
1206 report("inconsistent constant size", MI);
1207 }
1208 }
1209
1210 break;
1211 }
1212 case TargetOpcode::G_LOAD:
1213 case TargetOpcode::G_STORE:
1214 case TargetOpcode::G_ZEXTLOAD:
1215 case TargetOpcode::G_SEXTLOAD: {
1216 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1217 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1218 if (!PtrTy.isPointer())
1219 report("Generic memory instruction must access a pointer", MI);
1220
1221 // Generic loads and stores must have a single MachineMemOperand
1222 // describing that access.
1223 if (!MI->hasOneMemOperand()) {
1224 report("Generic instruction accessing memory must have one mem operand",
1225 MI);
1226 } else {
1227 const MachineMemOperand &MMO = **MI->memoperands_begin();
1228 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1229 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1231 ValTy.getSizeInBits()))
1232 report("Generic extload must have a narrower memory type", MI);
1233 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1235 ValTy.getSizeInBytes()))
1236 report("load memory size cannot exceed result size", MI);
1237 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1239 MMO.getSize().getValue()))
1240 report("store memory size cannot exceed value size", MI);
1241 }
1242
1243 const AtomicOrdering Order = MMO.getSuccessOrdering();
1244 if (Opc == TargetOpcode::G_STORE) {
1245 if (Order == AtomicOrdering::Acquire ||
1247 report("atomic store cannot use acquire ordering", MI);
1248
1249 } else {
1250 if (Order == AtomicOrdering::Release ||
1252 report("atomic load cannot use release ordering", MI);
1253 }
1254 }
1255
1256 break;
1257 }
1258 case TargetOpcode::G_PHI: {
1259 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1260 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1261 [this, &DstTy](const MachineOperand &MO) {
1262 if (!MO.isReg())
1263 return true;
1264 LLT Ty = MRI->getType(MO.getReg());
1265 if (!Ty.isValid() || (Ty != DstTy))
1266 return false;
1267 return true;
1268 }))
1269 report("Generic Instruction G_PHI has operands with incompatible/missing "
1270 "types",
1271 MI);
1272 break;
1273 }
1274 case TargetOpcode::G_BITCAST: {
1275 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1276 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1277 if (!DstTy.isValid() || !SrcTy.isValid())
1278 break;
1279
1280 if (SrcTy.isPointer() != DstTy.isPointer())
1281 report("bitcast cannot convert between pointers and other types", MI);
1282
1283 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1284 report("bitcast sizes must match", MI);
1285
1286 if (SrcTy == DstTy)
1287 report("bitcast must change the type", MI);
1288
1289 break;
1290 }
1291 case TargetOpcode::G_INTTOPTR:
1292 case TargetOpcode::G_PTRTOINT:
1293 case TargetOpcode::G_ADDRSPACE_CAST: {
1294 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1295 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1296 if (!DstTy.isValid() || !SrcTy.isValid())
1297 break;
1298
1299 verifyVectorElementMatch(DstTy, SrcTy, MI);
1300
1301 DstTy = DstTy.getScalarType();
1302 SrcTy = SrcTy.getScalarType();
1303
1304 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1305 if (!DstTy.isPointer())
1306 report("inttoptr result type must be a pointer", MI);
1307 if (SrcTy.isPointer())
1308 report("inttoptr source type must not be a pointer", MI);
1309 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1310 if (!SrcTy.isPointer())
1311 report("ptrtoint source type must be a pointer", MI);
1312 if (DstTy.isPointer())
1313 report("ptrtoint result type must not be a pointer", MI);
1314 } else {
1315 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1316 if (!SrcTy.isPointer() || !DstTy.isPointer())
1317 report("addrspacecast types must be pointers", MI);
1318 else {
1319 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1320 report("addrspacecast must convert different address spaces", MI);
1321 }
1322 }
1323
1324 break;
1325 }
1326 case TargetOpcode::G_PTR_ADD: {
1327 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1328 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1329 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1330 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1331 break;
1332
1333 if (!PtrTy.isPointerOrPointerVector())
1334 report("gep first operand must be a pointer", MI);
1335
1336 if (OffsetTy.isPointerOrPointerVector())
1337 report("gep offset operand must not be a pointer", MI);
1338
1339 if (PtrTy.isPointerOrPointerVector()) {
1340 const DataLayout &DL = MF->getDataLayout();
1341 unsigned AS = PtrTy.getAddressSpace();
1342 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1343 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1344 report("gep offset operand must match index size for address space",
1345 MI);
1346 }
1347 }
1348
1349 // TODO: Is the offset allowed to be a scalar with a vector?
1350 break;
1351 }
1352 case TargetOpcode::G_PTRMASK: {
1353 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1354 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1355 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1356 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1357 break;
1358
1359 if (!DstTy.isPointerOrPointerVector())
1360 report("ptrmask result type must be a pointer", MI);
1361
1362 if (!MaskTy.getScalarType().isScalar())
1363 report("ptrmask mask type must be an integer", MI);
1364
1365 verifyVectorElementMatch(DstTy, MaskTy, MI);
1366 break;
1367 }
1368 case TargetOpcode::G_SEXT:
1369 case TargetOpcode::G_ZEXT:
1370 case TargetOpcode::G_ANYEXT:
1371 case TargetOpcode::G_TRUNC:
1372 case TargetOpcode::G_FPEXT:
1373 case TargetOpcode::G_FPTRUNC: {
1374 // Number of operands and presense of types is already checked (and
1375 // reported in case of any issues), so no need to report them again. As
1376 // we're trying to report as many issues as possible at once, however, the
1377 // instructions aren't guaranteed to have the right number of operands or
1378 // types attached to them at this point
1379 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1380 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1381 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1382 if (!DstTy.isValid() || !SrcTy.isValid())
1383 break;
1384
1386 report("Generic extend/truncate can not operate on pointers", MI);
1387
1388 verifyVectorElementMatch(DstTy, SrcTy, MI);
1389
1390 unsigned DstSize = DstTy.getScalarSizeInBits();
1391 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1392 switch (MI->getOpcode()) {
1393 default:
1394 if (DstSize <= SrcSize)
1395 report("Generic extend has destination type no larger than source", MI);
1396 break;
1397 case TargetOpcode::G_TRUNC:
1398 case TargetOpcode::G_FPTRUNC:
1399 if (DstSize >= SrcSize)
1400 report("Generic truncate has destination type no smaller than source",
1401 MI);
1402 break;
1403 }
1404 break;
1405 }
1406 case TargetOpcode::G_SELECT: {
1407 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1408 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1409 if (!SelTy.isValid() || !CondTy.isValid())
1410 break;
1411
1412 // Scalar condition select on a vector is valid.
1413 if (CondTy.isVector())
1414 verifyVectorElementMatch(SelTy, CondTy, MI);
1415 break;
1416 }
1417 case TargetOpcode::G_MERGE_VALUES: {
1418 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1419 // e.g. s2N = MERGE sN, sN
1420 // Merging multiple scalars into a vector is not allowed, should use
1421 // G_BUILD_VECTOR for that.
1422 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1423 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1424 if (DstTy.isVector() || SrcTy.isVector())
1425 report("G_MERGE_VALUES cannot operate on vectors", MI);
1426
1427 const unsigned NumOps = MI->getNumOperands();
1428 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1429 report("G_MERGE_VALUES result size is inconsistent", MI);
1430
1431 for (unsigned I = 2; I != NumOps; ++I) {
1432 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1433 report("G_MERGE_VALUES source types do not match", MI);
1434 }
1435
1436 break;
1437 }
1438 case TargetOpcode::G_UNMERGE_VALUES: {
1439 unsigned NumDsts = MI->getNumOperands() - 1;
1440 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1441 for (unsigned i = 1; i < NumDsts; ++i) {
1442 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1443 report("G_UNMERGE_VALUES destination types do not match", MI);
1444 break;
1445 }
1446 }
1447
1448 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1449 if (DstTy.isVector()) {
1450 // This case is the converse of G_CONCAT_VECTORS.
1451 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1452 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1453 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1454 report("G_UNMERGE_VALUES source operand does not match vector "
1455 "destination operands",
1456 MI);
1457 } else if (SrcTy.isVector()) {
1458 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1459 // mismatched types as long as the total size matches:
1460 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1461 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1462 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1463 "destination operands",
1464 MI);
1465 } else {
1466 // This case is the converse of G_MERGE_VALUES.
1467 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1468 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1469 "destination operands",
1470 MI);
1471 }
1472 }
1473 break;
1474 }
1475 case TargetOpcode::G_BUILD_VECTOR: {
1476 // Source types must be scalars, dest type a vector. Total size of scalars
1477 // must match the dest vector size.
1478 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1479 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1480 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1481 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1482 break;
1483 }
1484
1485 if (DstTy.getElementType() != SrcEltTy)
1486 report("G_BUILD_VECTOR result element type must match source type", MI);
1487
1488 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1489 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1490
1491 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1492 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1493 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1494
1495 break;
1496 }
1497 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1498 // Source types must be scalars, dest type a vector. Scalar types must be
1499 // larger than the dest vector elt type, as this is a truncating operation.
1500 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1501 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1502 if (!DstTy.isVector() || SrcEltTy.isVector())
1503 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1504 MI);
1505 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1506 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1507 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1508 MI);
1509 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1510 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1511 "dest elt type",
1512 MI);
1513 break;
1514 }
1515 case TargetOpcode::G_CONCAT_VECTORS: {
1516 // Source types should be vectors, and total size should match the dest
1517 // vector size.
1518 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1519 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1520 if (!DstTy.isVector() || !SrcTy.isVector())
1521 report("G_CONCAT_VECTOR requires vector source and destination operands",
1522 MI);
1523
1524 if (MI->getNumOperands() < 3)
1525 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1526
1527 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1528 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1529 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1530 if (DstTy.getElementCount() !=
1531 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1532 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1533 break;
1534 }
1535 case TargetOpcode::G_ICMP:
1536 case TargetOpcode::G_FCMP: {
1537 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1538 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1539
1540 if ((DstTy.isVector() != SrcTy.isVector()) ||
1541 (DstTy.isVector() &&
1542 DstTy.getElementCount() != SrcTy.getElementCount()))
1543 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1544
1545 break;
1546 }
1547 case TargetOpcode::G_SCMP:
1548 case TargetOpcode::G_UCMP: {
1549 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1550 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1551 LLT SrcTy2 = MRI->getType(MI->getOperand(2).getReg());
1552
1553 if (SrcTy.isPointerOrPointerVector() || SrcTy2.isPointerOrPointerVector()) {
1554 report("Generic scmp/ucmp does not support pointers as operands", MI);
1555 break;
1556 }
1557
1558 if (DstTy.isPointerOrPointerVector()) {
1559 report("Generic scmp/ucmp does not support pointers as a result", MI);
1560 break;
1561 }
1562
1563 if ((DstTy.isVector() != SrcTy.isVector()) ||
1564 (DstTy.isVector() &&
1565 DstTy.getElementCount() != SrcTy.getElementCount())) {
1566 report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1567 break;
1568 }
1569
1570 if (SrcTy != SrcTy2) {
1571 report("Generic scmp/ucmp must have same input types", MI);
1572 break;
1573 }
1574
1575 break;
1576 }
1577 case TargetOpcode::G_EXTRACT: {
1578 const MachineOperand &SrcOp = MI->getOperand(1);
1579 if (!SrcOp.isReg()) {
1580 report("extract source must be a register", MI);
1581 break;
1582 }
1583
1584 const MachineOperand &OffsetOp = MI->getOperand(2);
1585 if (!OffsetOp.isImm()) {
1586 report("extract offset must be a constant", MI);
1587 break;
1588 }
1589
1590 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1591 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1592 if (SrcSize == DstSize)
1593 report("extract source must be larger than result", MI);
1594
1595 if (DstSize + OffsetOp.getImm() > SrcSize)
1596 report("extract reads past end of register", MI);
1597 break;
1598 }
1599 case TargetOpcode::G_INSERT: {
1600 const MachineOperand &SrcOp = MI->getOperand(2);
1601 if (!SrcOp.isReg()) {
1602 report("insert source must be a register", MI);
1603 break;
1604 }
1605
1606 const MachineOperand &OffsetOp = MI->getOperand(3);
1607 if (!OffsetOp.isImm()) {
1608 report("insert offset must be a constant", MI);
1609 break;
1610 }
1611
1612 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1613 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1614
1615 if (DstSize <= SrcSize)
1616 report("inserted size must be smaller than total register", MI);
1617
1618 if (SrcSize + OffsetOp.getImm() > DstSize)
1619 report("insert writes past end of register", MI);
1620
1621 break;
1622 }
1623 case TargetOpcode::G_JUMP_TABLE: {
1624 if (!MI->getOperand(1).isJTI())
1625 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1626 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1627 if (!DstTy.isPointer())
1628 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1629 break;
1630 }
1631 case TargetOpcode::G_BRJT: {
1632 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1633 report("G_BRJT src operand 0 must be a pointer type", MI);
1634
1635 if (!MI->getOperand(1).isJTI())
1636 report("G_BRJT src operand 1 must be a jump table index", MI);
1637
1638 const auto &IdxOp = MI->getOperand(2);
1639 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1640 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1641 break;
1642 }
1643 case TargetOpcode::G_INTRINSIC:
1644 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1645 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1646 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1647 // TODO: Should verify number of def and use operands, but the current
1648 // interface requires passing in IR types for mangling.
1649 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1650 if (!IntrIDOp.isIntrinsicID()) {
1651 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1652 break;
1653 }
1654
1655 if (!verifyGIntrinsicSideEffects(MI))
1656 break;
1657 if (!verifyGIntrinsicConvergence(MI))
1658 break;
1659
1660 break;
1661 }
1662 case TargetOpcode::G_SEXT_INREG: {
1663 if (!MI->getOperand(2).isImm()) {
1664 report("G_SEXT_INREG expects an immediate operand #2", MI);
1665 break;
1666 }
1667
1668 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1669 int64_t Imm = MI->getOperand(2).getImm();
1670 if (Imm <= 0)
1671 report("G_SEXT_INREG size must be >= 1", MI);
1672 if (Imm >= SrcTy.getScalarSizeInBits())
1673 report("G_SEXT_INREG size must be less than source bit width", MI);
1674 break;
1675 }
1676 case TargetOpcode::G_BSWAP: {
1677 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1678 if (DstTy.getScalarSizeInBits() % 16 != 0)
1679 report("G_BSWAP size must be a multiple of 16 bits", MI);
1680 break;
1681 }
1682 case TargetOpcode::G_VSCALE: {
1683 if (!MI->getOperand(1).isCImm()) {
1684 report("G_VSCALE operand must be cimm", MI);
1685 break;
1686 }
1687 if (MI->getOperand(1).getCImm()->isZero()) {
1688 report("G_VSCALE immediate cannot be zero", MI);
1689 break;
1690 }
1691 break;
1692 }
1693 case TargetOpcode::G_INSERT_SUBVECTOR: {
1694 const MachineOperand &Src0Op = MI->getOperand(1);
1695 if (!Src0Op.isReg()) {
1696 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1697 break;
1698 }
1699
1700 const MachineOperand &Src1Op = MI->getOperand(2);
1701 if (!Src1Op.isReg()) {
1702 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1703 break;
1704 }
1705
1706 const MachineOperand &IndexOp = MI->getOperand(3);
1707 if (!IndexOp.isImm()) {
1708 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1709 break;
1710 }
1711
1712 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1713 LLT Src0Ty = MRI->getType(Src0Op.getReg());
1714 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1715
1716 if (!DstTy.isVector()) {
1717 report("Destination type must be a vector", MI);
1718 break;
1719 }
1720
1721 if (!Src0Ty.isVector()) {
1722 report("First source must be a vector", MI);
1723 break;
1724 }
1725
1726 if (!Src1Ty.isVector()) {
1727 report("Second source must be a vector", MI);
1728 break;
1729 }
1730
1731 if (DstTy != Src0Ty) {
1732 report("Destination type must match the first source vector type", MI);
1733 break;
1734 }
1735
1736 if (Src0Ty.getElementType() != Src1Ty.getElementType()) {
1737 report("Element type of source vectors must be the same", MI);
1738 break;
1739 }
1740
1741 if (IndexOp.getImm() != 0 &&
1742 Src1Ty.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1743 report("Index must be a multiple of the second source vector's "
1744 "minimum vector length",
1745 MI);
1746 break;
1747 }
1748 break;
1749 }
1750 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1751 const MachineOperand &SrcOp = MI->getOperand(1);
1752 if (!SrcOp.isReg()) {
1753 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1754 break;
1755 }
1756
1757 const MachineOperand &IndexOp = MI->getOperand(2);
1758 if (!IndexOp.isImm()) {
1759 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1760 break;
1761 }
1762
1763 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1764 LLT SrcTy = MRI->getType(SrcOp.getReg());
1765
1766 if (!DstTy.isVector()) {
1767 report("Destination type must be a vector", MI);
1768 break;
1769 }
1770
1771 if (!SrcTy.isVector()) {
1772 report("First source must be a vector", MI);
1773 break;
1774 }
1775
1776 if (DstTy.getElementType() != SrcTy.getElementType()) {
1777 report("Element type of vectors must be the same", MI);
1778 break;
1779 }
1780
1781 if (IndexOp.getImm() != 0 &&
1782 SrcTy.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1783 report("Index must be a multiple of the source vector's minimum vector "
1784 "length",
1785 MI);
1786 break;
1787 }
1788
1789 break;
1790 }
1791 case TargetOpcode::G_SHUFFLE_VECTOR: {
1792 const MachineOperand &MaskOp = MI->getOperand(3);
1793 if (!MaskOp.isShuffleMask()) {
1794 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1795 break;
1796 }
1797
1798 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1799 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1800 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1801
1802 if (Src0Ty != Src1Ty)
1803 report("Source operands must be the same type", MI);
1804
1805 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1806 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1807
1808 // Don't check that all operands are vector because scalars are used in
1809 // place of 1 element vectors.
1810 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1811 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1812
1813 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1814
1815 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1816 report("Wrong result type for shufflemask", MI);
1817
1818 for (int Idx : MaskIdxes) {
1819 if (Idx < 0)
1820 continue;
1821
1822 if (Idx >= 2 * SrcNumElts)
1823 report("Out of bounds shuffle index", MI);
1824 }
1825
1826 break;
1827 }
1828
1829 case TargetOpcode::G_SPLAT_VECTOR: {
1830 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1831 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1832
1833 if (!DstTy.isScalableVector()) {
1834 report("Destination type must be a scalable vector", MI);
1835 break;
1836 }
1837
1838 if (!SrcTy.isScalar()) {
1839 report("Source type must be a scalar", MI);
1840 break;
1841 }
1842
1844 SrcTy.getSizeInBits())) {
1845 report("Element type of the destination must be the same size or smaller "
1846 "than the source type",
1847 MI);
1848 break;
1849 }
1850
1851 break;
1852 }
1853 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1854 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1855 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1856 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1857
1858 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1859 report("Destination type must be a scalar or pointer", MI);
1860 break;
1861 }
1862
1863 if (!SrcTy.isVector()) {
1864 report("First source must be a vector", MI);
1865 break;
1866 }
1867
1868 auto TLI = MF->getSubtarget().getTargetLowering();
1869 if (IdxTy.getSizeInBits() !=
1870 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1871 report("Index type must match VectorIdxTy", MI);
1872 break;
1873 }
1874
1875 break;
1876 }
1877 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1878 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1879 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1880 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1881 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1882
1883 if (!DstTy.isVector()) {
1884 report("Destination type must be a vector", MI);
1885 break;
1886 }
1887
1888 if (VecTy != DstTy) {
1889 report("Destination type and vector type must match", MI);
1890 break;
1891 }
1892
1893 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1894 report("Inserted element must be a scalar or pointer", MI);
1895 break;
1896 }
1897
1898 auto TLI = MF->getSubtarget().getTargetLowering();
1899 if (IdxTy.getSizeInBits() !=
1900 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1901 report("Index type must match VectorIdxTy", MI);
1902 break;
1903 }
1904
1905 break;
1906 }
1907 case TargetOpcode::G_DYN_STACKALLOC: {
1908 const MachineOperand &DstOp = MI->getOperand(0);
1909 const MachineOperand &AllocOp = MI->getOperand(1);
1910 const MachineOperand &AlignOp = MI->getOperand(2);
1911
1912 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1913 report("dst operand 0 must be a pointer type", MI);
1914 break;
1915 }
1916
1917 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1918 report("src operand 1 must be a scalar reg type", MI);
1919 break;
1920 }
1921
1922 if (!AlignOp.isImm()) {
1923 report("src operand 2 must be an immediate type", MI);
1924 break;
1925 }
1926 break;
1927 }
1928 case TargetOpcode::G_MEMCPY_INLINE:
1929 case TargetOpcode::G_MEMCPY:
1930 case TargetOpcode::G_MEMMOVE: {
1931 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1932 if (MMOs.size() != 2) {
1933 report("memcpy/memmove must have 2 memory operands", MI);
1934 break;
1935 }
1936
1937 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1938 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1939 report("wrong memory operand types", MI);
1940 break;
1941 }
1942
1943 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1944 report("inconsistent memory operand sizes", MI);
1945
1946 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1947 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1948
1949 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1950 report("memory instruction operand must be a pointer", MI);
1951 break;
1952 }
1953
1954 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1955 report("inconsistent store address space", MI);
1956 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1957 report("inconsistent load address space", MI);
1958
1959 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1960 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1961 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1962
1963 break;
1964 }
1965 case TargetOpcode::G_BZERO:
1966 case TargetOpcode::G_MEMSET: {
1967 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1968 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1969 if (MMOs.size() != 1) {
1970 report(Twine(Name, " must have 1 memory operand"), MI);
1971 break;
1972 }
1973
1974 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1975 report(Twine(Name, " memory operand must be a store"), MI);
1976 break;
1977 }
1978
1979 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1980 if (!DstPtrTy.isPointer()) {
1981 report(Twine(Name, " operand must be a pointer"), MI);
1982 break;
1983 }
1984
1985 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1986 report("inconsistent " + Twine(Name, " address space"), MI);
1987
1988 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1989 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1990 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1991
1992 break;
1993 }
1994 case TargetOpcode::G_UBSANTRAP: {
1995 const MachineOperand &KindOp = MI->getOperand(0);
1996 if (!MI->getOperand(0).isImm()) {
1997 report("Crash kind must be an immediate", &KindOp, 0);
1998 break;
1999 }
2000 int64_t Kind = MI->getOperand(0).getImm();
2001 if (!isInt<8>(Kind))
2002 report("Crash kind must be 8 bit wide", &KindOp, 0);
2003 break;
2004 }
2005 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2006 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2007 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2008 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2009 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2010 if (!DstTy.isScalar())
2011 report("Vector reduction requires a scalar destination type", MI);
2012 if (!Src1Ty.isScalar())
2013 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2014 if (!Src2Ty.isVector())
2015 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2016 break;
2017 }
2018 case TargetOpcode::G_VECREDUCE_FADD:
2019 case TargetOpcode::G_VECREDUCE_FMUL:
2020 case TargetOpcode::G_VECREDUCE_FMAX:
2021 case TargetOpcode::G_VECREDUCE_FMIN:
2022 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2023 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2024 case TargetOpcode::G_VECREDUCE_ADD:
2025 case TargetOpcode::G_VECREDUCE_MUL:
2026 case TargetOpcode::G_VECREDUCE_AND:
2027 case TargetOpcode::G_VECREDUCE_OR:
2028 case TargetOpcode::G_VECREDUCE_XOR:
2029 case TargetOpcode::G_VECREDUCE_SMAX:
2030 case TargetOpcode::G_VECREDUCE_SMIN:
2031 case TargetOpcode::G_VECREDUCE_UMAX:
2032 case TargetOpcode::G_VECREDUCE_UMIN: {
2033 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2034 if (!DstTy.isScalar())
2035 report("Vector reduction requires a scalar destination type", MI);
2036 break;
2037 }
2038
2039 case TargetOpcode::G_SBFX:
2040 case TargetOpcode::G_UBFX: {
2041 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2042 if (DstTy.isVector()) {
2043 report("Bitfield extraction is not supported on vectors", MI);
2044 break;
2045 }
2046 break;
2047 }
2048 case TargetOpcode::G_SHL:
2049 case TargetOpcode::G_LSHR:
2050 case TargetOpcode::G_ASHR:
2051 case TargetOpcode::G_ROTR:
2052 case TargetOpcode::G_ROTL: {
2053 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2054 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2055 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2056 report("Shifts and rotates require operands to be either all scalars or "
2057 "all vectors",
2058 MI);
2059 break;
2060 }
2061 break;
2062 }
2063 case TargetOpcode::G_LLROUND:
2064 case TargetOpcode::G_LROUND: {
2065 verifyAllRegOpsScalar(*MI, *MRI);
2066 break;
2067 }
2068 case TargetOpcode::G_IS_FPCLASS: {
2069 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2070 LLT DestEltTy = DestTy.getScalarType();
2071 if (!DestEltTy.isScalar()) {
2072 report("Destination must be a scalar or vector of scalars", MI);
2073 break;
2074 }
2075 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2076 LLT SrcEltTy = SrcTy.getScalarType();
2077 if (!SrcEltTy.isScalar()) {
2078 report("Source must be a scalar or vector of scalars", MI);
2079 break;
2080 }
2081 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2082 break;
2083 const MachineOperand &TestMO = MI->getOperand(2);
2084 if (!TestMO.isImm()) {
2085 report("floating-point class set (operand 2) must be an immediate", MI);
2086 break;
2087 }
2088 int64_t Test = TestMO.getImm();
2089 if (Test < 0 || Test > fcAllFlags) {
2090 report("Incorrect floating-point class set (operand 2)", MI);
2091 break;
2092 }
2093 break;
2094 }
2095 case TargetOpcode::G_PREFETCH: {
2096 const MachineOperand &AddrOp = MI->getOperand(0);
2097 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2098 report("addr operand must be a pointer", &AddrOp, 0);
2099 break;
2100 }
2101 const MachineOperand &RWOp = MI->getOperand(1);
2102 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2103 report("rw operand must be an immediate 0-1", &RWOp, 1);
2104 break;
2105 }
2106 const MachineOperand &LocalityOp = MI->getOperand(2);
2107 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2108 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2109 break;
2110 }
2111 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2112 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2113 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2114 break;
2115 }
2116 break;
2117 }
2118 case TargetOpcode::G_ASSERT_ALIGN: {
2119 if (MI->getOperand(2).getImm() < 1)
2120 report("alignment immediate must be >= 1", MI);
2121 break;
2122 }
2123 case TargetOpcode::G_CONSTANT_POOL: {
2124 if (!MI->getOperand(1).isCPI())
2125 report("Src operand 1 must be a constant pool index", MI);
2126 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2127 report("Dst operand 0 must be a pointer", MI);
2128 break;
2129 }
2130 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2131 const MachineOperand &AddrOp = MI->getOperand(1);
2132 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2133 report("addr operand must be a pointer", &AddrOp, 1);
2134 break;
2135 }
2136 default:
2137 break;
2138 }
2139}
2140
// Verify per-instruction invariants that do not depend on operand-level or
// liveness analysis: explicit operand counts, instruction flags, PHI
// placement, inline-asm tied operands, unspillable terminators, DBG_VALUE
// locations, MachineMemOperand consistency, SlotIndex mapping, and
// structural rules for specific opcodes (COPY, STATEPOINT, INSERT_SUBREG,
// REG_SEQUENCE). Errors are accumulated via report().
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  // Every explicit operand demanded by the descriptor must be present.
  if (MI->getNumOperands() < MCID.getNumOperands()) {
    report("Too few operands", MI);
    errs() << MCID.getNumOperands() << " operands expected, but "
           << MI->getNumOperands() << " given.\n";
  }

  // The NoConvergent flag only makes sense on instructions whose descriptor
  // marks them convergent in the first place.
  if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
    report("NoConvergent flag expected only on convergent instructions.", MI);

  // PHIs are forbidden once the NoPHIs property is set, and must form a
  // contiguous group at the top of the basic block (FirstNonPHI tracks the
  // first non-PHI instruction seen in the current block).
  if (MI->isPHI()) {
    if (MF->getProperties().hasProperty(
      report("Found PHI instruction with NoPHIs property set", MI);

    if (FirstNonPHI)
      report("Found PHI instruction after non-PHI", MI);
  } else if (FirstNonPHI == nullptr)
    FirstNonPHI = MI;

  // Check the tied operands.
  if (MI->isInlineAsm())
    verifyInlineAsm(MI);

  // Check that unspillable terminators define a reg and have at most one use.
  if (TII->isUnspillableTerminator(MI)) {
    if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
      report("Unspillable Terminator does not define a reg", MI);
    Register Def = MI->getOperand(0).getReg();
    if (Def.isVirtual() &&
        !MF->getProperties().hasProperty(
        std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
      report("Unspillable Terminator expected to have at most one use!", MI);
  }

  // A fully-formed DBG_VALUE must have a location. Ignore partially formed
  // DBG_VALUEs: these are convenient to use in tests, but should never get
  // generated.
  if (MI->isDebugValue() && MI->getNumOperands() == 4)
    if (!MI->getDebugLoc())
      report("Missing DebugLoc for debug instruction", MI);

  // Meta instructions should never be the subject of debug value tracking,
  // they don't create a value in the output program at all.
  if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
    report("Metadata instruction should not have a value tracking number", MI);

  // Check the MachineMemOperands for basic consistency.
  for (MachineMemOperand *Op : MI->memoperands()) {
    if (Op->isLoad() && !MI->mayLoad())
      report("Missing mayLoad flag", MI);
    if (Op->isStore() && !MI->mayStore())
      report("Missing mayStore flag", MI);
  }

  // Debug values must not have a slot index.
  // Other instructions must have one, unless they are inside a bundle.
  if (LiveInts) {
    bool mapped = !LiveInts->isNotInMIMap(*MI);
    if (MI->isDebugOrPseudoInstr()) {
      if (mapped)
        report("Debug instruction has a slot index", MI);
    } else if (MI->isInsideBundle()) {
      if (mapped)
        report("Instruction inside bundle has a slot index", MI);
    } else {
      if (!mapped)
        report("Missing slot index", MI);
    }
  }

  // Generic (pre-isel) opcodes have their own LLT-based verification and
  // bypass the target-specific checks below.
  unsigned Opc = MCID.getOpcode();
    verifyPreISelGenericInstruction(MI);
    return;
  }

  // Give the target a chance to run its own per-instruction checks.
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &SrcOp = MI->getOperand(1);
    const Register SrcReg = SrcOp.getReg();
    const Register DstReg = DstOp.getReg();

    LLT DstTy = MRI->getType(DstReg);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isValid() && DstTy.isValid()) {
      // If both types are valid, check that the types are the same.
      if (SrcTy != DstTy) {
        report("Copy Instruction is illegal with mismatching types", MI);
        errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
      }

      break;
    }

    if (!SrcTy.isValid() && !DstTy.isValid())
      break;

    // If we have only one valid type, this is likely a copy between a virtual
    // and physical register.
    TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
    TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
    // When a physical register's minimal class for the paired LLT is known,
    // prefer that class's size over the default register size.
    if (SrcReg.isPhysical() && DstTy.isValid()) {
      const TargetRegisterClass *SrcRC =
          TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
      if (SrcRC)
        SrcSize = TRI->getRegSizeInBits(*SrcRC);
    }

    if (DstReg.isPhysical() && SrcTy.isValid()) {
      const TargetRegisterClass *DstRC =
          TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
      if (DstRC)
        DstSize = TRI->getRegSizeInBits(*DstRC);
    }

    // The next two checks allow COPY between physical and virtual registers,
    // when the virtual register has a scalable size and the physical register
    // has a fixed size. These checks allow COPY between *potentialy* mismatched
    // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
    // be able to resolve a fixed size for the scalable vector, and at that
    // point this function will know for sure whether the sizes are mismatched
    // and correctly report a size mismatch.
    if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
        !SrcSize.isScalable())
      break;
    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
        !DstSize.isScalable())
      break;

    // Subregister operands make the raw sizes incomparable, so only flag a
    // mismatch when neither side uses a subregister.
    if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
      if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
        report("Copy Instruction is illegal with mismatching sizes", MI);
        errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
               << "\n";
      }
    }
    break;
  }
  case TargetOpcode::STATEPOINT: {
    StatepointOpers SO(MI);
    // The ID, patch-byte count, and call-argument count are meta operands
    // and must all be immediates.
    if (!MI->getOperand(SO.getIDPos()).isImm() ||
        !MI->getOperand(SO.getNBytesPos()).isImm() ||
        !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
      report("meta operands to STATEPOINT not constant!", MI);
      break;
    }

    // Each stack-map constant is encoded as a ConstantOp marker immediate at
    // Offset-1 followed by the immediate payload at Offset.
    auto VerifyStackMapConstant = [&](unsigned Offset) {
      if (Offset >= MI->getNumOperands()) {
        report("stack map constant to STATEPOINT is out of range!", MI);
        return;
      }
      if (!MI->getOperand(Offset - 1).isImm() ||
          MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
          !MI->getOperand(Offset).isImm())
        report("stack map constant to STATEPOINT not well formed!", MI);
    };
    VerifyStackMapConstant(SO.getCCIdx());
    VerifyStackMapConstant(SO.getFlagsIdx());
    VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
    VerifyStackMapConstant(SO.getNumGCPtrIdx());
    VerifyStackMapConstant(SO.getNumAllocaIdx());
    VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());

    // Verify that all explicit statepoint defs are tied to gc operands as
    // they are expected to be a relocation of gc operands.
    unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
    unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
    for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
      unsigned UseOpIdx;
      if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
        report("STATEPOINT defs expected to be tied", MI);
        break;
      }
      if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
        report("STATEPOINT def tied to non-gc operand", MI);
        break;
      }
    }

    // TODO: verify we have properly encoded deopt arguments
  } break;
  case TargetOpcode::INSERT_SUBREG: {
    // The inserted value (operand 2, possibly itself a subregister) may not
    // be wider than the subregister slot (operand 3) it is inserted into.
    unsigned InsertedSize;
    if (unsigned SubIdx = MI->getOperand(2).getSubReg())
      InsertedSize = TRI->getSubRegIdxSize(SubIdx);
    else
      InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
    unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
    if (SubRegSize < InsertedSize) {
      report("INSERT_SUBREG expected inserted value to have equal or lesser "
             "size than the subreg it was inserted into", MI);
      break;
    }
  } break;
  case TargetOpcode::REG_SEQUENCE: {
    // REG_SEQUENCE is one def followed by (register, subreg-index) pairs, so
    // the total operand count must be odd.
    unsigned NumOps = MI->getNumOperands();
    if (!(NumOps & 1)) {
      report("Invalid number of operands for REG_SEQUENCE", MI);
      break;
    }

    for (unsigned I = 1; I != NumOps; I += 2) {
      const MachineOperand &RegOp = MI->getOperand(I);
      const MachineOperand &SubRegOp = MI->getOperand(I + 1);

      if (!RegOp.isReg())
        report("Invalid register operand for REG_SEQUENCE", &RegOp, I);

      // Subreg index 0 (no subregister) is not a valid REG_SEQUENCE entry.
      if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
          SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
        report("Invalid subregister index operand for REG_SEQUENCE",
               &SubRegOp, I + 1);
      }
    }

    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical())
      report("REG_SEQUENCE does not support physical register results", MI);

    if (MI->getOperand(0).getSubReg())
      report("Invalid subreg result for REG_SEQUENCE", MI);

    break;
  }
  }
}
2377
// Verify a single machine operand against its instruction's descriptor
// (explicit def/use classification, implicit flags, tied-operand
// constraints) and against per-operand-kind rules: register class/bank
// conformance for register operands, CFG membership for PHI basic-block
// operands, spill-slot liveness (via LiveStacks) for frame indices, and
// bounds for CFI indices.
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  // PATCHPOINT's result is optional: only operand 0, and only when it is a
  // register, counts as an explicit def.
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);
      if (MO->isReg()) {
            !TII->isPCRelRegisterOperandLegal(*MO)))
          report("Expected a non-register operand.", MO, MONum);
      }
    }

    // Cross-check the operand's tie flags against the descriptor's TIED_TO
    // constraint: both must agree on whether and to what this operand is tied.
    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (MO->getReg().isPhysical()) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (MOTied.getReg().isPhysical() &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else if (!MI->isVariadic()) {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (!MO->isValidExcessOperand())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }

  // Per-operand-kind checks.
  switch (MO->getType()) {
    // Verify debug flag on debug instructions. Check this first because reg0
    // indicates an undefined debug value.
    if (MI->isDebugInstr() && MO->isUse()) {
      if (!MO->isDebug())
        report("Register operand must be marked debug", MO, MONum);
    } else if (MO->isDebug()) {
      report("Register operand must not be marked debug", MO, MONum);
    }

    const Register Reg = MO->getReg();
    // Register 0 (no register) has nothing further to verify.
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO, MONum);

    // Verify the consistency of tied operands.
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      // Tie links must be symmetric: the other operand must tie back here.
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }

    // Verify two-address constraints after the twoaddressinstruction pass.
    // Both twoaddressinstruction pass and phi-node-elimination pass call
    // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
    // after twoaddressinstruction pass not after phi-node-elimination pass. So
    // we shouldn't use the IsSSA as the condition, we should based on
    // TiedOpsRewritten property to verify two-address constraints, this
    // property will be set in twoaddressinstruction pass.
    unsigned DefIdx;
    if (MF->getProperties().hasProperty(
        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);

    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Reg.isPhysical()) {
      // Physical registers never carry subregister indices; the physical
      // subregister is referenced directly instead.
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
              TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            errs() << printReg(Reg, TRI) << " is not a "
                   << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instruction is permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
          const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank if any.
          // Scalable vectors are exempt since their size is not fixed here.
          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            errs() << "Register bank " << RegBank->getName() << " too small("
                   << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
                   << Ty.getSizeInBits() << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index", MO,
                 MONum);
          return;
        }

        // If this is a target specific instruction and this operand
        // has register class constraint, the virtual register must
        // comply to it.
        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
            MONum < MCID.getNumOperands() &&
            TII->getRegClass(MCID, MONum, TRI, *MF)) {
          report("Virtual register does not match instruction constraint", MO,
                 MONum);
          errs() << "Expect register class "
                 << TRI->getRegClassName(
                        TII->getRegClass(MCID, MONum, TRI, *MF))
                 << " but got nothing\n";
          return;
        }

        break;
      }
      // Class-constrained virtual register: validate any subregister index
      // against the register class.
      if (SubIdx) {
        const TargetRegisterClass *SRC =
          TRI->getSubClassWithSubReg(RC, SubIdx);
        if (!SRC) {
          report("Invalid subregister index for virtual register", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not support subreg index " << SubIdx << "\n";
          return;
        }
        if (RC != SRC) {
          report("Invalid register class for subregister index", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not fully support subreg index " << SubIdx << "\n";
          return;
        }
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
              TII->getRegClass(MCID, MONum, TRI, *MF)) {
          // With a subregister, the constraint applies to the super-register,
          // so translate DRC through the subreg index before comparing.
          if (SubIdx) {
            const TargetRegisterClass *SuperRC =
                TRI->getLargestLegalSuperClass(RC, *MF);
            if (!SuperRC) {
              report("No largest legal super class exists.", MO, MONum);
              return;
            }
            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
            if (!DRC) {
              report("No matching super-reg register class.", MO, MONum);
              return;
            }
          }
          if (!RC->hasSuperClassEq(DRC)) {
            report("Illegal virtual register for instruction", MO, MONum);
            errs() << "Expected a " << TRI->getRegClassName(DRC)
                   << " register, but got a " << TRI->getRegClassName(RC)
                   << " register\n";
          }
        }
      }
    }
    break;
  }

    // Collect register masks for the whole-function liveness checks.
    regMasks.push_back(MO->getRegMask());
    break;

    // A PHI's incoming block must actually be a CFG predecessor.
    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
      report("PHI operand is not in the CFG", MO, MONum);
    break;

    // Frame-index operand: when LiveStacks tracks this slot, check that the
    // instruction does not access a dead spill slot.
    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      int FI = MO->getIndex();
      LiveInterval &LI = LiveStks->getInterval(FI);
      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);

      bool stores = MI->mayStore();
      bool loads = MI->mayLoad();
      // For a memory-to-memory move, we need to check if the frame
      // index is used for storing or loading, by inspecting the
      // memory operands.
      if (stores && loads) {
        for (auto *MMO : MI->memoperands()) {
          const PseudoSourceValue *PSV = MMO->getPseudoValue();
          if (PSV == nullptr) continue;
            dyn_cast<FixedStackPseudoSourceValue>(PSV);
          if (Value == nullptr) continue;
          if (Value->getFrameIndex() != FI) continue;

          if (MMO->isStore())
            loads = false;
          else
            stores = false;
          break;
        }
        // If no memoperand pinned down the direction, the instruction is
        // missing a fixed-stack memoperand.
        if (loads == stores)
          report("Missing fixed stack memoperand.", MI);
      }
      // Loads read at the early (use) slot; stores write at the def slot.
      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
        report("Instruction loads from dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
      if (stores && !LI.liveAt(Idx.getRegSlot())) {
        report("Instruction stores to dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
    }
    break;

    // CFI index must refer to an existing frame instruction.
    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
      report("CFI instruction has invalid index", MO, MONum);
    break;

  default:
    break;
  }
}
2711
2712void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2713 unsigned MONum, SlotIndex UseIdx,
2714 const LiveRange &LR,
2715 Register VRegOrUnit,
2716 LaneBitmask LaneMask) {
2717 const MachineInstr *MI = MO->getParent();
2718 LiveQueryResult LRQ = LR.Query(UseIdx);
2719 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2720 // Check if we have a segment at the use, note however that we only need one
2721 // live subregister range, the others may be dead.
2722 if (!HasValue && LaneMask.none()) {
2723 report("No live segment at use", MO, MONum);
2724 report_context_liverange(LR);
2725 report_context_vreg_regunit(VRegOrUnit);
2726 report_context(UseIdx);
2727 }
2728 if (MO->isKill() && !LRQ.isKill()) {
2729 report("Live range continues after kill flag", MO, MONum);
2730 report_context_liverange(LR);
2731 report_context_vreg_regunit(VRegOrUnit);
2732 if (LaneMask.any())
2733 report_context_lanemask(LaneMask);
2734 report_context(UseIdx);
2735 }
2736}
2737
// Verify that def operand \p MO (operand number \p MONum of its instruction)
// is consistent with live range \p LR of virtual register or register unit
// \p VRegOrUnit: a value must exist at \p DefIdx, its def slot must agree
// with the operand (modulo early-clobber slot differences), and a dead flag
// on the operand must match a dead def in the range.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value defined at DefIdx at all.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2793
// Verify the liveness-related properties of a single register operand \p MO
// (operand number \p MONum of its instruction): kill flags against
// LiveVariables, use/def consistency against LiveIntervals (including
// subregister lane ranges), and basic def-before-use tracking through the
// verifier's regsLive/regsKilled/regsDefined sets.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Look up the main live interval for virtual registers, and require
  // subranges when a subreg operand is used and subreg liveness is tracked.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
            MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        // NOTE(review): LI is assumed non-null here; a missing interval was
        // only reported (not recovered from) above — confirm against callers.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          // Collect the lanes this operand reads and check each overlapping
          // subrange individually.
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      // Early-clobber defs live at the early-clobber slot, others at the
      // register slot.
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
2962
2963// This function gets called after visiting all instructions in a bundle. The
2964// argument points to the bundle header.
2965// Normal stand-alone instructions are also considered 'bundles', and this
2966// function is called for all of them.
2967void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2968 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2969 set_union(MInfo.regsKilled, regsKilled);
2970 set_subtract(regsLive, regsKilled); regsKilled.clear();
2971 // Kill any masked registers.
2972 while (!regMasks.empty()) {
2973 const uint32_t *Mask = regMasks.pop_back_val();
2974 for (Register Reg : regsLive)
2975 if (Reg.isPhysical() &&
2976 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2977 regsDead.push_back(Reg);
2978 }
2979 set_subtract(regsLive, regsDead); regsDead.clear();
2980 set_union(regsLive, regsDefined); regsDefined.clear();
2981}
2982
2983void
2984MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2985 MBBInfoMap[MBB].regsLiveOut = regsLive;
2986 regsLive.clear();
2987
2988 if (Indexes) {
2989 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2990 if (!(stop > lastIndex)) {
2991 report("Block ends before last instruction index", MBB);
2992 errs() << "Block ends at " << stop
2993 << " last instruction was at " << lastIndex << '\n';
2994 }
2995 lastIndex = stop;
2996 }
2997}
2998
2999namespace {
3000// This implements a set of registers that serves as a filter: can filter other
3001// sets by passing through elements not in the filter and blocking those that
3002// are. Any filter implicitly includes the full set of physical registers upon
3003// creation, thus filtering them all out. The filter itself as a set only grows,
3004// and needs to be as efficient as possible.
struct VRegFilter {
  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
  // no duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
    SmallVector<Register, 0> VRegsBuffer;
    filterAndAdd(FromRegSet, VRegsBuffer);
  }
  // Filter \p FromRegSet through the filter and append passed elements into \p
  // ToVRegs. All elements appended are then added to the filter itself.
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    // First pass: collect elements not already in the filter and compute the
    // storage sizes needed to hold them (physical registers never pass).
    for (Register Reg : FromRegSet) {
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with SparseUniverseMax
    // tuned all the way down) and double iteration (the second one is over a
    // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    // Second pass: commit the passed elements into the filter storage.
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }

private:
  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
  // are tracked by Dense. The only purpose of the threshold and the Dense set
  // is to have a reasonably growing memory usage in pathological cases (large
  // number of very sparse VRegFilter instances live at the same time). In
  // practice even in the worst-by-execution time cases having all elements
  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
  // space efficient than if tracked by Dense. The threshold is set to keep the
  // worst-case memory usage within 2x of figures determined empirically for
  // "all Dense" scenario in such worst-by-execution-time cases.
  BitVector Sparse;
};
3071
3072// Implements both a transfer function and a (binary, in-place) join operator
3073// for a dataflow over register sets with set union join and filtering transfer
3074// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3075// Maintains out_b as its state, allowing for O(n) iteration over it at any
3076// time, where n is the size of the set (as opposed to O(U) where U is the
3077// universe). filter_b implicitly contains all physical registers at all times.
class FilteringVRegSet {
  VRegFilter Filter;

public:
  // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
  // Both virtual and physical registers are fine.
  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
    Filter.add(RS);
  }
  // Passes \p RS through the filter_b (transfer function) and adds what's left
  // to itself (out_b).
  template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to maintain VRegs a set (and the join operation
    // a set union) just add everything being added here to the Filter as well.
    return Filter.filterAndAdd(RS, VRegs);
  }
  // Read-only iteration over the accumulated out_b set.
  using const_iterator = decltype(VRegs)::const_iterator;
  const_iterator begin() const { return VRegs.begin(); }
  const_iterator end() const { return VRegs.end(); }
  size_t size() const { return VRegs.size(); }
};
3100} // namespace
3101
3102// Calculate the largest possible vregsPassed sets. These are the registers that
3103// can pass through an MBB live, but may not be live every time. It is assumed
3104// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  // Visit blocks in reverse post-order so that on an acyclic CFG all
  // predecessors are processed before the block itself.
  for (const MachineBasicBlock *MB :
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    // Registers killed in or already live-out of this block cannot merely
    // "pass through" it, so they seed the filter.
    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      // Anything live out of (or passed through) a reachable predecessor may
      // pass through this block.
      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
  }
}
3130
3131// Calculate the set of virtual registers that must be passed through each basic
3132// block in order to satisfy the requirements of successor blocks. This is very
3133// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      // Re-queue any predecessor whose requirement set grew.
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI node.
    for (const MachineInstr &MI : MBB.phis()) {
      // PHI operands come in (value, predecessor-block) pairs starting at 1.
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those Operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      // A self-loop cannot add new requirements.
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}
3178
3179// Check PHI instructions at the beginning of MBB. It is assumed that
3180// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  for (const MachineInstr &Phi : MBB) {
    // PHIs are grouped at the top of the block; stop at the first non-PHI.
    if (!Phi.isPHI())
      break;
    // Reset the set of predecessors covered by this PHI's operands.
    seen.clear();

    // Operand 0 must be a flag-free virtual register def.
    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    // Remaining operands are (value, predecessor-block) pairs.
    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        // The incoming value must be live-out of the predecessor unless it is
        // explicitly undef.
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}
3245
// Run the convergence-control token verifier over every block and bundled
// instruction of the function, reporting problems through \p FailureCB. The
// dominator tree is only recomputed (and the dominance-based checks run) if
// convergence tokens were actually encountered.
static void
                         std::function<void(const Twine &Message)> FailureCB) {
  CV.initialize(&errs(), FailureCB, MF);

  for (const auto &MBB : MF) {
    CV.visit(MBB);
    for (const auto &MI : MBB.instrs())
      CV.visit(MI);
  }

  if (CV.sawTokens()) {
    // Tokens were seen: dominance information is needed for verification.
    DT.recalculate(const_cast<MachineFunction &>(MF));
    CV.verify(DT);
  }
}
3263
// Whole-function checks run once every block has been visited: convergence
// control, the regsPassed/regsRequired dataflow, PHI operands, the liveness
// analyses (if present), physreg live-in lists, call-site info, and debug
// instruction-number uniqueness.
void MachineVerifier::visitMachineFunctionAfter() {
  auto FailureCB = [this](const Twine &Message) {
    report(Message.str().c_str(), MF);
  };
  verifyConvergenceControl(*MF, DT, FailureCB);

  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  // Anything still required on entry to the entry block has a use that is not
  // dominated by its def.
  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
        MCPhysReg LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from predecessor.",
                   &MBB);
            errs() << TRI->getName(LiveInReg)
                   << " not found to be live out from "
                   << printMBBReference(*Pred) << "\n";
          }
        }
      }

  // Every entry in the call-site info map must key an actual call.
  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}
3347
// Cross-check LiveVariables' AliveBlocks bit vector against the verifier's
// own vregsRequired dataflow for every virtual register: a block needs the
// register live-through exactly when LiveVariables says it is alive there.
void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}
3373
// Verify every virtual-register live interval and every cached register-unit
// live range known to LiveIntervals.
void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}
3399
// Verify a single value number \p VNI of live range \p LR: it must be live at
// its own def, map back to itself, have a valid def index, and (for non-PHI
// values) be defined by an instruction that actually writes the register at
// the correct slot kind (early-clobber vs. register slot).
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  // Unused value numbers are allowed to dangle.
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // PHI-defined values must be defined exactly at the block start.
  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    // Scan the defining instruction (as a bundle) for a def of this register
    // (or register unit) covering the queried lanes.
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        // For a register unit, any physreg def containing that unit counts.
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
3490
// Verify a single segment of live range \p LR: its value number must belong
// to the range, its endpoints must be sensible (valid blocks/instructions,
// proper slot kinds, matching kill/dead flags), and the value must be
// live-out of every predecessor of every block the segment spans.
void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  // The segment's valno must be owned by this live range.
  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  // A segment either starts where its value is defined or is live-in at a
  // block boundary.
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for non-live-out segments.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed dead phis.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment is ending inside EndMBB
    const MachineInstr *MI =
        LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // The block slot must refer to a basic block boundary.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // Segment ends on the dead slot.
      // That means there must be a dead def.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end at
    // an early-clobber slot if the last use is tied to an early-clobber def.
    if (MF->getProperties().hasProperty(
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane
            // mask for subregister defs. Read-undef defs will be handled by
            // readsReg below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for subregister
        // liveranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  // For subranges, compute where the lanes are undefined so missing live-outs
  // can be excused when the value is jointly dominated.
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
      VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // Predecessor of landing pad live-out on last call.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However for a phi
      // instruction with subregister intervals
      // only one of the subregisters (not necessarily the current one) needs to
      // be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}
3718
3719void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3720 LaneBitmask LaneMask) {
3721 for (const VNInfo *VNI : LR.valnos)
3722 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3723
3724 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3725 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3726}
3727
3728void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3729 Register Reg = LI.reg();
3730 assert(Reg.isVirtual());
3731 verifyLiveRange(LI, Reg);
3732
3733 if (LI.hasSubRanges()) {
3735 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3736 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3737 if ((Mask & SR.LaneMask).any()) {
3738 report("Lane masks of sub ranges overlap in live interval", MF);
3739 report_context(LI);
3740 }
3741 if ((SR.LaneMask & ~MaxMask).any()) {
3742 report("Subrange lanemask is invalid", MF);
3743 report_context(LI);
3744 }
3745 if (SR.empty()) {
3746 report("Subrange must not be empty", MF);
3747 report_context(SR, LI.reg(), SR.LaneMask);
3748 }
3749 Mask |= SR.LaneMask;
3750 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3751 if (!LI.covers(SR)) {
3752 report("A Subrange is not covered by the main range", MF);
3753 report_context(LI);
3754 }
3755 }
3756 }
3757
3758 // Check the LI only has one connected component.
3759 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3760 unsigned NumComp = ConEQ.Classify(LI);
3761 if (NumComp > 1) {
3762 report("Multiple connected components in live interval", MF);
3763 report_context(LI);
3764 for (unsigned comp = 0; comp != NumComp; ++comp) {
3765 errs() << comp << ": valnos";
3766 for (const VNInfo *I : LI.valnos)
3767 if (comp == ConEQ.getEqClass(I))
3768 errs() << ' ' << I->id;
3769 errs() << '\n';
3770 }
3771 }
3772}
3773
3774namespace {
3775
// A FrameSetup or FrameDestroy may carry a zero adjustment, so a lone
// integer cannot tell the two apart when its value is zero. Pair each
// adjustment value with a bool recording whether a setup is in effect.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int Entry, int Exit, bool EntrySetup, bool ExitSetup)
      : EntryValue(Entry), ExitValue(Exit), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Stack adjustment on block entry/exit. Can be negative, which means we
  // are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  // Whether a FrameSetup is pending (not yet matched by a FrameDestroy) at
  // block entry/exit.
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};
3792
3793} // end anonymous namespace
3794
3795/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3796/// by a FrameDestroy <n>, stack adjustments are identical on all
3797/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3798void MachineVerifier::verifyStackFrame() {
3799 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3800 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3801 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3802 return;
3803
3805 SPState.resize(MF->getNumBlockIDs());
3807
3808 // Visit the MBBs in DFS order.
3809 for (df_ext_iterator<const MachineFunction *,
3811 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3812 DFI != DFE; ++DFI) {
3813 const MachineBasicBlock *MBB = *DFI;
3814
3815 StackStateOfBB BBState;
3816 // Check the exit state of the DFS stack predecessor.
3817 if (DFI.getPathLength() >= 2) {
3818 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3819 assert(Reachable.count(StackPred) &&
3820 "DFS stack predecessor is already visited.\n");
3821 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3822 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3823 BBState.ExitValue = BBState.EntryValue;
3824 BBState.ExitIsSetup = BBState.EntryIsSetup;
3825 }
3826
3827 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3828 report("Call frame size on entry does not match value computed from "
3829 "predecessor",
3830 MBB);
3831 errs() << "Call frame size on entry " << MBB->getCallFrameSize()
3832 << " does not match value computed from predecessor "
3833 << -BBState.EntryValue << '\n';
3834 }
3835
3836 // Update stack state by checking contents of MBB.
3837 for (const auto &I : *MBB) {
3838 if (I.getOpcode() == FrameSetupOpcode) {
3839 if (BBState.ExitIsSetup)
3840 report("FrameSetup is after another FrameSetup", &I);
3841 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3842 report("AdjustsStack not set in presence of a frame pseudo "
3843 "instruction.", &I);
3844 BBState.ExitValue -= TII->getFrameTotalSize(I);
3845 BBState.ExitIsSetup = true;
3846 }
3847
3848 if (I.getOpcode() == FrameDestroyOpcode) {
3849 int Size = TII->getFrameTotalSize(I);
3850 if (!BBState.ExitIsSetup)
3851 report("FrameDestroy is not after a FrameSetup", &I);
3852 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3853 BBState.ExitValue;
3854 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3855 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3856 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3857 << AbsSPAdj << ">.\n";
3858 }
3859 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3860 report("AdjustsStack not set in presence of a frame pseudo "
3861 "instruction.", &I);
3862 BBState.ExitValue += Size;
3863 BBState.ExitIsSetup = false;
3864 }
3865 }
3866 SPState[MBB->getNumber()] = BBState;
3867
3868 // Make sure the exit state of any predecessor is consistent with the entry
3869 // state.
3870 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3871 if (Reachable.count(Pred) &&
3872 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3873 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3874 report("The exit stack state of a predecessor is inconsistent.", MBB);
3875 errs() << "Predecessor " << printMBBReference(*Pred)
3876 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3877 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3878 << printMBBReference(*MBB) << " has entry state ("
3879 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3880 }
3881 }
3882
3883 // Make sure the entry state of any successor is consistent with the exit
3884 // state.
3885 for (const MachineBasicBlock *Succ : MBB->successors()) {
3886 if (Reachable.count(Succ) &&
3887 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3888 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3889 report("The entry stack state of a successor is inconsistent.", MBB);
3890 errs() << "Successor " << printMBBReference(*Succ)
3891 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3892 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3893 << printMBBReference(*MBB) << " has exit state ("
3894 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3895 }
3896 }
3897
3898 // Make sure a basic block with return ends with zero stack adjustment.
3899 if (!MBB->empty() && MBB->back().isReturn()) {
3900 if (BBState.ExitIsSetup)
3901 report("A return block ends with a FrameSetup.", MBB);
3902 if (BBState.ExitValue)
3903 report("A return block ends with a nonzero stack adjustment.", MBB);
3904 }
3905 }
3906}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB)
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1362
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:424
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:648
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:149
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:355
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:280
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:203
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:774
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:940
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:974
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:965
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:176
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:209
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:242
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:212
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:216
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:272
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:219
SlotIndexes pass.
Definition: SlotIndexes.h:297
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:505
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:510
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:481
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:361
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
iterator begin() const
Definition: SmallPtrSet.h:432
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:218
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:225
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:239
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:47
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
void initializeMachineVerifierLegacyPassPass(PassRegistry &)
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2067
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:362
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:78
Pair of physical register and lane mask.