LLVM 17.0.0git — MachineVerifier.cpp (annotated source listing)
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
28#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
59#include "llvm/IR/BasicBlock.h"
60#include "llvm/IR/Constants.h"
62#include "llvm/IR/Function.h"
63#include "llvm/IR/InlineAsm.h"
66#include "llvm/MC/LaneBitmask.h"
67#include "llvm/MC/MCAsmInfo.h"
68#include "llvm/MC/MCDwarf.h"
69#include "llvm/MC/MCInstrDesc.h"
72#include "llvm/Pass.h"
76#include "llvm/Support/ModRef.h"
79#include <algorithm>
80#include <cassert>
81#include <cstddef>
82#include <cstdint>
83#include <iterator>
84#include <string>
85#include <utility>
86
87using namespace llvm;
88
89namespace {
90
// State for one verification run. A MachineVerifier object checks a single
// MachineFunction: verify() walks every block, bundle, instruction and
// operand, accumulating an error count that is returned to the caller.
struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  // Entry point: run all checks on \p MF and return the number of errors.
  unsigned verify(const MachineFunction &MF);

  // Pass we run under; may be null when invoked standalone. Used to query
  // optional analyses (LiveIntervals, LiveVariables, ...).
  Pass *const PASS;
  // Optional banner printed before the first error message.
  const char *Banner;
  // Cached per-function context, initialized at the start of verify().
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Running error count; the report() overloads increment it.
  unsigned foundErrors = 0;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;

  // Per-block scratch state, reset as each block is visited.
  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  // Index of the last instruction visited; used to check SlotIndex ordering.
  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  // Liveness and reachability facts computed per basic block.
  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.

  // True if Reg is marked reserved in the regsReserved snapshot.
  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  // True if Reg is a valid physreg that is allocatable and not reserved.
  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  // Visitor hooks, invoked by verify() in document order.
  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);
  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  // Error reporting at function / block / instruction / operand granularity.
  // Each overload prints identifying context and bumps foundErrors.
  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  // Helpers that print additional "- foo: ..." context lines after report().
  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
  void verifyLiveRangeSegment(const LiveRange &,
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};
285
// Legacy pass-manager wrapper that runs MachineVerifier on every machine
// function and aborts compilation if any errors are found.
struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  // Text printed before the first error, identifying which verifier run
  // produced it (typically the name of the preceding pass).
  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
  }

  // The verifier only reads the function; it preserves all analyses.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
    // Verification never modifies the function.
    return false;
  }
};
319
320} // end anonymous namespace
321
// Pass identification token used by the legacy pass manager.
char MachineVerifierPass::ID = 0;

// Register the pass under the -machineverifier command-line name.
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)
326
  // Construct a standalone verifier pass carrying the given banner string.
  return new MachineVerifierPass(Banner);
}
330
                           const std::string &Banner,
                           const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  // Run the verifier with no owning pass (so no optional analyses) and
  // abort compilation if any errors were found.
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}
343
344bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
345 const {
346 MachineFunction &MF = const_cast<MachineFunction&>(*this);
347 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
348 if (AbortOnErrors && FoundErrors)
349 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
350 return FoundErrors == 0;
351}
352
// Check that SlotIndexes' per-MBB index list is sorted: each block's start
// index must be strictly greater than the previous block's. No-op when the
// SlotIndexes analysis is unavailable.
void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}
365
// Cross-check declared MachineFunctionProperties against the actual state of
// the function's register info.
void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}
375
// Main driver: set up per-function context, then walk every block,
// instruction and operand calling the visit* hooks in order while keeping
// the bundle bookkeeping consistent. Returns the accumulated error count.
unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  // Cache per-function context used by all the check helpers.
  this->MF = &MF;
  TM = &MF.getTarget();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  // Cache the properties that would otherwise be queried per operand.
  isFunctionRegBankSelected = MF.getProperties().hasProperty(
  isFunctionSelected = MF.getProperties().hasProperty(
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(

  // Pick up whichever liveness analyses the owning pass can provide.
  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}
485
486void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
487 assert(MF);
488 errs() << '\n';
489 if (!foundErrors++) {
490 if (Banner)
491 errs() << "# " << Banner << '\n';
492 if (LiveInts != nullptr)
493 LiveInts->print(errs());
494 else
495 MF->print(errs(), Indexes);
496 }
497 errs() << "*** Bad machine code: " << msg << " ***\n"
498 << "- function: " << MF->getName() << "\n";
499}
500
501void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
502 assert(MBB);
503 report(msg, MBB->getParent());
504 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
505 << MBB->getName() << " (" << (const void *)MBB << ')';
506 if (Indexes)
507 errs() << " [" << Indexes->getMBBStartIdx(MBB)
508 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
509 errs() << '\n';
510}
511
512void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
513 assert(MI);
514 report(msg, MI->getParent());
515 errs() << "- instruction: ";
516 if (Indexes && Indexes->hasIndex(*MI))
517 errs() << Indexes->getInstructionIndex(*MI) << '\t';
518 MI->print(errs(), /*IsStandalone=*/true);
519}
520
521void MachineVerifier::report(const char *msg, const MachineOperand *MO,
522 unsigned MONum, LLT MOVRegType) {
523 assert(MO);
524 report(msg, MO->getParent());
525 errs() << "- operand " << MONum << ": ";
526 MO->print(errs(), MOVRegType, TRI);
527 errs() << "\n";
528}
529
530void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
531 report(Msg.str().c_str(), MI);
532}
533
534void MachineVerifier::report_context(SlotIndex Pos) const {
535 errs() << "- at: " << Pos << '\n';
536}
537
538void MachineVerifier::report_context(const LiveInterval &LI) const {
539 errs() << "- interval: " << LI << '\n';
540}
541
542void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
543 LaneBitmask LaneMask) const {
544 report_context_liverange(LR);
545 report_context_vreg_regunit(VRegUnit);
546 if (LaneMask.any())
547 report_context_lanemask(LaneMask);
548}
549
550void MachineVerifier::report_context(const LiveRange::Segment &S) const {
551 errs() << "- segment: " << S << '\n';
552}
553
554void MachineVerifier::report_context(const VNInfo &VNI) const {
555 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
556}
557
558void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
559 errs() << "- liverange: " << LR << '\n';
560}
561
562void MachineVerifier::report_context(MCPhysReg PReg) const {
563 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
564}
565
566void MachineVerifier::report_context_vreg(Register VReg) const {
567 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
568}
569
570void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
571 if (VRegOrUnit.isVirtual()) {
572 report_context_vreg(VRegOrUnit);
573 } else {
574 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
575 }
576}
577
578void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
579 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
580}
581
582void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
583 BBInfo &MInfo = MBBInfoMap[MBB];
584 if (!MInfo.reachable) {
585 MInfo.reachable = true;
586 for (const MachineBasicBlock *Succ : MBB->successors())
587 markReachable(Succ);
588 }
589}
590
591void MachineVerifier::visitMachineFunctionBefore() {
592 lastIndex = SlotIndex();
593 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
594 : TRI->getReservedRegs(*MF);
595
596 if (!MF->empty())
597 markReachable(&MF->front());
598
599 // Build a set of the basic blocks in the function.
600 FunctionBlocks.clear();
601 for (const auto &MBB : *MF) {
602 FunctionBlocks.insert(&MBB);
603 BBInfo &MInfo = MBBInfoMap[&MBB];
604
605 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
606 if (MInfo.Preds.size() != MBB.pred_size())
607 report("MBB has duplicate entries in its predecessor list.", &MBB);
608
609 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
610 if (MInfo.Succs.size() != MBB.succ_size())
611 report("MBB has duplicate entries in its successor list.", &MBB);
612 }
613
614 // Check that the register use lists are sane.
615 MRI->verifyUseLists();
616
617 if (!MF->empty())
618 verifyStackFrame();
619}
620
// Per-block checks run before the block's instructions are visited:
// live-in sanity, CFG consistency against the pred/succ sets collected in
// visitMachineFunctionBefore(), landing-pad successor counts, and agreement
// between the real CFG and what TII->analyzeBranch() reports. Finally seeds
// regsLive from the block live-ins and the function's pristine registers.
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // The ir-block-address-taken flag must match the underlying IR block.
  if (MBB->isIRBlockAddressTaken()) {
    report("ir-block-address-taken is associated with basic block not used by "
           "a blockaddress.",
           MBB);
  }

  // Count the number of landing pad successors.
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there several more conditions to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  // Seed the live-register set from the block's live-in list; live-ins must
  // be physical registers, and all their sub-registers are live too.
  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  // Pristine registers (callee-saved regs untouched by this function) are
  // live throughout the function body as well.
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}
820
821// This function gets called for all bundle headers, including normal
822// stand-alone unbundled instructions.
823void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
824 if (Indexes && Indexes->hasIndex(*MI)) {
825 SlotIndex idx = Indexes->getInstructionIndex(*MI);
826 if (!(idx > lastIndex)) {
827 report("Instruction index out of order", MI);
828 errs() << "Last instruction was at " << lastIndex << '\n';
829 }
830 lastIndex = idx;
831 }
832
833 // Ensure non-terminators don't follow terminators.
834 if (MI->isTerminator()) {
835 if (!FirstTerminator)
836 FirstTerminator = MI;
837 } else if (FirstTerminator) {
838 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
839 // precede non-terminators.
840 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
841 report("Non-terminator instruction after the first terminator", MI);
842 errs() << "First terminator was:\t" << *FirstTerminator;
843 }
844 }
845}
846
// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  // Walk the operand groups: each group starts with an immediate flag word
  // whose low bits encode how many register operands belong to the group.
  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  // If we overran the operand list, the last group claimed more registers
  // than the instruction actually carries.
  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  // For INLINEASM_BR, every MBB operand is an indirect target that must be
  // linked into the CFG in both directions.
  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}
919
920bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
921 const MachineRegisterInfo &MRI) {
922 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
923 if (!Op.isReg())
924 return false;
925 const auto Reg = Op.getReg();
926 if (Reg.isPhysical())
927 return false;
928 return !MRI.getType(Reg).isScalar();
929 }))
930 return true;
931 report("All register operands must have scalar types", &MI);
932 return false;
933}
934
935/// Check that types are consistent when two operands need to have the same
936/// number of vector elements.
937/// \return true if the types are valid.
938bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
939 const MachineInstr *MI) {
940 if (Ty0.isVector() != Ty1.isVector()) {
941 report("operand types must be all-vector or all-scalar", MI);
942 // Generally we try to report as many issues as possible at once, but in
943 // this case it's not clear what should we be comparing the size of the
944 // scalar with: the size of the whole vector or its lane. Instead of
945 // making an arbitrary choice and emitting not so helpful message, let's
946 // avoid the extra noise and stop here.
947 return false;
948 }
949
950 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
951 report("operand types must preserve number of vector elements", MI);
952 return false;
953 }
954
955 return true;
956}
957
// Checks specific to pre-ISel generic (G_*) instructions: no generic opcodes
// after instruction selection, register-only operands with consistent LLT
// types across shared generic type indices, and no physical registers.
void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  // Let the target run its own per-instruction checks too.
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
1033 case TargetOpcode::G_ASSERT_SEXT:
1034 case TargetOpcode::G_ASSERT_ZEXT: {
1035 std::string OpcName =
1036 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1037 if (!MI->getOperand(2).isImm()) {
1038 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1039 break;
1040 }
1041
1042 Register Dst = MI->getOperand(0).getReg();
1043 Register Src = MI->getOperand(1).getReg();
1044 LLT SrcTy = MRI->getType(Src);
1045 int64_t Imm = MI->getOperand(2).getImm();
1046 if (Imm <= 0) {
1047 report(Twine(OpcName, " size must be >= 1"), MI);
1048 break;
1049 }
1050
1051 if (Imm >= SrcTy.getScalarSizeInBits()) {
1052 report(Twine(OpcName, " size must be less than source bit width"), MI);
1053 break;
1054 }
1055
1056 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1057 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1058
1059 // Allow only the source bank to be set.
1060 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1061 report(Twine(OpcName, " cannot change register bank"), MI);
1062 break;
1063 }
1064
1065 // Don't allow a class change. Do allow member class->regbank.
1066 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1067 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1068 report(
1069 Twine(OpcName, " source and destination register classes must match"),
1070 MI);
1071 break;
1072 }
1073
1074 break;
1075 }
1076
1077 case TargetOpcode::G_CONSTANT:
1078 case TargetOpcode::G_FCONSTANT: {
1079 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1080 if (DstTy.isVector())
1081 report("Instruction cannot use a vector result type", MI);
1082
1083 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1084 if (!MI->getOperand(1).isCImm()) {
1085 report("G_CONSTANT operand must be cimm", MI);
1086 break;
1087 }
1088
1089 const ConstantInt *CI = MI->getOperand(1).getCImm();
1090 if (CI->getBitWidth() != DstTy.getSizeInBits())
1091 report("inconsistent constant size", MI);
1092 } else {
1093 if (!MI->getOperand(1).isFPImm()) {
1094 report("G_FCONSTANT operand must be fpimm", MI);
1095 break;
1096 }
1097 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1098
1100 DstTy.getSizeInBits()) {
1101 report("inconsistent constant size", MI);
1102 }
1103 }
1104
1105 break;
1106 }
1107 case TargetOpcode::G_LOAD:
1108 case TargetOpcode::G_STORE:
1109 case TargetOpcode::G_ZEXTLOAD:
1110 case TargetOpcode::G_SEXTLOAD: {
1111 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1112 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1113 if (!PtrTy.isPointer())
1114 report("Generic memory instruction must access a pointer", MI);
1115
1116 // Generic loads and stores must have a single MachineMemOperand
1117 // describing that access.
1118 if (!MI->hasOneMemOperand()) {
1119 report("Generic instruction accessing memory must have one mem operand",
1120 MI);
1121 } else {
1122 const MachineMemOperand &MMO = **MI->memoperands_begin();
1123 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1124 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1125 if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1126 report("Generic extload must have a narrower memory type", MI);
1127 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1128 if (MMO.getSize() > ValTy.getSizeInBytes())
1129 report("load memory size cannot exceed result size", MI);
1130 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1131 if (ValTy.getSizeInBytes() < MMO.getSize())
1132 report("store memory size cannot exceed value size", MI);
1133 }
1134
1135 const AtomicOrdering Order = MMO.getSuccessOrdering();
1136 if (Opc == TargetOpcode::G_STORE) {
1137 if (Order == AtomicOrdering::Acquire ||
1139 report("atomic store cannot use acquire ordering", MI);
1140
1141 } else {
1142 if (Order == AtomicOrdering::Release ||
1144 report("atomic load cannot use release ordering", MI);
1145 }
1146 }
1147
1148 break;
1149 }
1150 case TargetOpcode::G_PHI: {
1151 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1152 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1153 [this, &DstTy](const MachineOperand &MO) {
1154 if (!MO.isReg())
1155 return true;
1156 LLT Ty = MRI->getType(MO.getReg());
1157 if (!Ty.isValid() || (Ty != DstTy))
1158 return false;
1159 return true;
1160 }))
1161 report("Generic Instruction G_PHI has operands with incompatible/missing "
1162 "types",
1163 MI);
1164 break;
1165 }
1166 case TargetOpcode::G_BITCAST: {
1167 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1168 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1169 if (!DstTy.isValid() || !SrcTy.isValid())
1170 break;
1171
1172 if (SrcTy.isPointer() != DstTy.isPointer())
1173 report("bitcast cannot convert between pointers and other types", MI);
1174
1175 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1176 report("bitcast sizes must match", MI);
1177
1178 if (SrcTy == DstTy)
1179 report("bitcast must change the type", MI);
1180
1181 break;
1182 }
1183 case TargetOpcode::G_INTTOPTR:
1184 case TargetOpcode::G_PTRTOINT:
1185 case TargetOpcode::G_ADDRSPACE_CAST: {
1186 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1187 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1188 if (!DstTy.isValid() || !SrcTy.isValid())
1189 break;
1190
1191 verifyVectorElementMatch(DstTy, SrcTy, MI);
1192
1193 DstTy = DstTy.getScalarType();
1194 SrcTy = SrcTy.getScalarType();
1195
1196 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1197 if (!DstTy.isPointer())
1198 report("inttoptr result type must be a pointer", MI);
1199 if (SrcTy.isPointer())
1200 report("inttoptr source type must not be a pointer", MI);
1201 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1202 if (!SrcTy.isPointer())
1203 report("ptrtoint source type must be a pointer", MI);
1204 if (DstTy.isPointer())
1205 report("ptrtoint result type must not be a pointer", MI);
1206 } else {
1207 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1208 if (!SrcTy.isPointer() || !DstTy.isPointer())
1209 report("addrspacecast types must be pointers", MI);
1210 else {
1211 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1212 report("addrspacecast must convert different address spaces", MI);
1213 }
1214 }
1215
1216 break;
1217 }
1218 case TargetOpcode::G_PTR_ADD: {
1219 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1220 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1221 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1222 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1223 break;
1224
1225 if (!PtrTy.getScalarType().isPointer())
1226 report("gep first operand must be a pointer", MI);
1227
1228 if (OffsetTy.getScalarType().isPointer())
1229 report("gep offset operand must not be a pointer", MI);
1230
1231 // TODO: Is the offset allowed to be a scalar with a vector?
1232 break;
1233 }
1234 case TargetOpcode::G_PTRMASK: {
1235 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1236 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1237 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1238 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1239 break;
1240
1241 if (!DstTy.getScalarType().isPointer())
1242 report("ptrmask result type must be a pointer", MI);
1243
1244 if (!MaskTy.getScalarType().isScalar())
1245 report("ptrmask mask type must be an integer", MI);
1246
1247 verifyVectorElementMatch(DstTy, MaskTy, MI);
1248 break;
1249 }
1250 case TargetOpcode::G_SEXT:
1251 case TargetOpcode::G_ZEXT:
1252 case TargetOpcode::G_ANYEXT:
1253 case TargetOpcode::G_TRUNC:
1254 case TargetOpcode::G_FPEXT:
1255 case TargetOpcode::G_FPTRUNC: {
1256 // Number of operands and presense of types is already checked (and
1257 // reported in case of any issues), so no need to report them again. As
1258 // we're trying to report as many issues as possible at once, however, the
1259 // instructions aren't guaranteed to have the right number of operands or
1260 // types attached to them at this point
1261 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1262 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1263 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1264 if (!DstTy.isValid() || !SrcTy.isValid())
1265 break;
1266
1267 LLT DstElTy = DstTy.getScalarType();
1268 LLT SrcElTy = SrcTy.getScalarType();
1269 if (DstElTy.isPointer() || SrcElTy.isPointer())
1270 report("Generic extend/truncate can not operate on pointers", MI);
1271
1272 verifyVectorElementMatch(DstTy, SrcTy, MI);
1273
1274 unsigned DstSize = DstElTy.getSizeInBits();
1275 unsigned SrcSize = SrcElTy.getSizeInBits();
1276 switch (MI->getOpcode()) {
1277 default:
1278 if (DstSize <= SrcSize)
1279 report("Generic extend has destination type no larger than source", MI);
1280 break;
1281 case TargetOpcode::G_TRUNC:
1282 case TargetOpcode::G_FPTRUNC:
1283 if (DstSize >= SrcSize)
1284 report("Generic truncate has destination type no smaller than source",
1285 MI);
1286 break;
1287 }
1288 break;
1289 }
1290 case TargetOpcode::G_SELECT: {
1291 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1292 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1293 if (!SelTy.isValid() || !CondTy.isValid())
1294 break;
1295
1296 // Scalar condition select on a vector is valid.
1297 if (CondTy.isVector())
1298 verifyVectorElementMatch(SelTy, CondTy, MI);
1299 break;
1300 }
1301 case TargetOpcode::G_MERGE_VALUES: {
1302 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1303 // e.g. s2N = MERGE sN, sN
1304 // Merging multiple scalars into a vector is not allowed, should use
1305 // G_BUILD_VECTOR for that.
1306 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1307 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1308 if (DstTy.isVector() || SrcTy.isVector())
1309 report("G_MERGE_VALUES cannot operate on vectors", MI);
1310
1311 const unsigned NumOps = MI->getNumOperands();
1312 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1313 report("G_MERGE_VALUES result size is inconsistent", MI);
1314
1315 for (unsigned I = 2; I != NumOps; ++I) {
1316 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1317 report("G_MERGE_VALUES source types do not match", MI);
1318 }
1319
1320 break;
1321 }
1322 case TargetOpcode::G_UNMERGE_VALUES: {
1323 unsigned NumDsts = MI->getNumOperands() - 1;
1324 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1325 for (unsigned i = 1; i < NumDsts; ++i) {
1326 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1327 report("G_UNMERGE_VALUES destination types do not match", MI);
1328 break;
1329 }
1330 }
1331
1332 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1333 if (DstTy.isVector()) {
1334 // This case is the converse of G_CONCAT_VECTORS.
1335 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1336 SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
1337 report("G_UNMERGE_VALUES source operand does not match vector "
1338 "destination operands",
1339 MI);
1340 } else if (SrcTy.isVector()) {
1341 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1342 // mismatched types as long as the total size matches:
1343 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1344 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1345 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1346 "destination operands",
1347 MI);
1348 } else {
1349 // This case is the converse of G_MERGE_VALUES.
1350 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1351 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1352 "destination operands",
1353 MI);
1354 }
1355 }
1356 break;
1357 }
1358 case TargetOpcode::G_BUILD_VECTOR: {
1359 // Source types must be scalars, dest type a vector. Total size of scalars
1360 // must match the dest vector size.
1361 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1362 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1363 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1364 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1365 break;
1366 }
1367
1368 if (DstTy.getElementType() != SrcEltTy)
1369 report("G_BUILD_VECTOR result element type must match source type", MI);
1370
1371 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1372 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1373
1374 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1375 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1376 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1377
1378 break;
1379 }
1380 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1381 // Source types must be scalars, dest type a vector. Scalar types must be
1382 // larger than the dest vector elt type, as this is a truncating operation.
1383 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1384 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1385 if (!DstTy.isVector() || SrcEltTy.isVector())
1386 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1387 MI);
1388 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1389 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1390 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1391 MI);
1392 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1393 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1394 "dest elt type",
1395 MI);
1396 break;
1397 }
1398 case TargetOpcode::G_CONCAT_VECTORS: {
1399 // Source types should be vectors, and total size should match the dest
1400 // vector size.
1401 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1402 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1403 if (!DstTy.isVector() || !SrcTy.isVector())
1404 report("G_CONCAT_VECTOR requires vector source and destination operands",
1405 MI);
1406
1407 if (MI->getNumOperands() < 3)
1408 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1409
1410 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1411 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1412 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1413 if (DstTy.getNumElements() !=
1414 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1415 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1416 break;
1417 }
1418 case TargetOpcode::G_ICMP:
1419 case TargetOpcode::G_FCMP: {
1420 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1421 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1422
1423 if ((DstTy.isVector() != SrcTy.isVector()) ||
1424 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1425 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1426
1427 break;
1428 }
1429 case TargetOpcode::G_EXTRACT: {
1430 const MachineOperand &SrcOp = MI->getOperand(1);
1431 if (!SrcOp.isReg()) {
1432 report("extract source must be a register", MI);
1433 break;
1434 }
1435
1436 const MachineOperand &OffsetOp = MI->getOperand(2);
1437 if (!OffsetOp.isImm()) {
1438 report("extract offset must be a constant", MI);
1439 break;
1440 }
1441
1442 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1443 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1444 if (SrcSize == DstSize)
1445 report("extract source must be larger than result", MI);
1446
1447 if (DstSize + OffsetOp.getImm() > SrcSize)
1448 report("extract reads past end of register", MI);
1449 break;
1450 }
1451 case TargetOpcode::G_INSERT: {
1452 const MachineOperand &SrcOp = MI->getOperand(2);
1453 if (!SrcOp.isReg()) {
1454 report("insert source must be a register", MI);
1455 break;
1456 }
1457
1458 const MachineOperand &OffsetOp = MI->getOperand(3);
1459 if (!OffsetOp.isImm()) {
1460 report("insert offset must be a constant", MI);
1461 break;
1462 }
1463
1464 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1465 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1466
1467 if (DstSize <= SrcSize)
1468 report("inserted size must be smaller than total register", MI);
1469
1470 if (SrcSize + OffsetOp.getImm() > DstSize)
1471 report("insert writes past end of register", MI);
1472
1473 break;
1474 }
1475 case TargetOpcode::G_JUMP_TABLE: {
1476 if (!MI->getOperand(1).isJTI())
1477 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1478 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1479 if (!DstTy.isPointer())
1480 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1481 break;
1482 }
1483 case TargetOpcode::G_BRJT: {
1484 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1485 report("G_BRJT src operand 0 must be a pointer type", MI);
1486
1487 if (!MI->getOperand(1).isJTI())
1488 report("G_BRJT src operand 1 must be a jump table index", MI);
1489
1490 const auto &IdxOp = MI->getOperand(2);
1491 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1492 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1493 break;
1494 }
1495 case TargetOpcode::G_INTRINSIC:
1496 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1497 // TODO: Should verify number of def and use operands, but the current
1498 // interface requires passing in IR types for mangling.
1499 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1500 if (!IntrIDOp.isIntrinsicID()) {
1501 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1502 break;
1503 }
1504
1505 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1506 unsigned IntrID = IntrIDOp.getIntrinsicID();
1507 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1509 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1510 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1511 if (NoSideEffects && DeclHasSideEffects) {
1512 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1513 break;
1514 }
1515 if (!NoSideEffects && !DeclHasSideEffects) {
1516 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1517 break;
1518 }
1519 }
1520
1521 break;
1522 }
1523 case TargetOpcode::G_SEXT_INREG: {
1524 if (!MI->getOperand(2).isImm()) {
1525 report("G_SEXT_INREG expects an immediate operand #2", MI);
1526 break;
1527 }
1528
1529 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1530 int64_t Imm = MI->getOperand(2).getImm();
1531 if (Imm <= 0)
1532 report("G_SEXT_INREG size must be >= 1", MI);
1533 if (Imm >= SrcTy.getScalarSizeInBits())
1534 report("G_SEXT_INREG size must be less than source bit width", MI);
1535 break;
1536 }
1537 case TargetOpcode::G_SHUFFLE_VECTOR: {
1538 const MachineOperand &MaskOp = MI->getOperand(3);
1539 if (!MaskOp.isShuffleMask()) {
1540 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1541 break;
1542 }
1543
1544 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1545 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1546 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1547
1548 if (Src0Ty != Src1Ty)
1549 report("Source operands must be the same type", MI);
1550
1551 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1552 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1553
1554 // Don't check that all operands are vector because scalars are used in
1555 // place of 1 element vectors.
1556 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1557 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1558
1559 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1560
1561 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1562 report("Wrong result type for shufflemask", MI);
1563
1564 for (int Idx : MaskIdxes) {
1565 if (Idx < 0)
1566 continue;
1567
1568 if (Idx >= 2 * SrcNumElts)
1569 report("Out of bounds shuffle index", MI);
1570 }
1571
1572 break;
1573 }
1574 case TargetOpcode::G_DYN_STACKALLOC: {
1575 const MachineOperand &DstOp = MI->getOperand(0);
1576 const MachineOperand &AllocOp = MI->getOperand(1);
1577 const MachineOperand &AlignOp = MI->getOperand(2);
1578
1579 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1580 report("dst operand 0 must be a pointer type", MI);
1581 break;
1582 }
1583
1584 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1585 report("src operand 1 must be a scalar reg type", MI);
1586 break;
1587 }
1588
1589 if (!AlignOp.isImm()) {
1590 report("src operand 2 must be an immediate type", MI);
1591 break;
1592 }
1593 break;
1594 }
1595 case TargetOpcode::G_MEMCPY_INLINE:
1596 case TargetOpcode::G_MEMCPY:
1597 case TargetOpcode::G_MEMMOVE: {
1598 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1599 if (MMOs.size() != 2) {
1600 report("memcpy/memmove must have 2 memory operands", MI);
1601 break;
1602 }
1603
1604 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1605 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1606 report("wrong memory operand types", MI);
1607 break;
1608 }
1609
1610 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1611 report("inconsistent memory operand sizes", MI);
1612
1613 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1614 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1615
1616 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1617 report("memory instruction operand must be a pointer", MI);
1618 break;
1619 }
1620
1621 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1622 report("inconsistent store address space", MI);
1623 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1624 report("inconsistent load address space", MI);
1625
1626 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1627 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1628 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1629
1630 break;
1631 }
1632 case TargetOpcode::G_BZERO:
1633 case TargetOpcode::G_MEMSET: {
1634 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1635 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1636 if (MMOs.size() != 1) {
1637 report(Twine(Name, " must have 1 memory operand"), MI);
1638 break;
1639 }
1640
1641 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1642 report(Twine(Name, " memory operand must be a store"), MI);
1643 break;
1644 }
1645
1646 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1647 if (!DstPtrTy.isPointer()) {
1648 report(Twine(Name, " operand must be a pointer"), MI);
1649 break;
1650 }
1651
1652 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1653 report("inconsistent " + Twine(Name, " address space"), MI);
1654
1655 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1656 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1657 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1658
1659 break;
1660 }
1661 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1662 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1663 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1664 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1665 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1666 if (!DstTy.isScalar())
1667 report("Vector reduction requires a scalar destination type", MI);
1668 if (!Src1Ty.isScalar())
1669 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1670 if (!Src2Ty.isVector())
1671 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1672 break;
1673 }
1674 case TargetOpcode::G_VECREDUCE_FADD:
1675 case TargetOpcode::G_VECREDUCE_FMUL:
1676 case TargetOpcode::G_VECREDUCE_FMAX:
1677 case TargetOpcode::G_VECREDUCE_FMIN:
1678 case TargetOpcode::G_VECREDUCE_ADD:
1679 case TargetOpcode::G_VECREDUCE_MUL:
1680 case TargetOpcode::G_VECREDUCE_AND:
1681 case TargetOpcode::G_VECREDUCE_OR:
1682 case TargetOpcode::G_VECREDUCE_XOR:
1683 case TargetOpcode::G_VECREDUCE_SMAX:
1684 case TargetOpcode::G_VECREDUCE_SMIN:
1685 case TargetOpcode::G_VECREDUCE_UMAX:
1686 case TargetOpcode::G_VECREDUCE_UMIN: {
1687 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1688 if (!DstTy.isScalar())
1689 report("Vector reduction requires a scalar destination type", MI);
1690 break;
1691 }
1692
1693 case TargetOpcode::G_SBFX:
1694 case TargetOpcode::G_UBFX: {
1695 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1696 if (DstTy.isVector()) {
1697 report("Bitfield extraction is not supported on vectors", MI);
1698 break;
1699 }
1700 break;
1701 }
1702 case TargetOpcode::G_SHL:
1703 case TargetOpcode::G_LSHR:
1704 case TargetOpcode::G_ASHR:
1705 case TargetOpcode::G_ROTR:
1706 case TargetOpcode::G_ROTL: {
1707 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1708 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1709 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1710 report("Shifts and rotates require operands to be either all scalars or "
1711 "all vectors",
1712 MI);
1713 break;
1714 }
1715 break;
1716 }
1717 case TargetOpcode::G_LLROUND:
1718 case TargetOpcode::G_LROUND: {
1719 verifyAllRegOpsScalar(*MI, *MRI);
1720 break;
1721 }
1722 case TargetOpcode::G_IS_FPCLASS: {
1723 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1724 LLT DestEltTy = DestTy.getScalarType();
1725 if (!DestEltTy.isScalar()) {
1726 report("Destination must be a scalar or vector of scalars", MI);
1727 break;
1728 }
1729 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1730 LLT SrcEltTy = SrcTy.getScalarType();
1731 if (!SrcEltTy.isScalar()) {
1732 report("Source must be a scalar or vector of scalars", MI);
1733 break;
1734 }
1735 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1736 break;
1737 const MachineOperand &TestMO = MI->getOperand(2);
1738 if (!TestMO.isImm()) {
1739 report("floating-point class set (operand 2) must be an immediate", MI);
1740 break;
1741 }
1742 int64_t Test = TestMO.getImm();
1743 if (Test < 0 || Test > fcAllFlags) {
1744 report("Incorrect floating-point class set (operand 2)", MI);
1745 break;
1746 }
1747 break;
1748 }
1749 case TargetOpcode::G_ASSERT_ALIGN: {
1750 if (MI->getOperand(2).getImm() < 1)
1751 report("alignment immediate must be >= 1", MI);
1752 break;
1753 }
1754 case TargetOpcode::G_CONSTANT_POOL: {
1755 if (!MI->getOperand(1).isCPI())
1756 report("Src operand 1 must be a constant pool index", MI);
1757 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1758 report("Dst operand 0 must be a pointer", MI);
1759 break;
1760 }
1761 default:
1762 break;
1763 }
1764}
1765
1766void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1767 const MCInstrDesc &MCID = MI->getDesc();
1768 if (MI->getNumOperands() < MCID.getNumOperands()) {
1769 report("Too few operands", MI);
1770 errs() << MCID.getNumOperands() << " operands expected, but "
1771 << MI->getNumOperands() << " given.\n";
1772 }
1773
1774 if (MI->isPHI()) {
1775 if (MF->getProperties().hasProperty(
1777 report("Found PHI instruction with NoPHIs property set", MI);
1778
1779 if (FirstNonPHI)
1780 report("Found PHI instruction after non-PHI", MI);
1781 } else if (FirstNonPHI == nullptr)
1782 FirstNonPHI = MI;
1783
1784 // Check the tied operands.
1785 if (MI->isInlineAsm())
1786 verifyInlineAsm(MI);
1787
1788 // Check that unspillable terminators define a reg and have at most one use.
1789 if (TII->isUnspillableTerminator(MI)) {
1790 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1791 report("Unspillable Terminator does not define a reg", MI);
1792 Register Def = MI->getOperand(0).getReg();
1793 if (Def.isVirtual() &&
1794 !MF->getProperties().hasProperty(
1796 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1797 report("Unspillable Terminator expected to have at most one use!", MI);
1798 }
1799
1800 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1801 // DBG_VALUEs: these are convenient to use in tests, but should never get
1802 // generated.
1803 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1804 if (!MI->getDebugLoc())
1805 report("Missing DebugLoc for debug instruction", MI);
1806
1807 // Meta instructions should never be the subject of debug value tracking,
1808 // they don't create a value in the output program at all.
1809 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1810 report("Metadata instruction should not have a value tracking number", MI);
1811
1812 // Check the MachineMemOperands for basic consistency.
1813 for (MachineMemOperand *Op : MI->memoperands()) {
1814 if (Op->isLoad() && !MI->mayLoad())
1815 report("Missing mayLoad flag", MI);
1816 if (Op->isStore() && !MI->mayStore())
1817 report("Missing mayStore flag", MI);
1818 }
1819
1820 // Debug values must not have a slot index.
1821 // Other instructions must have one, unless they are inside a bundle.
1822 if (LiveInts) {
1823 bool mapped = !LiveInts->isNotInMIMap(*MI);
1824 if (MI->isDebugOrPseudoInstr()) {
1825 if (mapped)
1826 report("Debug instruction has a slot index", MI);
1827 } else if (MI->isInsideBundle()) {
1828 if (mapped)
1829 report("Instruction inside bundle has a slot index", MI);
1830 } else {
1831 if (!mapped)
1832 report("Missing slot index", MI);
1833 }
1834 }
1835
1836 unsigned Opc = MCID.getOpcode();
1838 verifyPreISelGenericInstruction(MI);
1839 return;
1840 }
1841
1843 if (!TII->verifyInstruction(*MI, ErrorInfo))
1844 report(ErrorInfo.data(), MI);
1845
1846 // Verify properties of various specific instruction types
1847 switch (MI->getOpcode()) {
1848 case TargetOpcode::COPY: {
1849 const MachineOperand &DstOp = MI->getOperand(0);
1850 const MachineOperand &SrcOp = MI->getOperand(1);
1851 const Register SrcReg = SrcOp.getReg();
1852 const Register DstReg = DstOp.getReg();
1853
1854 LLT DstTy = MRI->getType(DstReg);
1855 LLT SrcTy = MRI->getType(SrcReg);
1856 if (SrcTy.isValid() && DstTy.isValid()) {
1857 // If both types are valid, check that the types are the same.
1858 if (SrcTy != DstTy) {
1859 report("Copy Instruction is illegal with mismatching types", MI);
1860 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1861 }
1862
1863 break;
1864 }
1865
1866 if (!SrcTy.isValid() && !DstTy.isValid())
1867 break;
1868
1869 // If we have only one valid type, this is likely a copy between a virtual
1870 // and physical register.
1871 unsigned SrcSize = 0;
1872 unsigned DstSize = 0;
1873 if (SrcReg.isPhysical() && DstTy.isValid()) {
1874 const TargetRegisterClass *SrcRC =
1875 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1876 if (SrcRC)
1877 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1878 }
1879
1880 if (SrcSize == 0)
1881 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1882
1883 if (DstReg.isPhysical() && SrcTy.isValid()) {
1884 const TargetRegisterClass *DstRC =
1885 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1886 if (DstRC)
1887 DstSize = TRI->getRegSizeInBits(*DstRC);
1888 }
1889
1890 if (DstSize == 0)
1891 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1892
1893 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1894 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1895 report("Copy Instruction is illegal with mismatching sizes", MI);
1896 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1897 << "\n";
1898 }
1899 }
1900 break;
1901 }
1902 case TargetOpcode::STATEPOINT: {
1903 StatepointOpers SO(MI);
1904 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1905 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1906 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1907 report("meta operands to STATEPOINT not constant!", MI);
1908 break;
1909 }
1910
1911 auto VerifyStackMapConstant = [&](unsigned Offset) {
1912 if (Offset >= MI->getNumOperands()) {
1913 report("stack map constant to STATEPOINT is out of range!", MI);
1914 return;
1915 }
1916 if (!MI->getOperand(Offset - 1).isImm() ||
1917 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1918 !MI->getOperand(Offset).isImm())
1919 report("stack map constant to STATEPOINT not well formed!", MI);
1920 };
1921 VerifyStackMapConstant(SO.getCCIdx());
1922 VerifyStackMapConstant(SO.getFlagsIdx());
1923 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1924 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1925 VerifyStackMapConstant(SO.getNumAllocaIdx());
1926 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1927
1928 // Verify that all explicit statepoint defs are tied to gc operands as
1929 // they are expected to be a relocation of gc operands.
1930 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1931 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1932 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1933 unsigned UseOpIdx;
1934 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1935 report("STATEPOINT defs expected to be tied", MI);
1936 break;
1937 }
1938 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1939 report("STATEPOINT def tied to non-gc operand", MI);
1940 break;
1941 }
1942 }
1943
1944 // TODO: verify we have properly encoded deopt arguments
1945 } break;
1946 case TargetOpcode::INSERT_SUBREG: {
1947 unsigned InsertedSize;
1948 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1949 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1950 else
1951 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1952 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1953 if (SubRegSize < InsertedSize) {
1954 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1955 "size than the subreg it was inserted into", MI);
1956 break;
1957 }
1958 } break;
1959 case TargetOpcode::REG_SEQUENCE: {
1960 unsigned NumOps = MI->getNumOperands();
1961 if (!(NumOps & 1)) {
1962 report("Invalid number of operands for REG_SEQUENCE", MI);
1963 break;
1964 }
1965
1966 for (unsigned I = 1; I != NumOps; I += 2) {
1967 const MachineOperand &RegOp = MI->getOperand(I);
1968 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
1969
1970 if (!RegOp.isReg())
1971 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
1972
1973 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
1974 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
1975 report("Invalid subregister index operand for REG_SEQUENCE",
1976 &SubRegOp, I + 1);
1977 }
1978 }
1979
1980 Register DstReg = MI->getOperand(0).getReg();
1981 if (DstReg.isPhysical())
1982 report("REG_SEQUENCE does not support physical register results", MI);
1983
1984 if (MI->getOperand(0).getSubReg())
1985 report("Invalid subreg result for REG_SEQUENCE", MI);
1986
1987 break;
1988 }
1989 }
1990}
1991
1992void
1993MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1994 const MachineInstr *MI = MO->getParent();
1995 const MCInstrDesc &MCID = MI->getDesc();
1996 unsigned NumDefs = MCID.getNumDefs();
1997 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1998 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1999
2000 // The first MCID.NumDefs operands must be explicit register defines
2001 if (MONum < NumDefs) {
2002 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2003 if (!MO->isReg())
2004 report("Explicit definition must be a register", MO, MONum);
2005 else if (!MO->isDef() && !MCOI.isOptionalDef())
2006 report("Explicit definition marked as use", MO, MONum);
2007 else if (MO->isImplicit())
2008 report("Explicit definition marked as implicit", MO, MONum);
2009 } else if (MONum < MCID.getNumOperands()) {
2010 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2011 // Don't check if it's the last operand in a variadic instruction. See,
2012 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2013 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2014 if (!IsOptional) {
2015 if (MO->isReg()) {
2016 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2017 report("Explicit operand marked as def", MO, MONum);
2018 if (MO->isImplicit())
2019 report("Explicit operand marked as implicit", MO, MONum);
2020 }
2021
2022 // Check that an instruction has register operands only as expected.
2023 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2024 !MO->isReg() && !MO->isFI())
2025 report("Expected a register operand.", MO, MONum);
2026 if (MO->isReg()) {
2029 !TII->isPCRelRegisterOperandLegal(*MO)))
2030 report("Expected a non-register operand.", MO, MONum);
2031 }
2032 }
2033
2034 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2035 if (TiedTo != -1) {
2036 if (!MO->isReg())
2037 report("Tied use must be a register", MO, MONum);
2038 else if (!MO->isTied())
2039 report("Operand should be tied", MO, MONum);
2040 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2041 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2042 else if (MO->getReg().isPhysical()) {
2043 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2044 if (!MOTied.isReg())
2045 report("Tied counterpart must be a register", &MOTied, TiedTo);
2046 else if (MOTied.getReg().isPhysical() &&
2047 MO->getReg() != MOTied.getReg())
2048 report("Tied physical registers must match.", &MOTied, TiedTo);
2049 }
2050 } else if (MO->isReg() && MO->isTied())
2051 report("Explicit operand should not be tied", MO, MONum);
2052 } else {
2053 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2054 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2055 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2056 }
2057
2058 switch (MO->getType()) {
2060 // Verify debug flag on debug instructions. Check this first because reg0
2061 // indicates an undefined debug value.
2062 if (MI->isDebugInstr() && MO->isUse()) {
2063 if (!MO->isDebug())
2064 report("Register operand must be marked debug", MO, MONum);
2065 } else if (MO->isDebug()) {
2066 report("Register operand must not be marked debug", MO, MONum);
2067 }
2068
2069 const Register Reg = MO->getReg();
2070 if (!Reg)
2071 return;
2072 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2073 checkLiveness(MO, MONum);
2074
2075 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2076 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2077 report("Undef virtual register def operands require a subregister", MO, MONum);
2078
2079 // Verify the consistency of tied operands.
2080 if (MO->isTied()) {
2081 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2082 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2083 if (!OtherMO.isReg())
2084 report("Must be tied to a register", MO, MONum);
2085 if (!OtherMO.isTied())
2086 report("Missing tie flags on tied operand", MO, MONum);
2087 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2088 report("Inconsistent tie links", MO, MONum);
2089 if (MONum < MCID.getNumDefs()) {
2090 if (OtherIdx < MCID.getNumOperands()) {
2091 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2092 report("Explicit def tied to explicit use without tie constraint",
2093 MO, MONum);
2094 } else {
2095 if (!OtherMO.isImplicit())
2096 report("Explicit def should be tied to implicit use", MO, MONum);
2097 }
2098 }
2099 }
2100
2101 // Verify two-address constraints after the twoaddressinstruction pass.
2102 // Both twoaddressinstruction pass and phi-node-elimination pass call
2103 // MRI->leaveSSA() to set MF as NoSSA, we should do the verification after
2104 // twoaddressinstruction pass not after phi-node-elimination pass. So we
2105 // shouldn't use the NoSSA as the condition, we should based on
2106 // TiedOpsRewritten property to verify two-address constraints, this
2107 // property will be set in twoaddressinstruction pass.
2108 unsigned DefIdx;
2109 if (MF->getProperties().hasProperty(
2111 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2112 Reg != MI->getOperand(DefIdx).getReg())
2113 report("Two-address instruction operands must be identical", MO, MONum);
2114
2115 // Check register classes.
2116 unsigned SubIdx = MO->getSubReg();
2117
2118 if (Reg.isPhysical()) {
2119 if (SubIdx) {
2120 report("Illegal subregister index for physical register", MO, MONum);
2121 return;
2122 }
2123 if (MONum < MCID.getNumOperands()) {
2124 if (const TargetRegisterClass *DRC =
2125 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2126 if (!DRC->contains(Reg)) {
2127 report("Illegal physical register for instruction", MO, MONum);
2128 errs() << printReg(Reg, TRI) << " is not a "
2129 << TRI->getRegClassName(DRC) << " register.\n";
2130 }
2131 }
2132 }
2133 if (MO->isRenamable()) {
2134 if (MRI->isReserved(Reg)) {
2135 report("isRenamable set on reserved register", MO, MONum);
2136 return;
2137 }
2138 }
2139 } else {
2140 // Virtual register.
2141 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2142 if (!RC) {
2143 // This is a generic virtual register.
2144
2145 // Do not allow undef uses for generic virtual registers. This ensures
2146 // getVRegDef can never fail and return null on a generic register.
2147 //
2148 // FIXME: This restriction should probably be broadened to all SSA
2149 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2150 // run on the SSA function just before phi elimination.
2151 if (MO->isUndef())
2152 report("Generic virtual register use cannot be undef", MO, MONum);
2153
2154 // Debug value instruction is permitted to use undefined vregs.
2155 // This is a performance measure to skip the overhead of immediately
2156 // pruning unused debug operands. The final undef substitution occurs
2157 // when debug values are allocated in LDVImpl::handleDebugValue, so
2158 // these verifications always apply after this pass.
2159 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2160 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2161 // If we're post-Select, we can't have gvregs anymore.
2162 if (isFunctionSelected) {
2163 report("Generic virtual register invalid in a Selected function",
2164 MO, MONum);
2165 return;
2166 }
2167
2168 // The gvreg must have a type and it must not have a SubIdx.
2169 LLT Ty = MRI->getType(Reg);
2170 if (!Ty.isValid()) {
2171 report("Generic virtual register must have a valid type", MO,
2172 MONum);
2173 return;
2174 }
2175
2176 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2177 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2178
2179 // If we're post-RegBankSelect, the gvreg must have a bank.
2180 if (!RegBank && isFunctionRegBankSelected) {
2181 report("Generic virtual register must have a bank in a "
2182 "RegBankSelected function",
2183 MO, MONum);
2184 return;
2185 }
2186
2187 // Make sure the register fits into its register bank if any.
2188 if (RegBank && Ty.isValid() &&
2189 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2190 report("Register bank is too small for virtual register", MO,
2191 MONum);
2192 errs() << "Register bank " << RegBank->getName() << " too small("
2193 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2194 << Ty.getSizeInBits() << "-bits\n";
2195 return;
2196 }
2197 }
2198
2199 if (SubIdx) {
2200 report("Generic virtual register does not allow subregister index", MO,
2201 MONum);
2202 return;
2203 }
2204
2205 // If this is a target specific instruction and this operand
2206 // has register class constraint, the virtual register must
2207 // comply to it.
2208 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2209 MONum < MCID.getNumOperands() &&
2210 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2211 report("Virtual register does not match instruction constraint", MO,
2212 MONum);
2213 errs() << "Expect register class "
2214 << TRI->getRegClassName(
2215 TII->getRegClass(MCID, MONum, TRI, *MF))
2216 << " but got nothing\n";
2217 return;
2218 }
2219
2220 break;
2221 }
2222 if (SubIdx) {
2223 const TargetRegisterClass *SRC =
2224 TRI->getSubClassWithSubReg(RC, SubIdx);
2225 if (!SRC) {
2226 report("Invalid subregister index for virtual register", MO, MONum);
2227 errs() << "Register class " << TRI->getRegClassName(RC)
2228 << " does not support subreg index " << SubIdx << "\n";
2229 return;
2230 }
2231 if (RC != SRC) {
2232 report("Invalid register class for subregister index", MO, MONum);
2233 errs() << "Register class " << TRI->getRegClassName(RC)
2234 << " does not fully support subreg index " << SubIdx << "\n";
2235 return;
2236 }
2237 }
2238 if (MONum < MCID.getNumOperands()) {
2239 if (const TargetRegisterClass *DRC =
2240 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2241 if (SubIdx) {
2242 const TargetRegisterClass *SuperRC =
2243 TRI->getLargestLegalSuperClass(RC, *MF);
2244 if (!SuperRC) {
2245 report("No largest legal super class exists.", MO, MONum);
2246 return;
2247 }
2248 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2249 if (!DRC) {
2250 report("No matching super-reg register class.", MO, MONum);
2251 return;
2252 }
2253 }
2254 if (!RC->hasSuperClassEq(DRC)) {
2255 report("Illegal virtual register for instruction", MO, MONum);
2256 errs() << "Expected a " << TRI->getRegClassName(DRC)
2257 << " register, but got a " << TRI->getRegClassName(RC)
2258 << " register\n";
2259 }
2260 }
2261 }
2262 }
2263 break;
2264 }
2265
2267 regMasks.push_back(MO->getRegMask());
2268 break;
2269
2271 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2272 report("PHI operand is not in the CFG", MO, MONum);
2273 break;
2274
2276 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2277 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2278 int FI = MO->getIndex();
2279 LiveInterval &LI = LiveStks->getInterval(FI);
2280 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2281
2282 bool stores = MI->mayStore();
2283 bool loads = MI->mayLoad();
2284 // For a memory-to-memory move, we need to check if the frame
2285 // index is used for storing or loading, by inspecting the
2286 // memory operands.
2287 if (stores && loads) {
2288 for (auto *MMO : MI->memoperands()) {
2289 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2290 if (PSV == nullptr) continue;
2292 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2293 if (Value == nullptr) continue;
2294 if (Value->getFrameIndex() != FI) continue;
2295
2296 if (MMO->isStore())
2297 loads = false;
2298 else
2299 stores = false;
2300 break;
2301 }
2302 if (loads == stores)
2303 report("Missing fixed stack memoperand.", MI);
2304 }
2305 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2306 report("Instruction loads from dead spill slot", MO, MONum);
2307 errs() << "Live stack: " << LI << '\n';
2308 }
2309 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2310 report("Instruction stores to dead spill slot", MO, MONum);
2311 errs() << "Live stack: " << LI << '\n';
2312 }
2313 }
2314 break;
2315
2317 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2318 report("CFI instruction has invalid index", MO, MONum);
2319 break;
2320
2321 default:
2322 break;
2323 }
2324}
2325
2326void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2327 unsigned MONum, SlotIndex UseIdx,
2328 const LiveRange &LR,
2329 Register VRegOrUnit,
2330 LaneBitmask LaneMask) {
2331 LiveQueryResult LRQ = LR.Query(UseIdx);
2332 // Check if we have a segment at the use, note however that we only need one
2333 // live subregister range, the others may be dead.
2334 if (!LRQ.valueIn() && LaneMask.none()) {
2335 report("No live segment at use", MO, MONum);
2336 report_context_liverange(LR);
2337 report_context_vreg_regunit(VRegOrUnit);
2338 report_context(UseIdx);
2339 }
2340 if (MO->isKill() && !LRQ.isKill()) {
2341 report("Live range continues after kill flag", MO, MONum);
2342 report_context_liverange(LR);
2343 report_context_vreg_regunit(VRegOrUnit);
2344 if (LaneMask.any())
2345 report_context_lanemask(LaneMask);
2346 report_context(UseIdx);
2347 }
2348}
2349
/// Verify that live range \p LR (belonging to virtual register or register
/// unit \p VRegOrUnit) is consistent with the def operand \p MO (operand
/// number \p MONum) at slot \p DefIdx. \p SubRangeCheck is true when \p LR
/// is a subregister range covering \p LaneMask.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    //
    // Reject a value number whose def does not line up with this operand's
    // def slot, modulo the early-clobber mismatch described above.
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value is defined at the def slot at all.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2405
/// Verify liveness-related invariants for register operand \p MO (operand
/// number \p MONum of its parent instruction): kill flags against
/// LiveVariables and LiveIntervals, reads of dead or undefined registers,
/// SSA single-def form, and live segments at defs.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Look up the virtual register's live interval, if LiveIntervals is
  // available. LI stays null for physical registers or a missing interval.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
             ++Units) {
          if (MRI->isReservedRegUnit(*Units))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        // NOTE(review): LI may still be null here if the interval lookup
        // above failed (already reported); presumed not to occur in
        // practice — confirm before relying on this path.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                               ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                               : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn())
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs occupy the earlier (early-clobber) slot.
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                               ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                               : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
2562
2563// This function gets called after visiting all instructions in a bundle. The
2564// argument points to the bundle header.
2565// Normal stand-alone instructions are also considered 'bundles', and this
2566// function is called for all of them.
2567void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2568 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2569 set_union(MInfo.regsKilled, regsKilled);
2570 set_subtract(regsLive, regsKilled); regsKilled.clear();
2571 // Kill any masked registers.
2572 while (!regMasks.empty()) {
2573 const uint32_t *Mask = regMasks.pop_back_val();
2574 for (Register Reg : regsLive)
2575 if (Reg.isPhysical() &&
2576 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2577 regsDead.push_back(Reg);
2578 }
2579 set_subtract(regsLive, regsDead); regsDead.clear();
2580 set_union(regsLive, regsDefined); regsDefined.clear();
2581}
2582
2583void
2584MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2585 MBBInfoMap[MBB].regsLiveOut = regsLive;
2586 regsLive.clear();
2587
2588 if (Indexes) {
2589 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2590 if (!(stop > lastIndex)) {
2591 report("Block ends before last instruction index", MBB);
2592 errs() << "Block ends at " << stop
2593 << " last instruction was at " << lastIndex << '\n';
2594 }
2595 lastIndex = stop;
2596 }
2597}
2598
2599namespace {
2600// This implements a set of registers that serves as a filter: can filter other
2601// sets by passing through elements not in the filter and blocking those that
2602// are. Any filter implicitly includes the full set of physical registers upon
2603// creation, thus filtering them all out. The filter itself as a set only grows,
2604// and needs to be as efficient as possible.
2605struct VRegFilter {
2606 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2607 // no duplicates. Both virtual and physical registers are fine.
2608 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2609 SmallVector<Register, 0> VRegsBuffer;
2610 filterAndAdd(FromRegSet, VRegsBuffer);
2611 }
2612 // Filter \p FromRegSet through the filter and append passed elements into \p
2613 // ToVRegs. All elements appended are then added to the filter itself.
2614 // \returns true if anything changed.
2615 template <typename RegSetT>
2616 bool filterAndAdd(const RegSetT &FromRegSet,
2617 SmallVectorImpl<Register> &ToVRegs) {
2618 unsigned SparseUniverse = Sparse.size();
2619 unsigned NewSparseUniverse = SparseUniverse;
2620 unsigned NewDenseSize = Dense.size();
2621 size_t Begin = ToVRegs.size();
2622 for (Register Reg : FromRegSet) {
2623 if (!Reg.isVirtual())
2624 continue;
2625 unsigned Index = Register::virtReg2Index(Reg);
2626 if (Index < SparseUniverseMax) {
2627 if (Index < SparseUniverse && Sparse.test(Index))
2628 continue;
2629 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2630 } else {
2631 if (Dense.count(Reg))
2632 continue;
2633 ++NewDenseSize;
2634 }
2635 ToVRegs.push_back(Reg);
2636 }
2637 size_t End = ToVRegs.size();
2638 if (Begin == End)
2639 return false;
2640 // Reserving space in sets once performs better than doing so continuously
2641 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2642 // tuned all the way down) and double iteration (the second one is over a
2643 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2644 Sparse.resize(NewSparseUniverse);
2645 Dense.reserve(NewDenseSize);
2646 for (unsigned I = Begin; I < End; ++I) {
2647 Register Reg = ToVRegs[I];
2648 unsigned Index = Register::virtReg2Index(Reg);
2649 if (Index < SparseUniverseMax)
2650 Sparse.set(Index);
2651 else
2652 Dense.insert(Reg);
2653 }
2654 return true;
2655 }
2656
2657private:
2658 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2659 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2660 // are tracked by Dense. The only purpose of the threashold and the Dense set
2661 // is to have a reasonably growing memory usage in pathological cases (large
2662 // number of very sparse VRegFilter instances live at the same time). In
2663 // practice even in the worst-by-execution time cases having all elements
2664 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2665 // space efficient than if tracked by Dense. The threashold is set to keep the
2666 // worst-case memory usage within 2x of figures determined empirically for
2667 // "all Dense" scenario in such worst-by-execution-time cases.
2668 BitVector Sparse;
2670};
2671
2672// Implements both a transfer function and a (binary, in-place) join operator
2673// for a dataflow over register sets with set union join and filtering transfer
2674// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2675// Maintains out_b as its state, allowing for O(n) iteration over it at any
2676// time, where n is the size of the set (as opposed to O(U) where U is the
2677// universe). filter_b implicitly contains all physical registers at all times.
2678class FilteringVRegSet {
2679 VRegFilter Filter;
2681
2682public:
2683 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2684 // Both virtual and physical registers are fine.
2685 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2686 Filter.add(RS);
2687 }
2688 // Passes \p RS through the filter_b (transfer function) and adds what's left
2689 // to itself (out_b).
2690 template <typename RegSetT> bool add(const RegSetT &RS) {
2691 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2692 // a set union) just add everything being added here to the Filter as well.
2693 return Filter.filterAndAdd(RS, VRegs);
2694 }
2695 using const_iterator = decltype(VRegs)::const_iterator;
2696 const_iterator begin() const { return VRegs.begin(); }
2697 const_iterator end() const { return VRegs.end(); }
2698 size_t size() const { return VRegs.size(); }
2699};
2700} // namespace
2701
2702// Calculate the largest possible vregsPassed sets. These are the registers that
2703// can pass through an MBB live, but may not be live every time. It is assumed
2704// that all vregsPassed sets are empty before the call.
2705void MachineVerifier::calcRegsPassed() {
2706 if (MF->empty())
2707 // ReversePostOrderTraversal doesn't handle empty functions.
2708 return;
2709
2710 for (const MachineBasicBlock *MB :
2712 FilteringVRegSet VRegs;
2713 BBInfo &Info = MBBInfoMap[MB];
2714 assert(Info.reachable);
2715
2716 VRegs.addToFilter(Info.regsKilled);
2717 VRegs.addToFilter(Info.regsLiveOut);
2718 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2719 const BBInfo &PredInfo = MBBInfoMap[Pred];
2720 if (!PredInfo.reachable)
2721 continue;
2722
2723 VRegs.add(PredInfo.regsLiveOut);
2724 VRegs.add(PredInfo.vregsPassed);
2725 }
2726 Info.vregsPassed.reserve(VRegs.size());
2727 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2728 }
2729}
2730
2731// Calculate the set of virtual registers that must be passed through each basic
2732// block in order to satisfy the requirements of successor blocks. This is very
2733// similar to calcRegsPassed, only backwards.
2734void MachineVerifier::calcRegsRequired() {
2735 // First push live-in regs to predecessors' vregsRequired.
2737 for (const auto &MBB : *MF) {
2738 BBInfo &MInfo = MBBInfoMap[&MBB];
2739 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2740 BBInfo &PInfo = MBBInfoMap[Pred];
2741 if (PInfo.addRequired(MInfo.vregsLiveIn))
2742 todo.insert(Pred);
2743 }
2744
2745 // Handle the PHI node.
2746 for (const MachineInstr &MI : MBB.phis()) {
2747 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2748 // Skip those Operands which are undef regs or not regs.
2749 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2750 continue;
2751
2752 // Get register and predecessor for one PHI edge.
2753 Register Reg = MI.getOperand(i).getReg();
2754 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2755
2756 BBInfo &PInfo = MBBInfoMap[Pred];
2757 if (PInfo.addRequired(Reg))
2758 todo.insert(Pred);
2759 }
2760 }
2761 }
2762
2763 // Iteratively push vregsRequired to predecessors. This will converge to the
2764 // same final state regardless of DenseSet iteration order.
2765 while (!todo.empty()) {
2766 const MachineBasicBlock *MBB = *todo.begin();
2767 todo.erase(MBB);
2768 BBInfo &MInfo = MBBInfoMap[MBB];
2769 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2770 if (Pred == MBB)
2771 continue;
2772 BBInfo &SInfo = MBBInfoMap[Pred];
2773 if (SInfo.addRequired(MInfo.vregsRequired))
2774 todo.insert(Pred);
2775 }
2776 }
2777}
2778
2779// Check PHI instructions at the beginning of MBB. It is assumed that
2780// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2781void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2782 BBInfo &MInfo = MBBInfoMap[&MBB];
2783
2785 for (const MachineInstr &Phi : MBB) {
2786 if (!Phi.isPHI())
2787 break;
2788 seen.clear();
2789
2790 const MachineOperand &MODef = Phi.getOperand(0);
2791 if (!MODef.isReg() || !MODef.isDef()) {
2792 report("Expected first PHI operand to be a register def", &MODef, 0);
2793 continue;
2794 }
2795 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2796 MODef.isEarlyClobber() || MODef.isDebug())
2797 report("Unexpected flag on PHI operand", &MODef, 0);
2798 Register DefReg = MODef.getReg();
2799 if (!DefReg.isVirtual())
2800 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2801
2802 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2803 const MachineOperand &MO0 = Phi.getOperand(I);
2804 if (!MO0.isReg()) {
2805 report("Expected PHI operand to be a register", &MO0, I);
2806 continue;
2807 }
2808 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2809 MO0.isDebug() || MO0.isTied())
2810 report("Unexpected flag on PHI operand", &MO0, I);
2811
2812 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2813 if (!MO1.isMBB()) {
2814 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2815 continue;
2816 }
2817
2818 const MachineBasicBlock &Pre = *MO1.getMBB();
2819 if (!Pre.isSuccessor(&MBB)) {
2820 report("PHI input is not a predecessor block", &MO1, I + 1);
2821 continue;
2822 }
2823
2824 if (MInfo.reachable) {
2825 seen.insert(&Pre);
2826 BBInfo &PrInfo = MBBInfoMap[&Pre];
2827 if (!MO0.isUndef() && PrInfo.reachable &&
2828 !PrInfo.isLiveOut(MO0.getReg()))
2829 report("PHI operand is not live-out from predecessor", &MO0, I);
2830 }
2831 }
2832
2833 // Did we see all predecessors?
2834 if (MInfo.reachable) {
2835 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2836 if (!seen.count(Pred)) {
2837 report("Missing PHI operand", &Phi);
2838 errs() << printMBBReference(*Pred)
2839 << " is a predecessor according to the CFG.\n";
2840 }
2841 }
2842 }
2843 }
2844}
2845
2846void MachineVerifier::visitMachineFunctionAfter() {
2847 calcRegsPassed();
2848
2849 for (const MachineBasicBlock &MBB : *MF)
2850 checkPHIOps(MBB);
2851
2852 // Now check liveness info if available
2853 calcRegsRequired();
2854
2855 // Check for killed virtual registers that should be live out.
2856 for (const auto &MBB : *MF) {
2857 BBInfo &MInfo = MBBInfoMap[&MBB];
2858 for (Register VReg : MInfo.vregsRequired)
2859 if (MInfo.regsKilled.count(VReg)) {
2860 report("Virtual register killed in block, but needed live out.", &MBB);
2861 errs() << "Virtual register " << printReg(VReg)
2862 << " is used after the block.\n";
2863 }
2864 }
2865
2866 if (!MF->empty()) {
2867 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2868 for (Register VReg : MInfo.vregsRequired) {
2869 report("Virtual register defs don't dominate all uses.", MF);
2870 report_context_vreg(VReg);
2871 }
2872 }
2873
2874 if (LiveVars)
2875 verifyLiveVariables();
2876 if (LiveInts)
2877 verifyLiveIntervals();
2878
2879 // Check live-in list of each MBB. If a register is live into MBB, check
2880 // that the register is in regsLiveOut of each predecessor block. Since
2881 // this must come from a definition in the predecesssor or its live-in
2882 // list, this will catch a live-through case where the predecessor does not
2883 // have the register in its live-in list. This currently only checks
2884 // registers that have no aliases, are not allocatable and are not
2885 // reserved, which could mean a condition code register for instance.
2886 if (MRI->tracksLiveness())
2887 for (const auto &MBB : *MF)
2889 MCPhysReg LiveInReg = P.PhysReg;
2890 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2891 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2892 continue;
2893 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2894 BBInfo &PInfo = MBBInfoMap[Pred];
2895 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2896 report("Live in register not found to be live out from predecessor.",
2897 &MBB);
2898 errs() << TRI->getName(LiveInReg)
2899 << " not found to be live out from "
2900 << printMBBReference(*Pred) << "\n";
2901 }
2902 }
2903 }
2904
2905 for (auto CSInfo : MF->getCallSitesInfo())
2906 if (!CSInfo.first->isCall())
2907 report("Call site info referencing instruction that is not call", MF);
2908
2909 // If there's debug-info, check that we don't have any duplicate value
2910 // tracking numbers.
2911 if (MF->getFunction().getSubprogram()) {
2912 DenseSet<unsigned> SeenNumbers;
2913 for (const auto &MBB : *MF) {
2914 for (const auto &MI : MBB) {
2915 if (auto Num = MI.peekDebugInstrNum()) {
2916 auto Result = SeenNumbers.insert((unsigned)Num);
2917 if (!Result.second)
2918 report("Instruction has a duplicated value tracking number", &MI);
2919 }
2920 }
2921 }
2922 }
2923}
2924
2925void MachineVerifier::verifyLiveVariables() {
2926 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2927 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2929 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2930 for (const auto &MBB : *MF) {
2931 BBInfo &MInfo = MBBInfoMap[&MBB];
2932
2933 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2934 if (MInfo.vregsRequired.count(Reg)) {
2935 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2936 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2937 errs() << "Virtual register " << printReg(Reg)
2938 << " must be live through the block.\n";
2939 }
2940 } else {
2941 if (VI.AliveBlocks.test(MBB.getNumber())) {
2942 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2943 errs() << "Virtual register " << printReg(Reg)
2944 << " is not needed live through the block.\n";
2945 }
2946 }
2947 }
2948 }
2949}
2950
2951void MachineVerifier::verifyLiveIntervals() {
2952 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2953 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2955
2956 // Spilling and splitting may leave unused registers around. Skip them.
2957 if (MRI->reg_nodbg_empty(Reg))
2958 continue;
2959
2960 if (!LiveInts->hasInterval(Reg)) {
2961 report("Missing live interval for virtual register", MF);
2962 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2963 continue;
2964 }
2965
2966 const LiveInterval &LI = LiveInts->getInterval(Reg);
2967 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2968 verifyLiveInterval(LI);
2969 }
2970
2971 // Verify all the cached regunit intervals.
2972 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2973 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2974 verifyLiveRange(*LR, i);
2975}
2976
2977void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2978 const VNInfo *VNI, Register Reg,
2979 LaneBitmask LaneMask) {
2980 if (VNI->isUnused())
2981 return;
2982
2983 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2984
2985 if (!DefVNI) {
2986 report("Value not live at VNInfo def and not marked unused", MF);
2987 report_context(LR, Reg, LaneMask);
2988 report_context(*VNI);
2989 return;
2990 }
2991
2992 if (DefVNI != VNI) {
2993 report("Live segment at def has different VNInfo", MF);
2994 report_context(LR, Reg, LaneMask);
2995 report_context(*VNI);
2996 return;
2997 }
2998
2999 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3000 if (!MBB) {
3001 report("Invalid VNInfo definition index", MF);
3002 report_context(LR, Reg, LaneMask);
3003 report_context(*VNI);
3004 return;
3005 }
3006
3007 if (VNI->isPHIDef()) {
3008 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3009 report("PHIDef VNInfo is not defined at MBB start", MBB);
3010 report_context(LR, Reg, LaneMask);
3011 report_context(*VNI);
3012 }
3013 return;
3014 }
3015
3016 // Non-PHI def.
3017 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3018 if (!MI) {
3019 report("No instruction at VNInfo def index", MBB);
3020 report_context(LR, Reg, LaneMask);
3021 report_context(*VNI);
3022 return;
3023 }
3024
3025 if (Reg != 0) {
3026 bool hasDef = false;
3027 bool isEarlyClobber = false;
3028 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3029 if (!MOI->isReg() || !MOI->isDef())
3030 continue;
3031 if (Reg.isVirtual()) {
3032 if (MOI->getReg() != Reg)
3033 continue;
3034 } else {
3035 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3036 continue;
3037 }
3038 if (LaneMask.any() &&
3039 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3040 continue;
3041 hasDef = true;
3042 if (MOI->isEarlyClobber())
3043 isEarlyClobber = true;
3044 }
3045
3046 if (!hasDef) {
3047 report("Defining instruction does not modify register", MI);
3048 report_context(LR, Reg, LaneMask);
3049 report_context(*VNI);
3050 }
3051
3052 // Early clobber defs begin at USE slots, but other defs must begin at
3053 // DEF slots.
3054 if (isEarlyClobber) {
3055 if (!VNI->def.isEarlyClobber()) {
3056 report("Early clobber def must be at an early-clobber slot", MBB);
3057 report_context(LR, Reg, LaneMask);
3058 report_context(*VNI);
3059 }
3060 } else if (!VNI->def.isRegister()) {
3061 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3062 report_context(LR, Reg, LaneMask);
3063 report_context(*VNI);
3064 }
3065 }
3066}
3067
3068void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3070 Register Reg,
3071 LaneBitmask LaneMask) {
3072 const LiveRange::Segment &S = *I;
3073 const VNInfo *VNI = S.valno;
3074 assert(VNI && "Live segment has no valno");
3075
3076 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3077 report("Foreign valno in live segment", MF);
3078 report_context(LR, Reg, LaneMask);
3079 report_context(S);
3080 report_context(*VNI);
3081 }
3082
3083 if (VNI->isUnused()) {
3084 report("Live segment valno is marked unused", MF);
3085 report_context(LR, Reg, LaneMask);
3086 report_context(S);
3087 }
3088
3089 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3090 if (!MBB) {
3091 report("Bad start of live segment, no basic block", MF);
3092 report_context(LR, Reg, LaneMask);
3093 report_context(S);
3094 return;
3095 }
3096 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3097 if (S.start != MBBStartIdx && S.start != VNI->def) {
3098 report("Live segment must begin at MBB entry or valno def", MBB);
3099 report_context(LR, Reg, LaneMask);
3100 report_context(S);
3101 }
3102
3103 const MachineBasicBlock *EndMBB =
3104 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3105 if (!EndMBB) {
3106 report("Bad end of live segment, no basic block", MF);
3107 report_context(LR, Reg, LaneMask);
3108 report_context(S);
3109 return;
3110 }
3111
3112 // Checks for non-live-out segments.
3113 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3114 // RegUnit intervals are allowed dead phis.
3115 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3116 S.end == VNI->def.getDeadSlot())
3117 return;
3118
3119 // The live segment is ending inside EndMBB
3120 const MachineInstr *MI =
3122 if (!MI) {
3123 report("Live segment doesn't end at a valid instruction", EndMBB);
3124 report_context(LR, Reg, LaneMask);
3125 report_context(S);
3126 return;
3127 }
3128
3129 // The block slot must refer to a basic block boundary.
3130 if (S.end.isBlock()) {
3131 report("Live segment ends at B slot of an instruction", EndMBB);
3132 report_context(LR, Reg, LaneMask);
3133 report_context(S);
3134 }
3135
3136 if (S.end.isDead()) {
3137 // Segment ends on the dead slot.
3138 // That means there must be a dead def.
3139 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3140 report("Live segment ending at dead slot spans instructions", EndMBB);
3141 report_context(LR, Reg, LaneMask);
3142 report_context(S);
3143 }
3144 }
3145
3146 // After tied operands are rewritten, a live segment can only end at an
3147 // early-clobber slot if it is being redefined by an early-clobber def.
3148 // TODO: Before tied operands are rewritten, a live segment can only end at
3149 // an early-clobber slot if the last use is tied to an early-clobber def.
3150 if (MF->getProperties().hasProperty(
3152 S.end.isEarlyClobber()) {
3153 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3154 report("Live segment ending at early clobber slot must be "
3155 "redefined by an EC def in the same instruction",
3156 EndMBB);
3157 report_context(LR, Reg, LaneMask);
3158 report_context(S);
3159 }
3160 }
3161
3162 // The following checks only apply to virtual registers. Physreg liveness
3163 // is too weird to check.
3164 if (Reg.isVirtual()) {
3165 // A live segment can end with either a redefinition, a kill flag on a
3166 // use, or a dead flag on a def.
3167 bool hasRead = false;
3168 bool hasSubRegDef = false;
3169 bool hasDeadDef = false;
3170 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3171 if (!MOI->isReg() || MOI->getReg() != Reg)
3172 continue;
3173 unsigned Sub = MOI->getSubReg();
3174 LaneBitmask SLM =
3175 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3176 if (MOI->isDef()) {
3177 if (Sub != 0) {
3178 hasSubRegDef = true;
3179 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3180 // mask for subregister defs. Read-undef defs will be handled by
3181 // readsReg below.
3182 SLM = ~SLM;
3183 }
3184 if (MOI->isDead())
3185 hasDeadDef = true;
3186 }
3187 if (LaneMask.any() && (LaneMask & SLM).none())
3188 continue;
3189 if (MOI->readsReg())
3190 hasRead = true;
3191 }
3192 if (S.end.isDead()) {
3193 // Make sure that the corresponding machine operand for a "dead" live
3194 // range has the dead flag. We cannot perform this check for subregister
3195 // liveranges as partially dead values are allowed.
3196 if (LaneMask.none() && !hasDeadDef) {
3197 report(
3198 "Instruction ending live segment on dead slot has no dead flag",
3199 MI);
3200 report_context(LR, Reg, LaneMask);
3201 report_context(S);
3202 }
3203 } else {
3204 if (!hasRead) {
3205 // When tracking subregister liveness, the main range must start new
3206 // values on partial register writes, even if there is no read.
3207 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3208 !hasSubRegDef) {
3209 report("Instruction ending live segment doesn't read the register",
3210 MI);
3211 report_context(LR, Reg, LaneMask);
3212 report_context(S);
3213 }
3214 }
3215 }
3216 }
3217 }
3218
3219 // Now check all the basic blocks in this live segment.
3221 // Is this live segment the beginning of a non-PHIDef VN?
3222 if (S.start == VNI->def && !VNI->isPHIDef()) {
3223 // Not live-in to any blocks.
3224 if (MBB == EndMBB)
3225 return;
3226 // Skip this block.
3227 ++MFI;
3228 }
3229
3231 if (LaneMask.any()) {
3232 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3233 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3234 }
3235
3236 while (true) {
3237 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3238 // We don't know how to track physregs into a landing pad.
3239 if (!Reg.isVirtual() && MFI->isEHPad()) {
3240 if (&*MFI == EndMBB)
3241 break;
3242 ++MFI;
3243 continue;
3244 }
3245
3246 // Is VNI a PHI-def in the current block?
3247 bool IsPHI = VNI->isPHIDef() &&
3248 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3249
3250 // Check that VNI is live-out of all predecessors.
3251 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3252 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3253 // Predecessor of landing pad live-out on last call.
3254 if (MFI->isEHPad()) {
3255 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3256 if (MI.isCall()) {
3257 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3258 break;
3259 }
3260 }
3261 }
3262 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3263
3264 // All predecessors must have a live-out value. However for a phi
3265 // instruction with subregister intervals
3266 // only one of the subregisters (not necessarily the current one) needs to
3267 // be defined.
3268 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3269 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3270 continue;
3271 report("Register not marked live out of predecessor", Pred);
3272 report_context(LR, Reg, LaneMask);
3273 report_context(*VNI);
3274 errs() << " live into " << printMBBReference(*MFI) << '@'
3275 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3276 << PEnd << '\n';
3277 continue;
3278 }
3279
3280 // Only PHI-defs can take different predecessor values.
3281 if (!IsPHI && PVNI != VNI) {
3282 report("Different value live out of predecessor", Pred);
3283 report_context(LR, Reg, LaneMask);
3284 errs() << "Valno #" << PVNI->id << " live out of "
3285 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3286 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3287 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3288 }
3289 }
3290 if (&*MFI == EndMBB)
3291 break;
3292 ++MFI;
3293 }
3294}
3295
3296void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3297 LaneBitmask LaneMask) {
3298 for (const VNInfo *VNI : LR.valnos)
3299 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3300
3301 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3302 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3303}
3304
3305void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3306 Register Reg = LI.reg();
3307 assert(Reg.isVirtual());
3308 verifyLiveRange(LI, Reg);
3309
3311 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3312 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3313 if ((Mask & SR.LaneMask).any()) {
3314 report("Lane masks of sub ranges overlap in live interval", MF);
3315 report_context(LI);
3316 }
3317 if ((SR.LaneMask & ~MaxMask).any()) {
3318 report("Subrange lanemask is invalid", MF);
3319 report_context(LI);
3320 }
3321 if (SR.empty()) {
3322 report("Subrange must not be empty", MF);
3323 report_context(SR, LI.reg(), SR.LaneMask);
3324 }
3325 Mask |= SR.LaneMask;
3326 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3327 if (!LI.covers(SR)) {
3328 report("A Subrange is not covered by the main range", MF);
3329 report_context(LI);
3330 }
3331 }
3332
3333 // Check the LI only has one connected component.
3334 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3335 unsigned NumComp = ConEQ.Classify(LI);
3336 if (NumComp > 1) {
3337 report("Multiple connected components in live interval", MF);
3338 report_context(LI);
3339 for (unsigned comp = 0; comp != NumComp; ++comp) {
3340 errs() << comp << ": valnos";
3341 for (const VNInfo *I : LI.valnos)
3342 if (comp == ConEQ.getEqClass(I))
3343 errs() << ' ' << I->id;
3344 errs() << '\n';
3345 }
3346 }
3347}
3348
namespace {

  // FrameSetup and FrameDestroy can have zero adjustment, so using a single
  // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
  // value is zero.
  // We use a bool plus an integer to capture the stack state.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
      EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
      ExitIsSetup(ExitSetup) {}

    // Can be negative, which means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
3369
3370/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3371/// by a FrameDestroy <n>, stack adjustments are identical on all
3372/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3373void MachineVerifier::verifyStackFrame() {
3374 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3375 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3376 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3377 return;
3378
3380 SPState.resize(MF->getNumBlockIDs());
3382
3383 // Visit the MBBs in DFS order.
3384 for (df_ext_iterator<const MachineFunction *,
3386 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3387 DFI != DFE; ++DFI) {
3388 const MachineBasicBlock *MBB = *DFI;
3389
3390 StackStateOfBB BBState;
3391 // Check the exit state of the DFS stack predecessor.
3392 if (DFI.getPathLength() >= 2) {
3393 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3394 assert(Reachable.count(StackPred) &&
3395 "DFS stack predecessor is already visited.\n");
3396 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3397 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3398 BBState.ExitValue = BBState.EntryValue;
3399 BBState.ExitIsSetup = BBState.EntryIsSetup;
3400 }
3401
3402 // Update stack state by checking contents of MBB.
3403 for (const auto &I : *MBB) {
3404 if (I.getOpcode() == FrameSetupOpcode) {
3405 if (BBState.ExitIsSetup)
3406 report("FrameSetup is after another FrameSetup", &I);
3407 BBState.ExitValue -= TII->getFrameTotalSize(I);
3408 BBState.ExitIsSetup = true;
3409 }
3410
3411 if (I.getOpcode() == FrameDestroyOpcode) {
3412 int Size = TII->getFrameTotalSize(I);
3413 if (!BBState.ExitIsSetup)
3414 report("FrameDestroy is not after a FrameSetup", &I);
3415 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3416 BBState.ExitValue;
3417 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3418 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3419 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3420 << AbsSPAdj << ">.\n";
3421 }
3422 BBState.ExitValue += Size;
3423 BBState.ExitIsSetup = false;
3424 }
3425 }
3426 SPState[MBB->getNumber()] = BBState;
3427
3428 // Make sure the exit state of any predecessor is consistent with the entry
3429 // state.
3430 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3431 if (Reachable.count(Pred) &&
3432 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3433 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3434 report("The exit stack state of a predecessor is inconsistent.", MBB);
3435 errs() << "Predecessor " << printMBBReference(*Pred)
3436 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3437 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3438 << printMBBReference(*MBB) << " has entry state ("
3439 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3440 }
3441 }
3442
3443 // Make sure the entry state of any successor is consistent with the exit
3444 // state.
3445 for (const MachineBasicBlock *Succ : MBB->successors()) {
3446 if (Reachable.count(Succ) &&
3447 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3448 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3449 report("The entry stack state of a successor is inconsistent.", MBB);
3450 errs() << "Successor " << printMBBReference(*Succ)
3451 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3452 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3453 << printMBBReference(*MBB) << " has exit state ("
3454 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3455 }
3456 }
3457
3458 // Make sure a basic block with return ends with zero stack adjustment.
3459 if (!MBB->empty() && MBB->back().isReturn()) {
3460 if (BBState.ExitIsSetup)
3461 report("A return block ends with a FrameSetup.", MBB);
3462 if (BBState.ExitValue)
3463 report("A return block ends with a nonzero stack adjustment.", MBB);
3464 }
3465 }
3466}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
SmallVector< MachineOperand, 4 > Cond
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:464
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
unsigned Reg
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
unsigned UseOpIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
@ VI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1277
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:507
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
Definition: LiveInterval.h:998
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:260
const APFloat & getValueAPF() const
Definition: Constants.h:296
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:139
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Register getReg() const
Base class for user error types.
Definition: Error.h:348
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag.
Definition: InlineAsm.h:363
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:233
constexpr bool isScalar() const
Definition: LowLevelType.h:123
constexpr bool isValid() const
Definition: LowLevelType.h:121
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:133
constexpr bool isVector() const
Definition: LowLevelType.h:129
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:159
constexpr bool isPointer() const
Definition: LowLevelType.h:125
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:257
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:247
constexpr LLT getScalarType() const
Definition: LowLevelType.h:174
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:169
A live range for subregisters.
Definition: LiveInterval.h:693
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
Register reg() const
Definition: LiveInterval.h:717
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:803
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:775
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:541
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:781
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is an optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
An AnalysisManager<MachineFunction> that also exposes IR analysis results.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:523
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:894
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:928
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:919
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
uint64_t getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
uint64_t getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
Intrinsic::ID getIntrinsicID() const
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:50
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:46
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:198
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:231
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:264
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:234
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:238
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:253
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:294
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:259
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:241
SlotIndexes pass.
Definition: SlotIndexes.h:319
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:481
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:509
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:514
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:492
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:390
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:471
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:385
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:379
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
iterator begin() const
Definition: SmallPtrSet.h:403
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
Iterator for intrusive lists based on ilist_node.
self_iterator getIterator()
Definition: ilist_node.h:82
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:119
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:413
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1777
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:2129
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:511
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1833
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
void verifyMachineFunction(MachineFunctionAnalysisManager *, const std::string &Banner, const MachineFunction &MF)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1946
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1976
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:325
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.