// Source: LLVM 19.0.0git — MachineVerifier.cpp (extracted from the doxygen
// "documentation of this file" page; some original lines were lost in
// extraction and are flagged with NOTE(review) comments below).
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
28#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
63#include "llvm/IR/BasicBlock.h"
64#include "llvm/IR/Constants.h"
66#include "llvm/IR/Function.h"
67#include "llvm/IR/InlineAsm.h"
70#include "llvm/MC/LaneBitmask.h"
71#include "llvm/MC/MCAsmInfo.h"
72#include "llvm/MC/MCDwarf.h"
73#include "llvm/MC/MCInstrDesc.h"
76#include "llvm/Pass.h"
80#include "llvm/Support/ModRef.h"
83#include <algorithm>
84#include <cassert>
85#include <cstddef>
86#include <cstdint>
87#include <iterator>
88#include <string>
89#include <utility>
90
91using namespace llvm;
92
93namespace {
94
  /// Core machine-code verifier. One instance checks a single
  /// MachineFunction via verify(), accumulating an error count that the
  /// callers turn into a fatal error.
  struct MachineVerifier {
    MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

    MachineVerifier(const char *b, LiveVariables *LiveVars,
                    LiveIntervals *LiveInts, LiveStacks *LiveStks,
                    SlotIndexes *Indexes)
        : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
          Indexes(Indexes) {}

    /// Run all checks on \p MF and return the number of errors found.
    unsigned verify(const MachineFunction &MF);

    Pass *const PASS = nullptr;
    const char *Banner;
    const MachineFunction *MF = nullptr;
    const TargetMachine *TM = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const MachineRegisterInfo *MRI = nullptr;
    const RegisterBankInfo *RBI = nullptr;

    // Running error count; incremented by report() and returned by verify().
    unsigned foundErrors = 0;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected = false;
    bool isFunctionSelected = false;
    bool isFunctionTracksDebugUserValues = false;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    // NOTE(review): the RegMap and BlockSet typedefs appear to have been
    // dropped by the extraction here — both are referenced below; confirm
    // against upstream.

    const MachineInstr *FirstNonPHI = nullptr;
    const MachineInstr *FirstTerminator = nullptr;
    BlockSet FunctionBlocks;

    // Per-block liveness scratch state, reset between blocks.
    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that only are used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in both
      // regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a successor
      // block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };

    // Extra register info per MBB.
    // NOTE(review): the MBBInfoMap member declaration appears to be missing
    // from this extraction (it is used by markReachable and verify()).

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available
    LiveVars *LiveVars = nullptr;
    LiveIntervals *LiveInts = nullptr;
    LiveStacks *LiveStks = nullptr;
    SlotIndexes *Indexes = nullptr;

    // This is calculated only when trying to verify convergence control tokens.
    // Similar to the LLVM IR verifier, we calculate this locally instead of
    // relying on the pass manager.
    // NOTE(review): the member declaration(s) for the convergence verifier
    // appear to be missing from this extraction.

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

    bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
    bool verifyGIntrinsicConvergence(const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);

    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    // Error reporting helpers; each prints context and bumps foundErrors.
    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval&);
    // NOTE(review): the next two declarations are truncated in this
    // extraction (trailing parameters missing) — confirm against upstream.
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
    void verifyLiveRangeSegment(const LiveRange &,
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };
304
  /// Legacy-pass-manager wrapper that runs the MachineVerifier over each
  /// machine function and aborts compilation on any error.
  struct MachineVerifierPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierPass(std::string banner = std::string())
        : MachineFunctionPass(ID), Banner(std::move(banner)) {
      // NOTE(review): the pass-registry initialization call appears to have
      // been dropped by the extraction here.
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      // NOTE(review): addUsedIfAvailable<...>() lines appear to have been
      // dropped by the extraction here.
      AU.setPreservesAll();
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
      return false;
    }
  };
338
339} // end anonymous namespace
340
// Pass identity and registration with the legacy pass registry.
char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)
345
347 return new MachineVerifierPass(Banner);
348}
349
350void llvm::verifyMachineFunction(const std::string &Banner,
351 const MachineFunction &MF) {
352 // TODO: Use MFAM after porting below analyses.
353 // LiveVariables *LiveVars;
354 // LiveIntervals *LiveInts;
355 // LiveStacks *LiveStks;
356 // SlotIndexes *Indexes;
357 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
358 if (FoundErrors)
359 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
360}
361
362bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
363 const {
364 MachineFunction &MF = const_cast<MachineFunction&>(*this);
365 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
366 if (AbortOnErrors && FoundErrors)
367 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
368 return FoundErrors == 0;
369}
370
372 const char *Banner, bool AbortOnErrors) const {
373 MachineFunction &MF = const_cast<MachineFunction &>(*this);
374 unsigned FoundErrors =
375 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
376 if (AbortOnErrors && FoundErrors)
377 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
378 return FoundErrors == 0;
379}
380
381void MachineVerifier::verifySlotIndexes() const {
382 if (Indexes == nullptr)
383 return;
384
385 // Ensure the IdxMBB list is sorted by slot indexes.
388 E = Indexes->MBBIndexEnd(); I != E; ++I) {
389 assert(!Last.isValid() || I->first > Last);
390 Last = I->first;
391 }
392}
393
394void MachineVerifier::verifyProperties(const MachineFunction &MF) {
395 // If a pass has introduced virtual registers without clearing the
396 // NoVRegs property (or set it without allocating the vregs)
397 // then report an error.
398 if (MF.getProperties().hasProperty(
400 MRI->getNumVirtRegs())
401 report("Function has NoVRegs property but there are VReg operands", &MF);
402}
403
404unsigned MachineVerifier::verify(const MachineFunction &MF) {
405 foundErrors = 0;
406
407 this->MF = &MF;
408 TM = &MF.getTarget();
411 RBI = MF.getSubtarget().getRegBankInfo();
412 MRI = &MF.getRegInfo();
413
414 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
416
417 // If we're mid-GlobalISel and we already triggered the fallback path then
418 // it's expected that the MIR is somewhat broken but that's ok since we'll
419 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
420 if (isFunctionFailedISel)
421 return foundErrors;
422
423 isFunctionRegBankSelected = MF.getProperties().hasProperty(
425 isFunctionSelected = MF.getProperties().hasProperty(
427 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
429
430 if (PASS) {
431 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
432 // We don't want to verify LiveVariables if LiveIntervals is available.
433 if (!LiveInts)
434 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
435 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
436 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
437 }
438
439 verifySlotIndexes();
440
441 verifyProperties(MF);
442
443 visitMachineFunctionBefore();
444 for (const MachineBasicBlock &MBB : MF) {
445 visitMachineBasicBlockBefore(&MBB);
446 // Keep track of the current bundle header.
447 const MachineInstr *CurBundle = nullptr;
448 // Do we expect the next instruction to be part of the same bundle?
449 bool InBundle = false;
450
451 for (const MachineInstr &MI : MBB.instrs()) {
452 if (MI.getParent() != &MBB) {
453 report("Bad instruction parent pointer", &MBB);
454 errs() << "Instruction: " << MI;
455 continue;
456 }
457
458 // Check for consistent bundle flags.
459 if (InBundle && !MI.isBundledWithPred())
460 report("Missing BundledPred flag, "
461 "BundledSucc was set on predecessor",
462 &MI);
463 if (!InBundle && MI.isBundledWithPred())
464 report("BundledPred flag is set, "
465 "but BundledSucc not set on predecessor",
466 &MI);
467
468 // Is this a bundle header?
469 if (!MI.isInsideBundle()) {
470 if (CurBundle)
471 visitMachineBundleAfter(CurBundle);
472 CurBundle = &MI;
473 visitMachineBundleBefore(CurBundle);
474 } else if (!CurBundle)
475 report("No bundle header", &MI);
476 visitMachineInstrBefore(&MI);
477 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
478 const MachineOperand &Op = MI.getOperand(I);
479 if (Op.getParent() != &MI) {
480 // Make sure to use correct addOperand / removeOperand / ChangeTo
481 // functions when replacing operands of a MachineInstr.
482 report("Instruction has operand with wrong parent set", &MI);
483 }
484
485 visitMachineOperand(&Op, I);
486 }
487
488 // Was this the last bundled instruction?
489 InBundle = MI.isBundledWithSucc();
490 }
491 if (CurBundle)
492 visitMachineBundleAfter(CurBundle);
493 if (InBundle)
494 report("BundledSucc flag set on last instruction in block", &MBB.back());
495 visitMachineBasicBlockAfter(&MBB);
496 }
497 visitMachineFunctionAfter();
498
499 // Clean up.
500 regsLive.clear();
501 regsDefined.clear();
502 regsDead.clear();
503 regsKilled.clear();
504 regMasks.clear();
505 MBBInfoMap.clear();
506
507 return foundErrors;
508}
509
510void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
511 assert(MF);
512 errs() << '\n';
513 if (!foundErrors++) {
514 if (Banner)
515 errs() << "# " << Banner << '\n';
516 if (LiveInts != nullptr)
517 LiveInts->print(errs());
518 else
519 MF->print(errs(), Indexes);
520 }
521 errs() << "*** Bad machine code: " << msg << " ***\n"
522 << "- function: " << MF->getName() << "\n";
523}
524
525void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
526 assert(MBB);
527 report(msg, MBB->getParent());
528 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
529 << MBB->getName() << " (" << (const void *)MBB << ')';
530 if (Indexes)
531 errs() << " [" << Indexes->getMBBStartIdx(MBB)
532 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
533 errs() << '\n';
534}
535
536void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
537 assert(MI);
538 report(msg, MI->getParent());
539 errs() << "- instruction: ";
540 if (Indexes && Indexes->hasIndex(*MI))
541 errs() << Indexes->getInstructionIndex(*MI) << '\t';
542 MI->print(errs(), /*IsStandalone=*/true);
543}
544
545void MachineVerifier::report(const char *msg, const MachineOperand *MO,
546 unsigned MONum, LLT MOVRegType) {
547 assert(MO);
548 report(msg, MO->getParent());
549 errs() << "- operand " << MONum << ": ";
550 MO->print(errs(), MOVRegType, TRI);
551 errs() << "\n";
552}
553
554void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
555 report(Msg.str().c_str(), MI);
556}
557
558void MachineVerifier::report_context(SlotIndex Pos) const {
559 errs() << "- at: " << Pos << '\n';
560}
561
562void MachineVerifier::report_context(const LiveInterval &LI) const {
563 errs() << "- interval: " << LI << '\n';
564}
565
566void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
567 LaneBitmask LaneMask) const {
568 report_context_liverange(LR);
569 report_context_vreg_regunit(VRegUnit);
570 if (LaneMask.any())
571 report_context_lanemask(LaneMask);
572}
573
574void MachineVerifier::report_context(const LiveRange::Segment &S) const {
575 errs() << "- segment: " << S << '\n';
576}
577
578void MachineVerifier::report_context(const VNInfo &VNI) const {
579 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
580}
581
582void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
583 errs() << "- liverange: " << LR << '\n';
584}
585
586void MachineVerifier::report_context(MCPhysReg PReg) const {
587 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
588}
589
590void MachineVerifier::report_context_vreg(Register VReg) const {
591 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
592}
593
594void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
595 if (VRegOrUnit.isVirtual()) {
596 report_context_vreg(VRegOrUnit);
597 } else {
598 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
599 }
600}
601
602void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
603 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
604}
605
606void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
607 BBInfo &MInfo = MBBInfoMap[MBB];
608 if (!MInfo.reachable) {
609 MInfo.reachable = true;
610 for (const MachineBasicBlock *Succ : MBB->successors())
611 markReachable(Succ);
612 }
613}
614
615void MachineVerifier::visitMachineFunctionBefore() {
616 lastIndex = SlotIndex();
617 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
618 : TRI->getReservedRegs(*MF);
619
620 if (!MF->empty())
621 markReachable(&MF->front());
622
623 // Build a set of the basic blocks in the function.
624 FunctionBlocks.clear();
625 for (const auto &MBB : *MF) {
626 FunctionBlocks.insert(&MBB);
627 BBInfo &MInfo = MBBInfoMap[&MBB];
628
629 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
630 if (MInfo.Preds.size() != MBB.pred_size())
631 report("MBB has duplicate entries in its predecessor list.", &MBB);
632
633 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
634 if (MInfo.Succs.size() != MBB.succ_size())
635 report("MBB has duplicate entries in its successor list.", &MBB);
636 }
637
638 // Check that the register use lists are sane.
639 MRI->verifyUseLists();
640
641 if (!MF->empty())
642 verifyStackFrame();
643}
644
/// Per-block setup and CFG checks run before visiting the block's
/// instructions: live-in sanity, CFG pred/succ consistency, landing-pad
/// rules, and cross-checking analyzeBranch's answers against the CFG.
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          // NOTE(review): one condition line (presumably the
          // inlineasm-br-indirect-target check named in the message below)
          // was dropped by the extraction here.
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    // NOTE(review): the condition line guarding this report was dropped by
    // the extraction here.
    report("ir-block-address-taken is associated with basic block not used by "
           "a blockaddress.",
           MBB);
  }

  // Count the number of landing pad successors.
  // NOTE(review): the declaration of LandingPadSuccs (used below) was
  // dropped by the extraction here.
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        // NOTE(review): one condition line (presumably an
        // exception-handling-type check on AsmInfo) was dropped by the
        // extraction here.
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there several more conditions to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  // NOTE(review): the declaration of Cond (the branch-condition operand
  // vector used below) was dropped by the extraction here.
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      // NOTE(review): the declaration of MBBI (an iterator to the next
      // block, used below) was dropped by the extraction here.
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  // Seed regsLive from the block's live-in list (sub-registers included).
  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  // Pristine registers (callee-saved regs untouched so far) are also live.
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}
844
845// This function gets called for all bundle headers, including normal
846// stand-alone unbundled instructions.
847void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
848 if (Indexes && Indexes->hasIndex(*MI)) {
849 SlotIndex idx = Indexes->getInstructionIndex(*MI);
850 if (!(idx > lastIndex)) {
851 report("Instruction index out of order", MI);
852 errs() << "Last instruction was at " << lastIndex << '\n';
853 }
854 lastIndex = idx;
855 }
856
857 // Ensure non-terminators don't follow terminators.
858 if (MI->isTerminator()) {
859 if (!FirstTerminator)
860 FirstTerminator = MI;
861 } else if (FirstTerminator) {
862 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
863 // precede non-terminators.
864 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
865 report("Non-terminator instruction after the first terminator", MI);
866 errs() << "First terminator was:\t" << *FirstTerminator;
867 }
868 }
869}
870
871// The operands on an INLINEASM instruction must follow a template.
872// Verify that the flag operands make sense.
873void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
874 // The first two operands on INLINEASM are the asm string and global flags.
875 if (MI->getNumOperands() < 2) {
876 report("Too few operands on inline asm", MI);
877 return;
878 }
879 if (!MI->getOperand(0).isSymbol())
880 report("Asm string must be an external symbol", MI);
881 if (!MI->getOperand(1).isImm())
882 report("Asm flags must be an immediate", MI);
883 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
884 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
885 // and Extra_IsConvergent = 32.
886 if (!isUInt<6>(MI->getOperand(1).getImm()))
887 report("Unknown asm flags", &MI->getOperand(1), 1);
888
889 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
890
891 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
892 unsigned NumOps;
893 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
894 const MachineOperand &MO = MI->getOperand(OpNo);
895 // There may be implicit ops after the fixed operands.
896 if (!MO.isImm())
897 break;
898 const InlineAsm::Flag F(MO.getImm());
899 NumOps = 1 + F.getNumOperandRegisters();
900 }
901
902 if (OpNo > MI->getNumOperands())
903 report("Missing operands in last group", MI);
904
905 // An optional MDNode follows the groups.
906 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
907 ++OpNo;
908
909 // All trailing operands must be implicit registers.
910 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
911 const MachineOperand &MO = MI->getOperand(OpNo);
912 if (!MO.isReg() || !MO.isImplicit())
913 report("Expected implicit register after groups", &MO, OpNo);
914 }
915
916 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
917 const MachineBasicBlock *MBB = MI->getParent();
918
919 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
920 i != e; ++i) {
921 const MachineOperand &MO = MI->getOperand(i);
922
923 if (!MO.isMBB())
924 continue;
925
926 // Check the successor & predecessor lists look ok, assume they are
927 // not. Find the indirect target without going through the successors.
928 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
929 if (!IndirectTargetMBB) {
930 report("INLINEASM_BR indirect target does not exist", &MO, i);
931 break;
932 }
933
934 if (!MBB->isSuccessor(IndirectTargetMBB))
935 report("INLINEASM_BR indirect target missing from successor list", &MO,
936 i);
937
938 if (!IndirectTargetMBB->isPredecessor(MBB))
939 report("INLINEASM_BR indirect target predecessor list missing parent",
940 &MO, i);
941 }
942 }
943}
944
945bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
946 const MachineRegisterInfo &MRI) {
947 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
948 if (!Op.isReg())
949 return false;
950 const auto Reg = Op.getReg();
951 if (Reg.isPhysical())
952 return false;
953 return !MRI.getType(Reg).isScalar();
954 }))
955 return true;
956 report("All register operands must have scalar types", &MI);
957 return false;
958}
959
960/// Check that types are consistent when two operands need to have the same
961/// number of vector elements.
962/// \return true if the types are valid.
963bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
964 const MachineInstr *MI) {
965 if (Ty0.isVector() != Ty1.isVector()) {
966 report("operand types must be all-vector or all-scalar", MI);
967 // Generally we try to report as many issues as possible at once, but in
968 // this case it's not clear what should we be comparing the size of the
969 // scalar with: the size of the whole vector or its lane. Instead of
970 // making an arbitrary choice and emitting not so helpful message, let's
971 // avoid the extra noise and stop here.
972 return false;
973 }
974
975 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
976 report("operand types must preserve number of vector elements", MI);
977 return false;
978 }
979
980 return true;
981}
982
983bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
984 auto Opcode = MI->getOpcode();
985 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
986 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
987 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
988 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
990 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
991 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
992 if (NoSideEffects && DeclHasSideEffects) {
993 report(Twine(TII->getName(Opcode),
994 " used with intrinsic that accesses memory"),
995 MI);
996 return false;
997 }
998 if (!NoSideEffects && !DeclHasSideEffects) {
999 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
1000 return false;
1001 }
1002 }
1003
1004 return true;
1005}
1006
1007bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1008  auto Opcode = MI->getOpcode();
1009  // The non-CONVERGENT opcodes claim the intrinsic is not convergent; the
1010  // G_INTRINSIC_CONVERGENT* opcodes claim it is.
1011  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1012                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1013  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1014  // Only known (in-range) intrinsic IDs have declaration attributes to check.
1015  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1016    // Restored declaration: this line was dropped from the block and `Attrs`
1017    // was used undeclared below.
1018    AttributeList Attrs = Intrinsic::getAttributes(
1019        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1020    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1021    // The opcode's convergence claim must match the intrinsic declaration.
1022    if (NotConvergent && DeclIsConvergent) {
1023      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1024             MI);
1025      return false;
1026    }
1027    if (!NotConvergent && !DeclIsConvergent) {
1028      report(
1029          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1030          MI);
1031      return false;
1032    }
1033  }
1034
1035  return true;
1036}
1031
1032void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1033 if (isFunctionSelected)
1034 report("Unexpected generic instruction in a Selected function", MI);
1035
1036 const MCInstrDesc &MCID = MI->getDesc();
1037 unsigned NumOps = MI->getNumOperands();
1038
1039 // Branches must reference a basic block if they are not indirect
1040 if (MI->isBranch() && !MI->isIndirectBranch()) {
1041 bool HasMBB = false;
1042 for (const MachineOperand &Op : MI->operands()) {
1043 if (Op.isMBB()) {
1044 HasMBB = true;
1045 break;
1046 }
1047 }
1048
1049 if (!HasMBB) {
1050 report("Branch instruction is missing a basic block operand or "
1051 "isIndirectBranch property",
1052 MI);
1053 }
1054 }
1055
1056 // Check types.
1058 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1059 I != E; ++I) {
1060 if (!MCID.operands()[I].isGenericType())
1061 continue;
1062 // Generic instructions specify type equality constraints between some of
1063 // their operands. Make sure these are consistent.
1064 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1065 Types.resize(std::max(TypeIdx + 1, Types.size()));
1066
1067 const MachineOperand *MO = &MI->getOperand(I);
1068 if (!MO->isReg()) {
1069 report("generic instruction must use register operands", MI);
1070 continue;
1071 }
1072
1073 LLT OpTy = MRI->getType(MO->getReg());
1074 // Don't report a type mismatch if there is no actual mismatch, only a
1075 // type missing, to reduce noise:
1076 if (OpTy.isValid()) {
1077 // Only the first valid type for a type index will be printed: don't
1078 // overwrite it later so it's always clear which type was expected:
1079 if (!Types[TypeIdx].isValid())
1080 Types[TypeIdx] = OpTy;
1081 else if (Types[TypeIdx] != OpTy)
1082 report("Type mismatch in generic instruction", MO, I, OpTy);
1083 } else {
1084 // Generic instructions must have types attached to their operands.
1085 report("Generic instruction is missing a virtual register type", MO, I);
1086 }
1087 }
1088
1089 // Generic opcodes must not have physical register operands.
1090 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1091 const MachineOperand *MO = &MI->getOperand(I);
1092 if (MO->isReg() && MO->getReg().isPhysical())
1093 report("Generic instruction cannot have physical register", MO, I);
1094 }
1095
1096 // Avoid out of bounds in checks below. This was already reported earlier.
1097 if (MI->getNumOperands() < MCID.getNumOperands())
1098 return;
1099
1101 if (!TII->verifyInstruction(*MI, ErrorInfo))
1102 report(ErrorInfo.data(), MI);
1103
1104 // Verify properties of various specific instruction types
1105 unsigned Opc = MI->getOpcode();
1106 switch (Opc) {
1107 case TargetOpcode::G_ASSERT_SEXT:
1108 case TargetOpcode::G_ASSERT_ZEXT: {
1109 std::string OpcName =
1110 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1111 if (!MI->getOperand(2).isImm()) {
1112 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1113 break;
1114 }
1115
1116 Register Dst = MI->getOperand(0).getReg();
1117 Register Src = MI->getOperand(1).getReg();
1118 LLT SrcTy = MRI->getType(Src);
1119 int64_t Imm = MI->getOperand(2).getImm();
1120 if (Imm <= 0) {
1121 report(Twine(OpcName, " size must be >= 1"), MI);
1122 break;
1123 }
1124
1125 if (Imm >= SrcTy.getScalarSizeInBits()) {
1126 report(Twine(OpcName, " size must be less than source bit width"), MI);
1127 break;
1128 }
1129
1130 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1131 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1132
1133 // Allow only the source bank to be set.
1134 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1135 report(Twine(OpcName, " cannot change register bank"), MI);
1136 break;
1137 }
1138
1139 // Don't allow a class change. Do allow member class->regbank.
1140 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1141 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1142 report(
1143 Twine(OpcName, " source and destination register classes must match"),
1144 MI);
1145 break;
1146 }
1147
1148 break;
1149 }
1150
1151 case TargetOpcode::G_CONSTANT:
1152 case TargetOpcode::G_FCONSTANT: {
1153 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1154 if (DstTy.isVector())
1155 report("Instruction cannot use a vector result type", MI);
1156
1157 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1158 if (!MI->getOperand(1).isCImm()) {
1159 report("G_CONSTANT operand must be cimm", MI);
1160 break;
1161 }
1162
1163 const ConstantInt *CI = MI->getOperand(1).getCImm();
1164 if (CI->getBitWidth() != DstTy.getSizeInBits())
1165 report("inconsistent constant size", MI);
1166 } else {
1167 if (!MI->getOperand(1).isFPImm()) {
1168 report("G_FCONSTANT operand must be fpimm", MI);
1169 break;
1170 }
1171 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1172
1174 DstTy.getSizeInBits()) {
1175 report("inconsistent constant size", MI);
1176 }
1177 }
1178
1179 break;
1180 }
1181 case TargetOpcode::G_LOAD:
1182 case TargetOpcode::G_STORE:
1183 case TargetOpcode::G_ZEXTLOAD:
1184 case TargetOpcode::G_SEXTLOAD: {
1185 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1186 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1187 if (!PtrTy.isPointer())
1188 report("Generic memory instruction must access a pointer", MI);
1189
1190 // Generic loads and stores must have a single MachineMemOperand
1191 // describing that access.
1192 if (!MI->hasOneMemOperand()) {
1193 report("Generic instruction accessing memory must have one mem operand",
1194 MI);
1195 } else {
1196 const MachineMemOperand &MMO = **MI->memoperands_begin();
1197 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1198 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1200 ValTy.getSizeInBits()))
1201 report("Generic extload must have a narrower memory type", MI);
1202 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1204 ValTy.getSizeInBytes()))
1205 report("load memory size cannot exceed result size", MI);
1206 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1208 MMO.getSize().getValue()))
1209 report("store memory size cannot exceed value size", MI);
1210 }
1211
1212 const AtomicOrdering Order = MMO.getSuccessOrdering();
1213 if (Opc == TargetOpcode::G_STORE) {
1214 if (Order == AtomicOrdering::Acquire ||
1216 report("atomic store cannot use acquire ordering", MI);
1217
1218 } else {
1219 if (Order == AtomicOrdering::Release ||
1221 report("atomic load cannot use release ordering", MI);
1222 }
1223 }
1224
1225 break;
1226 }
1227 case TargetOpcode::G_PHI: {
1228 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1229 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1230 [this, &DstTy](const MachineOperand &MO) {
1231 if (!MO.isReg())
1232 return true;
1233 LLT Ty = MRI->getType(MO.getReg());
1234 if (!Ty.isValid() || (Ty != DstTy))
1235 return false;
1236 return true;
1237 }))
1238 report("Generic Instruction G_PHI has operands with incompatible/missing "
1239 "types",
1240 MI);
1241 break;
1242 }
1243 case TargetOpcode::G_BITCAST: {
1244 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1245 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1246 if (!DstTy.isValid() || !SrcTy.isValid())
1247 break;
1248
1249 if (SrcTy.isPointer() != DstTy.isPointer())
1250 report("bitcast cannot convert between pointers and other types", MI);
1251
1252 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1253 report("bitcast sizes must match", MI);
1254
1255 if (SrcTy == DstTy)
1256 report("bitcast must change the type", MI);
1257
1258 break;
1259 }
1260 case TargetOpcode::G_INTTOPTR:
1261 case TargetOpcode::G_PTRTOINT:
1262 case TargetOpcode::G_ADDRSPACE_CAST: {
1263 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1264 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1265 if (!DstTy.isValid() || !SrcTy.isValid())
1266 break;
1267
1268 verifyVectorElementMatch(DstTy, SrcTy, MI);
1269
1270 DstTy = DstTy.getScalarType();
1271 SrcTy = SrcTy.getScalarType();
1272
1273 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1274 if (!DstTy.isPointer())
1275 report("inttoptr result type must be a pointer", MI);
1276 if (SrcTy.isPointer())
1277 report("inttoptr source type must not be a pointer", MI);
1278 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1279 if (!SrcTy.isPointer())
1280 report("ptrtoint source type must be a pointer", MI);
1281 if (DstTy.isPointer())
1282 report("ptrtoint result type must not be a pointer", MI);
1283 } else {
1284 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1285 if (!SrcTy.isPointer() || !DstTy.isPointer())
1286 report("addrspacecast types must be pointers", MI);
1287 else {
1288 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1289 report("addrspacecast must convert different address spaces", MI);
1290 }
1291 }
1292
1293 break;
1294 }
1295 case TargetOpcode::G_PTR_ADD: {
1296 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1297 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1298 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1299 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1300 break;
1301
1302 if (!PtrTy.isPointerOrPointerVector())
1303 report("gep first operand must be a pointer", MI);
1304
1305 if (OffsetTy.isPointerOrPointerVector())
1306 report("gep offset operand must not be a pointer", MI);
1307
1308 if (PtrTy.isPointerOrPointerVector()) {
1309 const DataLayout &DL = MF->getDataLayout();
1310 unsigned AS = PtrTy.getAddressSpace();
1311 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1312 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1313 report("gep offset operand must match index size for address space",
1314 MI);
1315 }
1316 }
1317
1318 // TODO: Is the offset allowed to be a scalar with a vector?
1319 break;
1320 }
1321 case TargetOpcode::G_PTRMASK: {
1322 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1323 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1324 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1325 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1326 break;
1327
1328 if (!DstTy.isPointerOrPointerVector())
1329 report("ptrmask result type must be a pointer", MI);
1330
1331 if (!MaskTy.getScalarType().isScalar())
1332 report("ptrmask mask type must be an integer", MI);
1333
1334 verifyVectorElementMatch(DstTy, MaskTy, MI);
1335 break;
1336 }
1337 case TargetOpcode::G_SEXT:
1338 case TargetOpcode::G_ZEXT:
1339 case TargetOpcode::G_ANYEXT:
1340 case TargetOpcode::G_TRUNC:
1341 case TargetOpcode::G_FPEXT:
1342 case TargetOpcode::G_FPTRUNC: {
1343 // Number of operands and presense of types is already checked (and
1344 // reported in case of any issues), so no need to report them again. As
1345 // we're trying to report as many issues as possible at once, however, the
1346 // instructions aren't guaranteed to have the right number of operands or
1347 // types attached to them at this point
1348 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1349 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1350 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1351 if (!DstTy.isValid() || !SrcTy.isValid())
1352 break;
1353
1355 report("Generic extend/truncate can not operate on pointers", MI);
1356
1357 verifyVectorElementMatch(DstTy, SrcTy, MI);
1358
1359 unsigned DstSize = DstTy.getScalarSizeInBits();
1360 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1361 switch (MI->getOpcode()) {
1362 default:
1363 if (DstSize <= SrcSize)
1364 report("Generic extend has destination type no larger than source", MI);
1365 break;
1366 case TargetOpcode::G_TRUNC:
1367 case TargetOpcode::G_FPTRUNC:
1368 if (DstSize >= SrcSize)
1369 report("Generic truncate has destination type no smaller than source",
1370 MI);
1371 break;
1372 }
1373 break;
1374 }
1375 case TargetOpcode::G_SELECT: {
1376 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1377 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1378 if (!SelTy.isValid() || !CondTy.isValid())
1379 break;
1380
1381 // Scalar condition select on a vector is valid.
1382 if (CondTy.isVector())
1383 verifyVectorElementMatch(SelTy, CondTy, MI);
1384 break;
1385 }
1386 case TargetOpcode::G_MERGE_VALUES: {
1387 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1388 // e.g. s2N = MERGE sN, sN
1389 // Merging multiple scalars into a vector is not allowed, should use
1390 // G_BUILD_VECTOR for that.
1391 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1392 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1393 if (DstTy.isVector() || SrcTy.isVector())
1394 report("G_MERGE_VALUES cannot operate on vectors", MI);
1395
1396 const unsigned NumOps = MI->getNumOperands();
1397 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1398 report("G_MERGE_VALUES result size is inconsistent", MI);
1399
1400 for (unsigned I = 2; I != NumOps; ++I) {
1401 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1402 report("G_MERGE_VALUES source types do not match", MI);
1403 }
1404
1405 break;
1406 }
1407 case TargetOpcode::G_UNMERGE_VALUES: {
1408 unsigned NumDsts = MI->getNumOperands() - 1;
1409 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1410 for (unsigned i = 1; i < NumDsts; ++i) {
1411 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1412 report("G_UNMERGE_VALUES destination types do not match", MI);
1413 break;
1414 }
1415 }
1416
1417 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1418 if (DstTy.isVector()) {
1419 // This case is the converse of G_CONCAT_VECTORS.
1420 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1421 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1422 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1423 report("G_UNMERGE_VALUES source operand does not match vector "
1424 "destination operands",
1425 MI);
1426 } else if (SrcTy.isVector()) {
1427 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1428 // mismatched types as long as the total size matches:
1429 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1430 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1431 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1432 "destination operands",
1433 MI);
1434 } else {
1435 // This case is the converse of G_MERGE_VALUES.
1436 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1437 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1438 "destination operands",
1439 MI);
1440 }
1441 }
1442 break;
1443 }
1444 case TargetOpcode::G_BUILD_VECTOR: {
1445 // Source types must be scalars, dest type a vector. Total size of scalars
1446 // must match the dest vector size.
1447 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1448 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1449 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1450 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1451 break;
1452 }
1453
1454 if (DstTy.getElementType() != SrcEltTy)
1455 report("G_BUILD_VECTOR result element type must match source type", MI);
1456
1457 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1458 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1459
1460 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1461 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1462 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1463
1464 break;
1465 }
1466 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1467 // Source types must be scalars, dest type a vector. Scalar types must be
1468 // larger than the dest vector elt type, as this is a truncating operation.
1469 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1470 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1471 if (!DstTy.isVector() || SrcEltTy.isVector())
1472 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1473 MI);
1474 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1475 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1476 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1477 MI);
1478 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1479 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1480 "dest elt type",
1481 MI);
1482 break;
1483 }
1484 case TargetOpcode::G_CONCAT_VECTORS: {
1485 // Source types should be vectors, and total size should match the dest
1486 // vector size.
1487 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1488 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1489 if (!DstTy.isVector() || !SrcTy.isVector())
1490 report("G_CONCAT_VECTOR requires vector source and destination operands",
1491 MI);
1492
1493 if (MI->getNumOperands() < 3)
1494 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1495
1496 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1497 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1498 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1499 if (DstTy.getElementCount() !=
1500 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1501 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1502 break;
1503 }
1504 case TargetOpcode::G_ICMP:
1505 case TargetOpcode::G_FCMP: {
1506 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1507 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1508
1509 if ((DstTy.isVector() != SrcTy.isVector()) ||
1510 (DstTy.isVector() &&
1511 DstTy.getElementCount() != SrcTy.getElementCount()))
1512 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1513
1514 break;
1515 }
1516 case TargetOpcode::G_EXTRACT: {
1517 const MachineOperand &SrcOp = MI->getOperand(1);
1518 if (!SrcOp.isReg()) {
1519 report("extract source must be a register", MI);
1520 break;
1521 }
1522
1523 const MachineOperand &OffsetOp = MI->getOperand(2);
1524 if (!OffsetOp.isImm()) {
1525 report("extract offset must be a constant", MI);
1526 break;
1527 }
1528
1529 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1530 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1531 if (SrcSize == DstSize)
1532 report("extract source must be larger than result", MI);
1533
1534 if (DstSize + OffsetOp.getImm() > SrcSize)
1535 report("extract reads past end of register", MI);
1536 break;
1537 }
1538 case TargetOpcode::G_INSERT: {
1539 const MachineOperand &SrcOp = MI->getOperand(2);
1540 if (!SrcOp.isReg()) {
1541 report("insert source must be a register", MI);
1542 break;
1543 }
1544
1545 const MachineOperand &OffsetOp = MI->getOperand(3);
1546 if (!OffsetOp.isImm()) {
1547 report("insert offset must be a constant", MI);
1548 break;
1549 }
1550
1551 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1552 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1553
1554 if (DstSize <= SrcSize)
1555 report("inserted size must be smaller than total register", MI);
1556
1557 if (SrcSize + OffsetOp.getImm() > DstSize)
1558 report("insert writes past end of register", MI);
1559
1560 break;
1561 }
1562 case TargetOpcode::G_JUMP_TABLE: {
1563 if (!MI->getOperand(1).isJTI())
1564 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1565 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1566 if (!DstTy.isPointer())
1567 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1568 break;
1569 }
1570 case TargetOpcode::G_BRJT: {
1571 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1572 report("G_BRJT src operand 0 must be a pointer type", MI);
1573
1574 if (!MI->getOperand(1).isJTI())
1575 report("G_BRJT src operand 1 must be a jump table index", MI);
1576
1577 const auto &IdxOp = MI->getOperand(2);
1578 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1579 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1580 break;
1581 }
1582 case TargetOpcode::G_INTRINSIC:
1583 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1584 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1585 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1586 // TODO: Should verify number of def and use operands, but the current
1587 // interface requires passing in IR types for mangling.
1588 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1589 if (!IntrIDOp.isIntrinsicID()) {
1590 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1591 break;
1592 }
1593
1594 if (!verifyGIntrinsicSideEffects(MI))
1595 break;
1596 if (!verifyGIntrinsicConvergence(MI))
1597 break;
1598
1599 break;
1600 }
1601 case TargetOpcode::G_SEXT_INREG: {
1602 if (!MI->getOperand(2).isImm()) {
1603 report("G_SEXT_INREG expects an immediate operand #2", MI);
1604 break;
1605 }
1606
1607 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1608 int64_t Imm = MI->getOperand(2).getImm();
1609 if (Imm <= 0)
1610 report("G_SEXT_INREG size must be >= 1", MI);
1611 if (Imm >= SrcTy.getScalarSizeInBits())
1612 report("G_SEXT_INREG size must be less than source bit width", MI);
1613 break;
1614 }
1615 case TargetOpcode::G_BSWAP: {
1616 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1617 if (DstTy.getScalarSizeInBits() % 16 != 0)
1618 report("G_BSWAP size must be a multiple of 16 bits", MI);
1619 break;
1620 }
1621 case TargetOpcode::G_VSCALE: {
1622 if (!MI->getOperand(1).isCImm()) {
1623 report("G_VSCALE operand must be cimm", MI);
1624 break;
1625 }
1626 if (MI->getOperand(1).getCImm()->isZero()) {
1627 report("G_VSCALE immediate cannot be zero", MI);
1628 break;
1629 }
1630 break;
1631 }
1632 case TargetOpcode::G_INSERT_SUBVECTOR: {
1633 const MachineOperand &Src0Op = MI->getOperand(1);
1634 if (!Src0Op.isReg()) {
1635 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1636 break;
1637 }
1638
1639 const MachineOperand &Src1Op = MI->getOperand(2);
1640 if (!Src1Op.isReg()) {
1641 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1642 break;
1643 }
1644
1645 const MachineOperand &IndexOp = MI->getOperand(3);
1646 if (!IndexOp.isImm()) {
1647 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1648 break;
1649 }
1650
1651 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1652 LLT Src0Ty = MRI->getType(Src0Op.getReg());
1653 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1654
1655 if (!DstTy.isVector()) {
1656 report("Destination type must be a vector", MI);
1657 break;
1658 }
1659
1660 if (!Src0Ty.isVector()) {
1661 report("First source must be a vector", MI);
1662 break;
1663 }
1664
1665 if (!Src1Ty.isVector()) {
1666 report("Second source must be a vector", MI);
1667 break;
1668 }
1669
1670 if (DstTy != Src0Ty) {
1671 report("Destination type must match the first source vector type", MI);
1672 break;
1673 }
1674
1675 if (Src0Ty.getElementType() != Src1Ty.getElementType()) {
1676 report("Element type of source vectors must be the same", MI);
1677 break;
1678 }
1679
1680 if (IndexOp.getImm() != 0 &&
1681 Src1Ty.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1682 report("Index must be a multiple of the second source vector's "
1683 "minimum vector length",
1684 MI);
1685 break;
1686 }
1687 break;
1688 }
1689 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1690 const MachineOperand &SrcOp = MI->getOperand(1);
1691 if (!SrcOp.isReg()) {
1692 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1693 break;
1694 }
1695
1696 const MachineOperand &IndexOp = MI->getOperand(2);
1697 if (!IndexOp.isImm()) {
1698 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1699 break;
1700 }
1701
1702 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1703 LLT SrcTy = MRI->getType(SrcOp.getReg());
1704
1705 if (!DstTy.isVector()) {
1706 report("Destination type must be a vector", MI);
1707 break;
1708 }
1709
1710 if (!SrcTy.isVector()) {
1711 report("First source must be a vector", MI);
1712 break;
1713 }
1714
1715 if (DstTy.getElementType() != SrcTy.getElementType()) {
1716 report("Element type of vectors must be the same", MI);
1717 break;
1718 }
1719
1720 if (IndexOp.getImm() != 0 &&
1721 SrcTy.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1722 report("Index must be a multiple of the source vector's minimum vector "
1723 "length",
1724 MI);
1725 break;
1726 }
1727
1728 break;
1729 }
1730 case TargetOpcode::G_SHUFFLE_VECTOR: {
1731 const MachineOperand &MaskOp = MI->getOperand(3);
1732 if (!MaskOp.isShuffleMask()) {
1733 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1734 break;
1735 }
1736
1737 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1738 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1739 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1740
1741 if (Src0Ty != Src1Ty)
1742 report("Source operands must be the same type", MI);
1743
1744 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1745 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1746
1747 // Don't check that all operands are vector because scalars are used in
1748 // place of 1 element vectors.
1749 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1750 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1751
1752 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1753
1754 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1755 report("Wrong result type for shufflemask", MI);
1756
1757 for (int Idx : MaskIdxes) {
1758 if (Idx < 0)
1759 continue;
1760
1761 if (Idx >= 2 * SrcNumElts)
1762 report("Out of bounds shuffle index", MI);
1763 }
1764
1765 break;
1766 }
1767
1768 case TargetOpcode::G_SPLAT_VECTOR: {
1769 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1770 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1771
1772 if (!DstTy.isScalableVector()) {
1773 report("Destination type must be a scalable vector", MI);
1774 break;
1775 }
1776
1777 if (!SrcTy.isScalar()) {
1778 report("Source type must be a scalar", MI);
1779 break;
1780 }
1781
1783 SrcTy.getSizeInBits())) {
1784 report("Element type of the destination must be the same size or smaller "
1785 "than the source type",
1786 MI);
1787 break;
1788 }
1789
1790 break;
1791 }
1792 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1793 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1794 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1795 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1796
1797 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1798 report("Destination type must be a scalar or pointer", MI);
1799 break;
1800 }
1801
1802 if (!SrcTy.isVector()) {
1803 report("First source must be a vector", MI);
1804 break;
1805 }
1806
1807 auto TLI = MF->getSubtarget().getTargetLowering();
1808 if (IdxTy.getSizeInBits() !=
1809 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1810 report("Index type must match VectorIdxTy", MI);
1811 break;
1812 }
1813
1814 break;
1815 }
1816 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1817 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1818 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1819 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1820 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1821
1822 if (!DstTy.isVector()) {
1823 report("Destination type must be a vector", MI);
1824 break;
1825 }
1826
1827 if (VecTy != DstTy) {
1828 report("Destination type and vector type must match", MI);
1829 break;
1830 }
1831
1832 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1833 report("Inserted element must be a scalar or pointer", MI);
1834 break;
1835 }
1836
1837 auto TLI = MF->getSubtarget().getTargetLowering();
1838 if (IdxTy.getSizeInBits() !=
1839 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1840 report("Index type must match VectorIdxTy", MI);
1841 break;
1842 }
1843
1844 break;
1845 }
1846 case TargetOpcode::G_DYN_STACKALLOC: {
1847 const MachineOperand &DstOp = MI->getOperand(0);
1848 const MachineOperand &AllocOp = MI->getOperand(1);
1849 const MachineOperand &AlignOp = MI->getOperand(2);
1850
1851 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1852 report("dst operand 0 must be a pointer type", MI);
1853 break;
1854 }
1855
1856 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1857 report("src operand 1 must be a scalar reg type", MI);
1858 break;
1859 }
1860
1861 if (!AlignOp.isImm()) {
1862 report("src operand 2 must be an immediate type", MI);
1863 break;
1864 }
1865 break;
1866 }
1867 case TargetOpcode::G_MEMCPY_INLINE:
1868 case TargetOpcode::G_MEMCPY:
1869 case TargetOpcode::G_MEMMOVE: {
1870 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1871 if (MMOs.size() != 2) {
1872 report("memcpy/memmove must have 2 memory operands", MI);
1873 break;
1874 }
1875
1876 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1877 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1878 report("wrong memory operand types", MI);
1879 break;
1880 }
1881
1882 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1883 report("inconsistent memory operand sizes", MI);
1884
1885 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1886 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1887
1888 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1889 report("memory instruction operand must be a pointer", MI);
1890 break;
1891 }
1892
1893 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1894 report("inconsistent store address space", MI);
1895 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1896 report("inconsistent load address space", MI);
1897
1898 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1899 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1900 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1901
1902 break;
1903 }
1904 case TargetOpcode::G_BZERO:
1905 case TargetOpcode::G_MEMSET: {
1906 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1907 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1908 if (MMOs.size() != 1) {
1909 report(Twine(Name, " must have 1 memory operand"), MI);
1910 break;
1911 }
1912
1913 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1914 report(Twine(Name, " memory operand must be a store"), MI);
1915 break;
1916 }
1917
1918 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1919 if (!DstPtrTy.isPointer()) {
1920 report(Twine(Name, " operand must be a pointer"), MI);
1921 break;
1922 }
1923
1924 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1925 report("inconsistent " + Twine(Name, " address space"), MI);
1926
1927 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1928 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1929 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1930
1931 break;
1932 }
1933 case TargetOpcode::G_UBSANTRAP: {
1934 const MachineOperand &KindOp = MI->getOperand(0);
1935 if (!MI->getOperand(0).isImm()) {
1936 report("Crash kind must be an immediate", &KindOp, 0);
1937 break;
1938 }
1939 int64_t Kind = MI->getOperand(0).getImm();
1940 if (!isInt<8>(Kind))
1941 report("Crash kind must be 8 bit wide", &KindOp, 0);
1942 break;
1943 }
1944 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1945 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1946 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1947 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1948 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1949 if (!DstTy.isScalar())
1950 report("Vector reduction requires a scalar destination type", MI);
1951 if (!Src1Ty.isScalar())
1952 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1953 if (!Src2Ty.isVector())
1954 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1955 break;
1956 }
1957 case TargetOpcode::G_VECREDUCE_FADD:
1958 case TargetOpcode::G_VECREDUCE_FMUL:
1959 case TargetOpcode::G_VECREDUCE_FMAX:
1960 case TargetOpcode::G_VECREDUCE_FMIN:
1961 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1962 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1963 case TargetOpcode::G_VECREDUCE_ADD:
1964 case TargetOpcode::G_VECREDUCE_MUL:
1965 case TargetOpcode::G_VECREDUCE_AND:
1966 case TargetOpcode::G_VECREDUCE_OR:
1967 case TargetOpcode::G_VECREDUCE_XOR:
1968 case TargetOpcode::G_VECREDUCE_SMAX:
1969 case TargetOpcode::G_VECREDUCE_SMIN:
1970 case TargetOpcode::G_VECREDUCE_UMAX:
1971 case TargetOpcode::G_VECREDUCE_UMIN: {
1972 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1973 if (!DstTy.isScalar())
1974 report("Vector reduction requires a scalar destination type", MI);
1975 break;
1976 }
1977
1978 case TargetOpcode::G_SBFX:
1979 case TargetOpcode::G_UBFX: {
1980 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1981 if (DstTy.isVector()) {
1982 report("Bitfield extraction is not supported on vectors", MI);
1983 break;
1984 }
1985 break;
1986 }
1987 case TargetOpcode::G_SHL:
1988 case TargetOpcode::G_LSHR:
1989 case TargetOpcode::G_ASHR:
1990 case TargetOpcode::G_ROTR:
1991 case TargetOpcode::G_ROTL: {
1992 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1993 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1994 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1995 report("Shifts and rotates require operands to be either all scalars or "
1996 "all vectors",
1997 MI);
1998 break;
1999 }
2000 break;
2001 }
2002 case TargetOpcode::G_LLROUND:
2003 case TargetOpcode::G_LROUND: {
2004 verifyAllRegOpsScalar(*MI, *MRI);
2005 break;
2006 }
2007 case TargetOpcode::G_IS_FPCLASS: {
2008 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2009 LLT DestEltTy = DestTy.getScalarType();
2010 if (!DestEltTy.isScalar()) {
2011 report("Destination must be a scalar or vector of scalars", MI);
2012 break;
2013 }
2014 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2015 LLT SrcEltTy = SrcTy.getScalarType();
2016 if (!SrcEltTy.isScalar()) {
2017 report("Source must be a scalar or vector of scalars", MI);
2018 break;
2019 }
2020 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2021 break;
2022 const MachineOperand &TestMO = MI->getOperand(2);
2023 if (!TestMO.isImm()) {
2024 report("floating-point class set (operand 2) must be an immediate", MI);
2025 break;
2026 }
2027 int64_t Test = TestMO.getImm();
2028 if (Test < 0 || Test > fcAllFlags) {
2029 report("Incorrect floating-point class set (operand 2)", MI);
2030 break;
2031 }
2032 break;
2033 }
2034 case TargetOpcode::G_PREFETCH: {
2035 const MachineOperand &AddrOp = MI->getOperand(0);
2036 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2037 report("addr operand must be a pointer", &AddrOp, 0);
2038 break;
2039 }
2040 const MachineOperand &RWOp = MI->getOperand(1);
2041 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2042 report("rw operand must be an immediate 0-1", &RWOp, 1);
2043 break;
2044 }
2045 const MachineOperand &LocalityOp = MI->getOperand(2);
2046 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2047 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2048 break;
2049 }
2050 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2051 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2052 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2053 break;
2054 }
2055 break;
2056 }
2057 case TargetOpcode::G_ASSERT_ALIGN: {
2058 if (MI->getOperand(2).getImm() < 1)
2059 report("alignment immediate must be >= 1", MI);
2060 break;
2061 }
2062 case TargetOpcode::G_CONSTANT_POOL: {
2063 if (!MI->getOperand(1).isCPI())
2064 report("Src operand 1 must be a constant pool index", MI);
2065 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2066 report("Dst operand 0 must be a pointer", MI);
2067 break;
2068 }
2069 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2070 const MachineOperand &AddrOp = MI->getOperand(1);
2071 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2072 report("addr operand must be a pointer", &AddrOp, 1);
2073 break;
2074 }
2075 default:
2076 break;
2077 }
2078}
2079
2080void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2081 const MCInstrDesc &MCID = MI->getDesc();
2082 if (MI->getNumOperands() < MCID.getNumOperands()) {
2083 report("Too few operands", MI);
2084 errs() << MCID.getNumOperands() << " operands expected, but "
2085 << MI->getNumOperands() << " given.\n";
2086 }
2087
2088 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2089 report("NoConvergent flag expected only on convergent instructions.", MI);
2090
2091 if (MI->isPHI()) {
2092 if (MF->getProperties().hasProperty(
2094 report("Found PHI instruction with NoPHIs property set", MI);
2095
2096 if (FirstNonPHI)
2097 report("Found PHI instruction after non-PHI", MI);
2098 } else if (FirstNonPHI == nullptr)
2099 FirstNonPHI = MI;
2100
2101 // Check the tied operands.
2102 if (MI->isInlineAsm())
2103 verifyInlineAsm(MI);
2104
2105 // Check that unspillable terminators define a reg and have at most one use.
2106 if (TII->isUnspillableTerminator(MI)) {
2107 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2108 report("Unspillable Terminator does not define a reg", MI);
2109 Register Def = MI->getOperand(0).getReg();
2110 if (Def.isVirtual() &&
2111 !MF->getProperties().hasProperty(
2113 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2114 report("Unspillable Terminator expected to have at most one use!", MI);
2115 }
2116
2117 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2118 // DBG_VALUEs: these are convenient to use in tests, but should never get
2119 // generated.
2120 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2121 if (!MI->getDebugLoc())
2122 report("Missing DebugLoc for debug instruction", MI);
2123
2124 // Meta instructions should never be the subject of debug value tracking,
2125 // they don't create a value in the output program at all.
2126 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2127 report("Metadata instruction should not have a value tracking number", MI);
2128
2129 // Check the MachineMemOperands for basic consistency.
2130 for (MachineMemOperand *Op : MI->memoperands()) {
2131 if (Op->isLoad() && !MI->mayLoad())
2132 report("Missing mayLoad flag", MI);
2133 if (Op->isStore() && !MI->mayStore())
2134 report("Missing mayStore flag", MI);
2135 }
2136
2137 // Debug values must not have a slot index.
2138 // Other instructions must have one, unless they are inside a bundle.
2139 if (LiveInts) {
2140 bool mapped = !LiveInts->isNotInMIMap(*MI);
2141 if (MI->isDebugOrPseudoInstr()) {
2142 if (mapped)
2143 report("Debug instruction has a slot index", MI);
2144 } else if (MI->isInsideBundle()) {
2145 if (mapped)
2146 report("Instruction inside bundle has a slot index", MI);
2147 } else {
2148 if (!mapped)
2149 report("Missing slot index", MI);
2150 }
2151 }
2152
2153 unsigned Opc = MCID.getOpcode();
2155 verifyPreISelGenericInstruction(MI);
2156 return;
2157 }
2158
2160 if (!TII->verifyInstruction(*MI, ErrorInfo))
2161 report(ErrorInfo.data(), MI);
2162
2163 // Verify properties of various specific instruction types
2164 switch (MI->getOpcode()) {
2165 case TargetOpcode::COPY: {
2166 const MachineOperand &DstOp = MI->getOperand(0);
2167 const MachineOperand &SrcOp = MI->getOperand(1);
2168 const Register SrcReg = SrcOp.getReg();
2169 const Register DstReg = DstOp.getReg();
2170
2171 LLT DstTy = MRI->getType(DstReg);
2172 LLT SrcTy = MRI->getType(SrcReg);
2173 if (SrcTy.isValid() && DstTy.isValid()) {
2174 // If both types are valid, check that the types are the same.
2175 if (SrcTy != DstTy) {
2176 report("Copy Instruction is illegal with mismatching types", MI);
2177 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
2178 }
2179
2180 break;
2181 }
2182
2183 if (!SrcTy.isValid() && !DstTy.isValid())
2184 break;
2185
2186 // If we have only one valid type, this is likely a copy between a virtual
2187 // and physical register.
2188 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2189 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2190 if (SrcReg.isPhysical() && DstTy.isValid()) {
2191 const TargetRegisterClass *SrcRC =
2192 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2193 if (SrcRC)
2194 SrcSize = TRI->getRegSizeInBits(*SrcRC);
2195 }
2196
2197 if (DstReg.isPhysical() && SrcTy.isValid()) {
2198 const TargetRegisterClass *DstRC =
2199 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2200 if (DstRC)
2201 DstSize = TRI->getRegSizeInBits(*DstRC);
2202 }
2203
2204 // The next two checks allow COPY between physical and virtual registers,
2205 // when the virtual register has a scalable size and the physical register
2206 // has a fixed size. These checks allow COPY between *potentialy* mismatched
2207 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2208 // be able to resolve a fixed size for the scalable vector, and at that
2209 // point this function will know for sure whether the sizes are mismatched
2210 // and correctly report a size mismatch.
2211 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2212 !SrcSize.isScalable())
2213 break;
2214 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2215 !DstSize.isScalable())
2216 break;
2217
2218 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2219 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2220 report("Copy Instruction is illegal with mismatching sizes", MI);
2221 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
2222 << "\n";
2223 }
2224 }
2225 break;
2226 }
2227 case TargetOpcode::STATEPOINT: {
2228 StatepointOpers SO(MI);
2229 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2230 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2231 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2232 report("meta operands to STATEPOINT not constant!", MI);
2233 break;
2234 }
2235
2236 auto VerifyStackMapConstant = [&](unsigned Offset) {
2237 if (Offset >= MI->getNumOperands()) {
2238 report("stack map constant to STATEPOINT is out of range!", MI);
2239 return;
2240 }
2241 if (!MI->getOperand(Offset - 1).isImm() ||
2242 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2243 !MI->getOperand(Offset).isImm())
2244 report("stack map constant to STATEPOINT not well formed!", MI);
2245 };
2246 VerifyStackMapConstant(SO.getCCIdx());
2247 VerifyStackMapConstant(SO.getFlagsIdx());
2248 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2249 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2250 VerifyStackMapConstant(SO.getNumAllocaIdx());
2251 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2252
2253 // Verify that all explicit statepoint defs are tied to gc operands as
2254 // they are expected to be a relocation of gc operands.
2255 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2256 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2257 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2258 unsigned UseOpIdx;
2259 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2260 report("STATEPOINT defs expected to be tied", MI);
2261 break;
2262 }
2263 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2264 report("STATEPOINT def tied to non-gc operand", MI);
2265 break;
2266 }
2267 }
2268
2269 // TODO: verify we have properly encoded deopt arguments
2270 } break;
2271 case TargetOpcode::INSERT_SUBREG: {
2272 unsigned InsertedSize;
2273 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2274 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2275 else
2276 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2277 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2278 if (SubRegSize < InsertedSize) {
2279 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2280 "size than the subreg it was inserted into", MI);
2281 break;
2282 }
2283 } break;
2284 case TargetOpcode::REG_SEQUENCE: {
2285 unsigned NumOps = MI->getNumOperands();
2286 if (!(NumOps & 1)) {
2287 report("Invalid number of operands for REG_SEQUENCE", MI);
2288 break;
2289 }
2290
2291 for (unsigned I = 1; I != NumOps; I += 2) {
2292 const MachineOperand &RegOp = MI->getOperand(I);
2293 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2294
2295 if (!RegOp.isReg())
2296 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2297
2298 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2299 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2300 report("Invalid subregister index operand for REG_SEQUENCE",
2301 &SubRegOp, I + 1);
2302 }
2303 }
2304
2305 Register DstReg = MI->getOperand(0).getReg();
2306 if (DstReg.isPhysical())
2307 report("REG_SEQUENCE does not support physical register results", MI);
2308
2309 if (MI->getOperand(0).getSubReg())
2310 report("Invalid subreg result for REG_SEQUENCE", MI);
2311
2312 break;
2313 }
2314 }
2315}
2316
2317void
2318MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2319 const MachineInstr *MI = MO->getParent();
2320 const MCInstrDesc &MCID = MI->getDesc();
2321 unsigned NumDefs = MCID.getNumDefs();
2322 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2323 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2324
2325 // The first MCID.NumDefs operands must be explicit register defines
2326 if (MONum < NumDefs) {
2327 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2328 if (!MO->isReg())
2329 report("Explicit definition must be a register", MO, MONum);
2330 else if (!MO->isDef() && !MCOI.isOptionalDef())
2331 report("Explicit definition marked as use", MO, MONum);
2332 else if (MO->isImplicit())
2333 report("Explicit definition marked as implicit", MO, MONum);
2334 } else if (MONum < MCID.getNumOperands()) {
2335 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2336 // Don't check if it's the last operand in a variadic instruction. See,
2337 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2338 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2339 if (!IsOptional) {
2340 if (MO->isReg()) {
2341 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2342 report("Explicit operand marked as def", MO, MONum);
2343 if (MO->isImplicit())
2344 report("Explicit operand marked as implicit", MO, MONum);
2345 }
2346
2347 // Check that an instruction has register operands only as expected.
2348 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2349 !MO->isReg() && !MO->isFI())
2350 report("Expected a register operand.", MO, MONum);
2351 if (MO->isReg()) {
2354 !TII->isPCRelRegisterOperandLegal(*MO)))
2355 report("Expected a non-register operand.", MO, MONum);
2356 }
2357 }
2358
2359 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2360 if (TiedTo != -1) {
2361 if (!MO->isReg())
2362 report("Tied use must be a register", MO, MONum);
2363 else if (!MO->isTied())
2364 report("Operand should be tied", MO, MONum);
2365 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2366 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2367 else if (MO->getReg().isPhysical()) {
2368 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2369 if (!MOTied.isReg())
2370 report("Tied counterpart must be a register", &MOTied, TiedTo);
2371 else if (MOTied.getReg().isPhysical() &&
2372 MO->getReg() != MOTied.getReg())
2373 report("Tied physical registers must match.", &MOTied, TiedTo);
2374 }
2375 } else if (MO->isReg() && MO->isTied())
2376 report("Explicit operand should not be tied", MO, MONum);
2377 } else if (!MI->isVariadic()) {
2378 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2379 if (!MO->isValidExcessOperand())
2380 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2381 }
2382
2383 switch (MO->getType()) {
2385 // Verify debug flag on debug instructions. Check this first because reg0
2386 // indicates an undefined debug value.
2387 if (MI->isDebugInstr() && MO->isUse()) {
2388 if (!MO->isDebug())
2389 report("Register operand must be marked debug", MO, MONum);
2390 } else if (MO->isDebug()) {
2391 report("Register operand must not be marked debug", MO, MONum);
2392 }
2393
2394 const Register Reg = MO->getReg();
2395 if (!Reg)
2396 return;
2397 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2398 checkLiveness(MO, MONum);
2399
2400 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2401 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2402 report("Undef virtual register def operands require a subregister", MO, MONum);
2403
2404 // Verify the consistency of tied operands.
2405 if (MO->isTied()) {
2406 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2407 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2408 if (!OtherMO.isReg())
2409 report("Must be tied to a register", MO, MONum);
2410 if (!OtherMO.isTied())
2411 report("Missing tie flags on tied operand", MO, MONum);
2412 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2413 report("Inconsistent tie links", MO, MONum);
2414 if (MONum < MCID.getNumDefs()) {
2415 if (OtherIdx < MCID.getNumOperands()) {
2416 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2417 report("Explicit def tied to explicit use without tie constraint",
2418 MO, MONum);
2419 } else {
2420 if (!OtherMO.isImplicit())
2421 report("Explicit def should be tied to implicit use", MO, MONum);
2422 }
2423 }
2424 }
2425
2426 // Verify two-address constraints after the twoaddressinstruction pass.
2427 // Both twoaddressinstruction pass and phi-node-elimination pass call
2428 // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2429 // after twoaddressinstruction pass not after phi-node-elimination pass. So
2430 // we shouldn't use the IsSSA as the condition, we should based on
2431 // TiedOpsRewritten property to verify two-address constraints, this
2432 // property will be set in twoaddressinstruction pass.
2433 unsigned DefIdx;
2434 if (MF->getProperties().hasProperty(
2436 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2437 Reg != MI->getOperand(DefIdx).getReg())
2438 report("Two-address instruction operands must be identical", MO, MONum);
2439
2440 // Check register classes.
2441 unsigned SubIdx = MO->getSubReg();
2442
2443 if (Reg.isPhysical()) {
2444 if (SubIdx) {
2445 report("Illegal subregister index for physical register", MO, MONum);
2446 return;
2447 }
2448 if (MONum < MCID.getNumOperands()) {
2449 if (const TargetRegisterClass *DRC =
2450 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2451 if (!DRC->contains(Reg)) {
2452 report("Illegal physical register for instruction", MO, MONum);
2453 errs() << printReg(Reg, TRI) << " is not a "
2454 << TRI->getRegClassName(DRC) << " register.\n";
2455 }
2456 }
2457 }
2458 if (MO->isRenamable()) {
2459 if (MRI->isReserved(Reg)) {
2460 report("isRenamable set on reserved register", MO, MONum);
2461 return;
2462 }
2463 }
2464 } else {
2465 // Virtual register.
2466 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2467 if (!RC) {
2468 // This is a generic virtual register.
2469
2470 // Do not allow undef uses for generic virtual registers. This ensures
2471 // getVRegDef can never fail and return null on a generic register.
2472 //
2473 // FIXME: This restriction should probably be broadened to all SSA
2474 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2475 // run on the SSA function just before phi elimination.
2476 if (MO->isUndef())
2477 report("Generic virtual register use cannot be undef", MO, MONum);
2478
2479 // Debug value instruction is permitted to use undefined vregs.
2480 // This is a performance measure to skip the overhead of immediately
2481 // pruning unused debug operands. The final undef substitution occurs
2482 // when debug values are allocated in LDVImpl::handleDebugValue, so
2483 // these verifications always apply after this pass.
2484 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2485 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2486 // If we're post-Select, we can't have gvregs anymore.
2487 if (isFunctionSelected) {
2488 report("Generic virtual register invalid in a Selected function",
2489 MO, MONum);
2490 return;
2491 }
2492
2493 // The gvreg must have a type and it must not have a SubIdx.
2494 LLT Ty = MRI->getType(Reg);
2495 if (!Ty.isValid()) {
2496 report("Generic virtual register must have a valid type", MO,
2497 MONum);
2498 return;
2499 }
2500
2501 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2502 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2503
2504 // If we're post-RegBankSelect, the gvreg must have a bank.
2505 if (!RegBank && isFunctionRegBankSelected) {
2506 report("Generic virtual register must have a bank in a "
2507 "RegBankSelected function",
2508 MO, MONum);
2509 return;
2510 }
2511
2512 // Make sure the register fits into its register bank if any.
2513 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2514 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2515 report("Register bank is too small for virtual register", MO,
2516 MONum);
2517 errs() << "Register bank " << RegBank->getName() << " too small("
2518 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2519 << Ty.getSizeInBits() << "-bits\n";
2520 return;
2521 }
2522 }
2523
2524 if (SubIdx) {
2525 report("Generic virtual register does not allow subregister index", MO,
2526 MONum);
2527 return;
2528 }
2529
2530 // If this is a target specific instruction and this operand
2531 // has register class constraint, the virtual register must
2532 // comply to it.
2533 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2534 MONum < MCID.getNumOperands() &&
2535 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2536 report("Virtual register does not match instruction constraint", MO,
2537 MONum);
2538 errs() << "Expect register class "
2539 << TRI->getRegClassName(
2540 TII->getRegClass(MCID, MONum, TRI, *MF))
2541 << " but got nothing\n";
2542 return;
2543 }
2544
2545 break;
2546 }
2547 if (SubIdx) {
2548 const TargetRegisterClass *SRC =
2549 TRI->getSubClassWithSubReg(RC, SubIdx);
2550 if (!SRC) {
2551 report("Invalid subregister index for virtual register", MO, MONum);
2552 errs() << "Register class " << TRI->getRegClassName(RC)
2553 << " does not support subreg index " << SubIdx << "\n";
2554 return;
2555 }
2556 if (RC != SRC) {
2557 report("Invalid register class for subregister index", MO, MONum);
2558 errs() << "Register class " << TRI->getRegClassName(RC)
2559 << " does not fully support subreg index " << SubIdx << "\n";
2560 return;
2561 }
2562 }
2563 if (MONum < MCID.getNumOperands()) {
2564 if (const TargetRegisterClass *DRC =
2565 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2566 if (SubIdx) {
2567 const TargetRegisterClass *SuperRC =
2568 TRI->getLargestLegalSuperClass(RC, *MF);
2569 if (!SuperRC) {
2570 report("No largest legal super class exists.", MO, MONum);
2571 return;
2572 }
2573 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2574 if (!DRC) {
2575 report("No matching super-reg register class.", MO, MONum);
2576 return;
2577 }
2578 }
2579 if (!RC->hasSuperClassEq(DRC)) {
2580 report("Illegal virtual register for instruction", MO, MONum);
2581 errs() << "Expected a " << TRI->getRegClassName(DRC)
2582 << " register, but got a " << TRI->getRegClassName(RC)
2583 << " register\n";
2584 }
2585 }
2586 }
2587 }
2588 break;
2589 }
2590
2592 regMasks.push_back(MO->getRegMask());
2593 break;
2594
2596 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2597 report("PHI operand is not in the CFG", MO, MONum);
2598 break;
2599
2601 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2602 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2603 int FI = MO->getIndex();
2604 LiveInterval &LI = LiveStks->getInterval(FI);
2605 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2606
2607 bool stores = MI->mayStore();
2608 bool loads = MI->mayLoad();
2609 // For a memory-to-memory move, we need to check if the frame
2610 // index is used for storing or loading, by inspecting the
2611 // memory operands.
2612 if (stores && loads) {
2613 for (auto *MMO : MI->memoperands()) {
2614 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2615 if (PSV == nullptr) continue;
2617 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2618 if (Value == nullptr) continue;
2619 if (Value->getFrameIndex() != FI) continue;
2620
2621 if (MMO->isStore())
2622 loads = false;
2623 else
2624 stores = false;
2625 break;
2626 }
2627 if (loads == stores)
2628 report("Missing fixed stack memoperand.", MI);
2629 }
2630 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2631 report("Instruction loads from dead spill slot", MO, MONum);
2632 errs() << "Live stack: " << LI << '\n';
2633 }
2634 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2635 report("Instruction stores to dead spill slot", MO, MONum);
2636 errs() << "Live stack: " << LI << '\n';
2637 }
2638 }
2639 break;
2640
2642 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2643 report("CFI instruction has invalid index", MO, MONum);
2644 break;
2645
2646 default:
2647 break;
2648 }
2649}
2650
2651void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2652 unsigned MONum, SlotIndex UseIdx,
2653 const LiveRange &LR,
2654 Register VRegOrUnit,
2655 LaneBitmask LaneMask) {
2656 const MachineInstr *MI = MO->getParent();
2657 LiveQueryResult LRQ = LR.Query(UseIdx);
2658 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2659 // Check if we have a segment at the use, note however that we only need one
2660 // live subregister range, the others may be dead.
2661 if (!HasValue && LaneMask.none()) {
2662 report("No live segment at use", MO, MONum);
2663 report_context_liverange(LR);
2664 report_context_vreg_regunit(VRegOrUnit);
2665 report_context(UseIdx);
2666 }
2667 if (MO->isKill() && !LRQ.isKill()) {
2668 report("Live range continues after kill flag", MO, MONum);
2669 report_context_liverange(LR);
2670 report_context_vreg_regunit(VRegOrUnit);
2671 if (LaneMask.any())
2672 report_context_lanemask(LaneMask);
2673 report_context(UseIdx);
2674 }
2675}
2676
2677void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2678 unsigned MONum, SlotIndex DefIdx,
2679 const LiveRange &LR,
2680 Register VRegOrUnit,
2681 bool SubRangeCheck,
2682 LaneBitmask LaneMask) {
2683 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2684 // The LR can correspond to the whole reg and its def slot is not obliged
2685 // to be the same as the MO' def slot. E.g. when we check here "normal"
2686 // subreg MO but there is other EC subreg MO in the same instruction so the
2687 // whole reg has EC def slot and differs from the currently checked MO' def
2688 // slot. For example:
2689 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2690 // Check that there is an early-clobber def of the same superregister
2691 // somewhere is performed in visitMachineFunctionAfter()
2692 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2693 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2694 (VNI->def != DefIdx &&
2695 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2696 report("Inconsistent valno->def", MO, MONum);
2697 report_context_liverange(LR);
2698 report_context_vreg_regunit(VRegOrUnit);
2699 if (LaneMask.any())
2700 report_context_lanemask(LaneMask);
2701 report_context(*VNI);
2702 report_context(DefIdx);
2703 }
2704 } else {
2705 report("No live segment at def", MO, MONum);
2706 report_context_liverange(LR);
2707 report_context_vreg_regunit(VRegOrUnit);
2708 if (LaneMask.any())
2709 report_context_lanemask(LaneMask);
2710 report_context(DefIdx);
2711 }
2712 // Check that, if the dead def flag is present, LiveInts agree.
2713 if (MO->isDead()) {
2714 LiveQueryResult LRQ = LR.Query(DefIdx);
2715 if (!LRQ.isDeadDef()) {
2716 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2717 // A dead subreg def only tells us that the specific subreg is dead. There
2718 // could be other non-dead defs of other subregs, or we could have other
2719 // parts of the register being live through the instruction. So unless we
2720 // are checking liveness for a subrange it is ok for the live range to
2721 // continue, given that we have a dead def of a subregister.
2722 if (SubRangeCheck || MO->getSubReg() == 0) {
2723 report("Live range continues after dead def flag", MO, MONum);
2724 report_context_liverange(LR);
2725 report_context_vreg_regunit(VRegOrUnit);
2726 if (LaneMask.any())
2727 report_context_lanemask(LaneMask);
2728 }
2729 }
2730 }
2731}
2732
2733void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2734 const MachineInstr *MI = MO->getParent();
2735 const Register Reg = MO->getReg();
2736 const unsigned SubRegIdx = MO->getSubReg();
2737
2738 const LiveInterval *LI = nullptr;
2739 if (LiveInts && Reg.isVirtual()) {
2740 if (LiveInts->hasInterval(Reg)) {
2741 LI = &LiveInts->getInterval(Reg);
2742 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2743 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2744 report("Live interval for subreg operand has no subranges", MO, MONum);
2745 } else {
2746 report("Virtual register has no live interval", MO, MONum);
2747 }
2748 }
2749
2750 // Both use and def operands can read a register.
2751 if (MO->readsReg()) {
2752 if (MO->isKill())
2753 addRegWithSubRegs(regsKilled, Reg);
2754
2755 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2756 // which case we have already checked that LiveVars knows any kills on the
2757 // bundle header instead).
2758 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2759 !MI->isBundledWithPred()) {
2760 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2761 if (!is_contained(VI.Kills, MI))
2762 report("Kill missing from LiveVariables", MO, MONum);
2763 }
2764
2765 // Check LiveInts liveness and kill.
2766 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2767 SlotIndex UseIdx;
2768 if (MI->isPHI()) {
2769 // PHI use occurs on the edge, so check for live out here instead.
2770 UseIdx = LiveInts->getMBBEndIdx(
2771 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2772 } else {
2773 UseIdx = LiveInts->getInstructionIndex(*MI);
2774 }
2775 // Check the cached regunit intervals.
2776 if (Reg.isPhysical() && !isReserved(Reg)) {
2777 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2778 if (MRI->isReservedRegUnit(Unit))
2779 continue;
2780 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2781 checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2782 }
2783 }
2784
2785 if (Reg.isVirtual()) {
2786 // This is a virtual register interval.
2787 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2788
2789 if (LI->hasSubRanges() && !MO->isDef()) {
2790 LaneBitmask MOMask = SubRegIdx != 0
2791 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2792 : MRI->getMaxLaneMaskForVReg(Reg);
2793 LaneBitmask LiveInMask;
2794 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2795 if ((MOMask & SR.LaneMask).none())
2796 continue;
2797 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2798 LiveQueryResult LRQ = SR.Query(UseIdx);
2799 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2800 LiveInMask |= SR.LaneMask;
2801 }
2802 // At least parts of the register has to be live at the use.
2803 if ((LiveInMask & MOMask).none()) {
2804 report("No live subrange at use", MO, MONum);
2805 report_context(*LI);
2806 report_context(UseIdx);
2807 }
2808 // For PHIs all lanes should be live
2809 if (MI->isPHI() && LiveInMask != MOMask) {
2810 report("Not all lanes of PHI source live at use", MO, MONum);
2811 report_context(*LI);
2812 report_context(UseIdx);
2813 }
2814 }
2815 }
2816 }
2817
2818 // Use of a dead register.
2819 if (!regsLive.count(Reg)) {
2820 if (Reg.isPhysical()) {
2821 // Reserved registers may be used even when 'dead'.
2822 bool Bad = !isReserved(Reg);
2823 // We are fine if just any subregister has a defined value.
2824 if (Bad) {
2825
2826 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2827 if (regsLive.count(SubReg)) {
2828 Bad = false;
2829 break;
2830 }
2831 }
2832 }
2833 // If there is an additional implicit-use of a super register we stop
2834 // here. By definition we are fine if the super register is not
2835 // (completely) dead, if the complete super register is dead we will
2836 // get a report for its operand.
2837 if (Bad) {
2838 for (const MachineOperand &MOP : MI->uses()) {
2839 if (!MOP.isReg() || !MOP.isImplicit())
2840 continue;
2841
2842 if (!MOP.getReg().isPhysical())
2843 continue;
2844
2845 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2846 Bad = false;
2847 }
2848 }
2849 if (Bad)
2850 report("Using an undefined physical register", MO, MONum);
2851 } else if (MRI->def_empty(Reg)) {
2852 report("Reading virtual register without a def", MO, MONum);
2853 } else {
2854 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2855 // We don't know which virtual registers are live in, so only complain
2856 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2857 // must be live in. PHI instructions are handled separately.
2858 if (MInfo.regsKilled.count(Reg))
2859 report("Using a killed virtual register", MO, MONum);
2860 else if (!MI->isPHI())
2861 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2862 }
2863 }
2864 }
2865
2866 if (MO->isDef()) {
2867 // Register defined.
2868 // TODO: verify that earlyclobber ops are not used.
2869 if (MO->isDead())
2870 addRegWithSubRegs(regsDead, Reg);
2871 else
2872 addRegWithSubRegs(regsDefined, Reg);
2873
2874 // Verify SSA form.
2875 if (MRI->isSSA() && Reg.isVirtual() &&
2876 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2877 report("Multiple virtual register defs in SSA form", MO, MONum);
2878
2879 // Check LiveInts for a live segment, but only for virtual registers.
2880 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2881 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2882 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2883
2884 if (Reg.isVirtual()) {
2885 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2886
2887 if (LI->hasSubRanges()) {
2888 LaneBitmask MOMask = SubRegIdx != 0
2889 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2890 : MRI->getMaxLaneMaskForVReg(Reg);
2891 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2892 if ((SR.LaneMask & MOMask).none())
2893 continue;
2894 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2895 }
2896 }
2897 }
2898 }
2899 }
2900}
2901
2902// This function gets called after visiting all instructions in a bundle. The
2903// argument points to the bundle header.
2904// Normal stand-alone instructions are also considered 'bundles', and this
2905// function is called for all of them.
2906void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2907 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2908 set_union(MInfo.regsKilled, regsKilled);
2909 set_subtract(regsLive, regsKilled); regsKilled.clear();
2910 // Kill any masked registers.
2911 while (!regMasks.empty()) {
2912 const uint32_t *Mask = regMasks.pop_back_val();
2913 for (Register Reg : regsLive)
2914 if (Reg.isPhysical() &&
2915 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2916 regsDead.push_back(Reg);
2917 }
2918 set_subtract(regsLive, regsDead); regsDead.clear();
2919 set_union(regsLive, regsDefined); regsDefined.clear();
2920}
2921
2922void
2923MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2924 MBBInfoMap[MBB].regsLiveOut = regsLive;
2925 regsLive.clear();
2926
2927 if (Indexes) {
2928 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2929 if (!(stop > lastIndex)) {
2930 report("Block ends before last instruction index", MBB);
2931 errs() << "Block ends at " << stop
2932 << " last instruction was at " << lastIndex << '\n';
2933 }
2934 lastIndex = stop;
2935 }
2936}
2937
2938namespace {
2939// This implements a set of registers that serves as a filter: can filter other
2940// sets by passing through elements not in the filter and blocking those that
2941// are. Any filter implicitly includes the full set of physical registers upon
2942// creation, thus filtering them all out. The filter itself as a set only grows,
2943// and needs to be as efficient as possible.
2944struct VRegFilter {
2945 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2946 // no duplicates. Both virtual and physical registers are fine.
2947 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2948 SmallVector<Register, 0> VRegsBuffer;
2949 filterAndAdd(FromRegSet, VRegsBuffer);
2950 }
2951 // Filter \p FromRegSet through the filter and append passed elements into \p
2952 // ToVRegs. All elements appended are then added to the filter itself.
2953 // \returns true if anything changed.
2954 template <typename RegSetT>
2955 bool filterAndAdd(const RegSetT &FromRegSet,
2956 SmallVectorImpl<Register> &ToVRegs) {
2957 unsigned SparseUniverse = Sparse.size();
2958 unsigned NewSparseUniverse = SparseUniverse;
2959 unsigned NewDenseSize = Dense.size();
2960 size_t Begin = ToVRegs.size();
2961 for (Register Reg : FromRegSet) {
2962 if (!Reg.isVirtual())
2963 continue;
2964 unsigned Index = Register::virtReg2Index(Reg);
2965 if (Index < SparseUniverseMax) {
2966 if (Index < SparseUniverse && Sparse.test(Index))
2967 continue;
2968 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2969 } else {
2970 if (Dense.count(Reg))
2971 continue;
2972 ++NewDenseSize;
2973 }
2974 ToVRegs.push_back(Reg);
2975 }
2976 size_t End = ToVRegs.size();
2977 if (Begin == End)
2978 return false;
2979 // Reserving space in sets once performs better than doing so continuously
2980 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2981 // tuned all the way down) and double iteration (the second one is over a
2982 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2983 Sparse.resize(NewSparseUniverse);
2984 Dense.reserve(NewDenseSize);
2985 for (unsigned I = Begin; I < End; ++I) {
2986 Register Reg = ToVRegs[I];
2987 unsigned Index = Register::virtReg2Index(Reg);
2988 if (Index < SparseUniverseMax)
2989 Sparse.set(Index);
2990 else
2991 Dense.insert(Reg);
2992 }
2993 return true;
2994 }
2995
2996private:
2997 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2998 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2999 // are tracked by Dense. The only purpose of the threashold and the Dense set
3000 // is to have a reasonably growing memory usage in pathological cases (large
3001 // number of very sparse VRegFilter instances live at the same time). In
3002 // practice even in the worst-by-execution time cases having all elements
3003 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3004 // space efficient than if tracked by Dense. The threashold is set to keep the
3005 // worst-case memory usage within 2x of figures determined empirically for
3006 // "all Dense" scenario in such worst-by-execution-time cases.
3007 BitVector Sparse;
3009};
3010
3011// Implements both a transfer function and a (binary, in-place) join operator
3012// for a dataflow over register sets with set union join and filtering transfer
3013// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3014// Maintains out_b as its state, allowing for O(n) iteration over it at any
3015// time, where n is the size of the set (as opposed to O(U) where U is the
3016// universe). filter_b implicitly contains all physical registers at all times.
3017class FilteringVRegSet {
3018 VRegFilter Filter;
3020
3021public:
3022 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3023 // Both virtual and physical registers are fine.
3024 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3025 Filter.add(RS);
3026 }
3027 // Passes \p RS through the filter_b (transfer function) and adds what's left
3028 // to itself (out_b).
3029 template <typename RegSetT> bool add(const RegSetT &RS) {
3030 // Double-duty the Filter: to maintain VRegs a set (and the join operation
3031 // a set union) just add everything being added here to the Filter as well.
3032 return Filter.filterAndAdd(RS, VRegs);
3033 }
3034 using const_iterator = decltype(VRegs)::const_iterator;
3035 const_iterator begin() const { return VRegs.begin(); }
3036 const_iterator end() const { return VRegs.end(); }
3037 size_t size() const { return VRegs.size(); }
3038};
3039} // namespace
3040
3041// Calculate the largest possible vregsPassed sets. These are the registers that
3042// can pass through an MBB live, but may not be live every time. It is assumed
3043// that all vregsPassed sets are empty before the call.
3044void MachineVerifier::calcRegsPassed() {
3045 if (MF->empty())
3046 // ReversePostOrderTraversal doesn't handle empty functions.
3047 return;
3048
3049 for (const MachineBasicBlock *MB :
3051 FilteringVRegSet VRegs;
3052 BBInfo &Info = MBBInfoMap[MB];
3053 assert(Info.reachable);
3054
3055 VRegs.addToFilter(Info.regsKilled);
3056 VRegs.addToFilter(Info.regsLiveOut);
3057 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3058 const BBInfo &PredInfo = MBBInfoMap[Pred];
3059 if (!PredInfo.reachable)
3060 continue;
3061
3062 VRegs.add(PredInfo.regsLiveOut);
3063 VRegs.add(PredInfo.vregsPassed);
3064 }
3065 Info.vregsPassed.reserve(VRegs.size());
3066 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3067 }
3068}
3069
3070// Calculate the set of virtual registers that must be passed through each basic
3071// block in order to satisfy the requirements of successor blocks. This is very
3072// similar to calcRegsPassed, only backwards.
3073void MachineVerifier::calcRegsRequired() {
3074 // First push live-in regs to predecessors' vregsRequired.
3076 for (const auto &MBB : *MF) {
3077 BBInfo &MInfo = MBBInfoMap[&MBB];
3078 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3079 BBInfo &PInfo = MBBInfoMap[Pred];
3080 if (PInfo.addRequired(MInfo.vregsLiveIn))
3081 todo.insert(Pred);
3082 }
3083
3084 // Handle the PHI node.
3085 for (const MachineInstr &MI : MBB.phis()) {
3086 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3087 // Skip those Operands which are undef regs or not regs.
3088 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3089 continue;
3090
3091 // Get register and predecessor for one PHI edge.
3092 Register Reg = MI.getOperand(i).getReg();
3093 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3094
3095 BBInfo &PInfo = MBBInfoMap[Pred];
3096 if (PInfo.addRequired(Reg))
3097 todo.insert(Pred);
3098 }
3099 }
3100 }
3101
3102 // Iteratively push vregsRequired to predecessors. This will converge to the
3103 // same final state regardless of DenseSet iteration order.
3104 while (!todo.empty()) {
3105 const MachineBasicBlock *MBB = *todo.begin();
3106 todo.erase(MBB);
3107 BBInfo &MInfo = MBBInfoMap[MBB];
3108 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3109 if (Pred == MBB)
3110 continue;
3111 BBInfo &SInfo = MBBInfoMap[Pred];
3112 if (SInfo.addRequired(MInfo.vregsRequired))
3113 todo.insert(Pred);
3114 }
3115 }
3116}
3117
3118// Check PHI instructions at the beginning of MBB. It is assumed that
3119// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3120void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3121 BBInfo &MInfo = MBBInfoMap[&MBB];
3122
3124 for (const MachineInstr &Phi : MBB) {
3125 if (!Phi.isPHI())
3126 break;
3127 seen.clear();
3128
3129 const MachineOperand &MODef = Phi.getOperand(0);
3130 if (!MODef.isReg() || !MODef.isDef()) {
3131 report("Expected first PHI operand to be a register def", &MODef, 0);
3132 continue;
3133 }
3134 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3135 MODef.isEarlyClobber() || MODef.isDebug())
3136 report("Unexpected flag on PHI operand", &MODef, 0);
3137 Register DefReg = MODef.getReg();
3138 if (!DefReg.isVirtual())
3139 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3140
3141 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3142 const MachineOperand &MO0 = Phi.getOperand(I);
3143 if (!MO0.isReg()) {
3144 report("Expected PHI operand to be a register", &MO0, I);
3145 continue;
3146 }
3147 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3148 MO0.isDebug() || MO0.isTied())
3149 report("Unexpected flag on PHI operand", &MO0, I);
3150
3151 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3152 if (!MO1.isMBB()) {
3153 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3154 continue;
3155 }
3156
3157 const MachineBasicBlock &Pre = *MO1.getMBB();
3158 if (!Pre.isSuccessor(&MBB)) {
3159 report("PHI input is not a predecessor block", &MO1, I + 1);
3160 continue;
3161 }
3162
3163 if (MInfo.reachable) {
3164 seen.insert(&Pre);
3165 BBInfo &PrInfo = MBBInfoMap[&Pre];
3166 if (!MO0.isUndef() && PrInfo.reachable &&
3167 !PrInfo.isLiveOut(MO0.getReg()))
3168 report("PHI operand is not live-out from predecessor", &MO0, I);
3169 }
3170 }
3171
3172 // Did we see all predecessors?
3173 if (MInfo.reachable) {
3174 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3175 if (!seen.count(Pred)) {
3176 report("Missing PHI operand", &Phi);
3177 errs() << printMBBReference(*Pred)
3178 << " is a predecessor according to the CFG.\n";
3179 }
3180 }
3181 }
3182 }
3183}
3184
3185static void
3187 std::function<void(const Twine &Message)> FailureCB) {
3189 CV.initialize(&errs(), FailureCB, MF);
3190
3191 for (const auto &MBB : MF) {
3192 CV.visit(MBB);
3193 for (const auto &MI : MBB.instrs())
3194 CV.visit(MI);
3195 }
3196
3197 if (CV.sawTokens()) {
3198 DT.recalculate(const_cast<MachineFunction &>(MF));
3199 CV.verify(DT);
3200 }
3201}
3202
3203void MachineVerifier::visitMachineFunctionAfter() {
3204 auto FailureCB = [this](const Twine &Message) {
3205 report(Message.str().c_str(), MF);
3206 };
3207 verifyConvergenceControl(*MF, DT, FailureCB);
3208
3209 calcRegsPassed();
3210
3211 for (const MachineBasicBlock &MBB : *MF)
3212 checkPHIOps(MBB);
3213
3214 // Now check liveness info if available
3215 calcRegsRequired();
3216
3217 // Check for killed virtual registers that should be live out.
3218 for (const auto &MBB : *MF) {
3219 BBInfo &MInfo = MBBInfoMap[&MBB];
3220 for (Register VReg : MInfo.vregsRequired)
3221 if (MInfo.regsKilled.count(VReg)) {
3222 report("Virtual register killed in block, but needed live out.", &MBB);
3223 errs() << "Virtual register " << printReg(VReg)
3224 << " is used after the block.\n";
3225 }
3226 }
3227
3228 if (!MF->empty()) {
3229 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3230 for (Register VReg : MInfo.vregsRequired) {
3231 report("Virtual register defs don't dominate all uses.", MF);
3232 report_context_vreg(VReg);
3233 }
3234 }
3235
3236 if (LiveVars)
3237 verifyLiveVariables();
3238 if (LiveInts)
3239 verifyLiveIntervals();
3240
3241 // Check live-in list of each MBB. If a register is live into MBB, check
3242 // that the register is in regsLiveOut of each predecessor block. Since
3243 // this must come from a definition in the predecesssor or its live-in
3244 // list, this will catch a live-through case where the predecessor does not
3245 // have the register in its live-in list. This currently only checks
3246 // registers that have no aliases, are not allocatable and are not
3247 // reserved, which could mean a condition code register for instance.
3248 if (MRI->tracksLiveness())
3249 for (const auto &MBB : *MF)
3251 MCPhysReg LiveInReg = P.PhysReg;
3252 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3253 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3254 continue;
3255 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3256 BBInfo &PInfo = MBBInfoMap[Pred];
3257 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3258 report("Live in register not found to be live out from predecessor.",
3259 &MBB);
3260 errs() << TRI->getName(LiveInReg)
3261 << " not found to be live out from "
3262 << printMBBReference(*Pred) << "\n";
3263 }
3264 }
3265 }
3266
3267 for (auto CSInfo : MF->getCallSitesInfo())
3268 if (!CSInfo.first->isCall())
3269 report("Call site info referencing instruction that is not call", MF);
3270
3271 // If there's debug-info, check that we don't have any duplicate value
3272 // tracking numbers.
3273 if (MF->getFunction().getSubprogram()) {
3274 DenseSet<unsigned> SeenNumbers;
3275 for (const auto &MBB : *MF) {
3276 for (const auto &MI : MBB) {
3277 if (auto Num = MI.peekDebugInstrNum()) {
3278 auto Result = SeenNumbers.insert((unsigned)Num);
3279 if (!Result.second)
3280 report("Instruction has a duplicated value tracking number", &MI);
3281 }
3282 }
3283 }
3284 }
3285}
3286
3287void MachineVerifier::verifyLiveVariables() {
3288 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3289 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3291 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3292 for (const auto &MBB : *MF) {
3293 BBInfo &MInfo = MBBInfoMap[&MBB];
3294
3295 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3296 if (MInfo.vregsRequired.count(Reg)) {
3297 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3298 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3299 errs() << "Virtual register " << printReg(Reg)
3300 << " must be live through the block.\n";
3301 }
3302 } else {
3303 if (VI.AliveBlocks.test(MBB.getNumber())) {
3304 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3305 errs() << "Virtual register " << printReg(Reg)
3306 << " is not needed live through the block.\n";
3307 }
3308 }
3309 }
3310 }
3311}
3312
3313void MachineVerifier::verifyLiveIntervals() {
3314 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3315 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3317
3318 // Spilling and splitting may leave unused registers around. Skip them.
3319 if (MRI->reg_nodbg_empty(Reg))
3320 continue;
3321
3322 if (!LiveInts->hasInterval(Reg)) {
3323 report("Missing live interval for virtual register", MF);
3324 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
3325 continue;
3326 }
3327
3328 const LiveInterval &LI = LiveInts->getInterval(Reg);
3329 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3330 verifyLiveInterval(LI);
3331 }
3332
3333 // Verify all the cached regunit intervals.
3334 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3335 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3336 verifyLiveRange(*LR, i);
3337}
3338
3339void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3340 const VNInfo *VNI, Register Reg,
3341 LaneBitmask LaneMask) {
3342 if (VNI->isUnused())
3343 return;
3344
3345 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3346
3347 if (!DefVNI) {
3348 report("Value not live at VNInfo def and not marked unused", MF);
3349 report_context(LR, Reg, LaneMask);
3350 report_context(*VNI);
3351 return;
3352 }
3353
3354 if (DefVNI != VNI) {
3355 report("Live segment at def has different VNInfo", MF);
3356 report_context(LR, Reg, LaneMask);
3357 report_context(*VNI);
3358 return;
3359 }
3360
3361 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3362 if (!MBB) {
3363 report("Invalid VNInfo definition index", MF);
3364 report_context(LR, Reg, LaneMask);
3365 report_context(*VNI);
3366 return;
3367 }
3368
3369 if (VNI->isPHIDef()) {
3370 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3371 report("PHIDef VNInfo is not defined at MBB start", MBB);
3372 report_context(LR, Reg, LaneMask);
3373 report_context(*VNI);
3374 }
3375 return;
3376 }
3377
3378 // Non-PHI def.
3379 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3380 if (!MI) {
3381 report("No instruction at VNInfo def index", MBB);
3382 report_context(LR, Reg, LaneMask);
3383 report_context(*VNI);
3384 return;
3385 }
3386
3387 if (Reg != 0) {
3388 bool hasDef = false;
3389 bool isEarlyClobber = false;
3390 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3391 if (!MOI->isReg() || !MOI->isDef())
3392 continue;
3393 if (Reg.isVirtual()) {
3394 if (MOI->getReg() != Reg)
3395 continue;
3396 } else {
3397 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3398 continue;
3399 }
3400 if (LaneMask.any() &&
3401 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3402 continue;
3403 hasDef = true;
3404 if (MOI->isEarlyClobber())
3405 isEarlyClobber = true;
3406 }
3407
3408 if (!hasDef) {
3409 report("Defining instruction does not modify register", MI);
3410 report_context(LR, Reg, LaneMask);
3411 report_context(*VNI);
3412 }
3413
3414 // Early clobber defs begin at USE slots, but other defs must begin at
3415 // DEF slots.
3416 if (isEarlyClobber) {
3417 if (!VNI->def.isEarlyClobber()) {
3418 report("Early clobber def must be at an early-clobber slot", MBB);
3419 report_context(LR, Reg, LaneMask);
3420 report_context(*VNI);
3421 }
3422 } else if (!VNI->def.isRegister()) {
3423 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3424 report_context(LR, Reg, LaneMask);
3425 report_context(*VNI);
3426 }
3427 }
3428}
3429
3430void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3432 Register Reg,
3433 LaneBitmask LaneMask) {
3434 const LiveRange::Segment &S = *I;
3435 const VNInfo *VNI = S.valno;
3436 assert(VNI && "Live segment has no valno");
3437
3438 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3439 report("Foreign valno in live segment", MF);
3440 report_context(LR, Reg, LaneMask);
3441 report_context(S);
3442 report_context(*VNI);
3443 }
3444
3445 if (VNI->isUnused()) {
3446 report("Live segment valno is marked unused", MF);
3447 report_context(LR, Reg, LaneMask);
3448 report_context(S);
3449 }
3450
3451 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3452 if (!MBB) {
3453 report("Bad start of live segment, no basic block", MF);
3454 report_context(LR, Reg, LaneMask);
3455 report_context(S);
3456 return;
3457 }
3458 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3459 if (S.start != MBBStartIdx && S.start != VNI->def) {
3460 report("Live segment must begin at MBB entry or valno def", MBB);
3461 report_context(LR, Reg, LaneMask);
3462 report_context(S);
3463 }
3464
3465 const MachineBasicBlock *EndMBB =
3466 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3467 if (!EndMBB) {
3468 report("Bad end of live segment, no basic block", MF);
3469 report_context(LR, Reg, LaneMask);
3470 report_context(S);
3471 return;
3472 }
3473
3474 // Checks for non-live-out segments.
3475 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3476 // RegUnit intervals are allowed dead phis.
3477 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3478 S.end == VNI->def.getDeadSlot())
3479 return;
3480
3481 // The live segment is ending inside EndMBB
3482 const MachineInstr *MI =
3484 if (!MI) {
3485 report("Live segment doesn't end at a valid instruction", EndMBB);
3486 report_context(LR, Reg, LaneMask);
3487 report_context(S);
3488 return;
3489 }
3490
3491 // The block slot must refer to a basic block boundary.
3492 if (S.end.isBlock()) {
3493 report("Live segment ends at B slot of an instruction", EndMBB);
3494 report_context(LR, Reg, LaneMask);
3495 report_context(S);
3496 }
3497
3498 if (S.end.isDead()) {
3499 // Segment ends on the dead slot.
3500 // That means there must be a dead def.
3501 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3502 report("Live segment ending at dead slot spans instructions", EndMBB);
3503 report_context(LR, Reg, LaneMask);
3504 report_context(S);
3505 }
3506 }
3507
3508 // After tied operands are rewritten, a live segment can only end at an
3509 // early-clobber slot if it is being redefined by an early-clobber def.
3510 // TODO: Before tied operands are rewritten, a live segment can only end at
3511 // an early-clobber slot if the last use is tied to an early-clobber def.
3512 if (MF->getProperties().hasProperty(
3514 S.end.isEarlyClobber()) {
3515 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3516 report("Live segment ending at early clobber slot must be "
3517 "redefined by an EC def in the same instruction",
3518 EndMBB);
3519 report_context(LR, Reg, LaneMask);
3520 report_context(S);
3521 }
3522 }
3523
3524 // The following checks only apply to virtual registers. Physreg liveness
3525 // is too weird to check.
3526 if (Reg.isVirtual()) {
3527 // A live segment can end with either a redefinition, a kill flag on a
3528 // use, or a dead flag on a def.
3529 bool hasRead = false;
3530 bool hasSubRegDef = false;
3531 bool hasDeadDef = false;
3532 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3533 if (!MOI->isReg() || MOI->getReg() != Reg)
3534 continue;
3535 unsigned Sub = MOI->getSubReg();
3536 LaneBitmask SLM =
3537 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3538 if (MOI->isDef()) {
3539 if (Sub != 0) {
3540 hasSubRegDef = true;
3541 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3542 // mask for subregister defs. Read-undef defs will be handled by
3543 // readsReg below.
3544 SLM = ~SLM;
3545 }
3546 if (MOI->isDead())
3547 hasDeadDef = true;
3548 }
3549 if (LaneMask.any() && (LaneMask & SLM).none())
3550 continue;
3551 if (MOI->readsReg())
3552 hasRead = true;
3553 }
3554 if (S.end.isDead()) {
3555 // Make sure that the corresponding machine operand for a "dead" live
3556 // range has the dead flag. We cannot perform this check for subregister
3557 // liveranges as partially dead values are allowed.
3558 if (LaneMask.none() && !hasDeadDef) {
3559 report(
3560 "Instruction ending live segment on dead slot has no dead flag",
3561 MI);
3562 report_context(LR, Reg, LaneMask);
3563 report_context(S);
3564 }
3565 } else {
3566 if (!hasRead) {
3567 // When tracking subregister liveness, the main range must start new
3568 // values on partial register writes, even if there is no read.
3569 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3570 !hasSubRegDef) {
3571 report("Instruction ending live segment doesn't read the register",
3572 MI);
3573 report_context(LR, Reg, LaneMask);
3574 report_context(S);
3575 }
3576 }
3577 }
3578 }
3579 }
3580
3581 // Now check all the basic blocks in this live segment.
3583 // Is this live segment the beginning of a non-PHIDef VN?
3584 if (S.start == VNI->def && !VNI->isPHIDef()) {
3585 // Not live-in to any blocks.
3586 if (MBB == EndMBB)
3587 return;
3588 // Skip this block.
3589 ++MFI;
3590 }
3591
3593 if (LaneMask.any()) {
3594 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3595 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3596 }
3597
3598 while (true) {
3599 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3600 // We don't know how to track physregs into a landing pad.
3601 if (!Reg.isVirtual() && MFI->isEHPad()) {
3602 if (&*MFI == EndMBB)
3603 break;
3604 ++MFI;
3605 continue;
3606 }
3607
3608 // Is VNI a PHI-def in the current block?
3609 bool IsPHI = VNI->isPHIDef() &&
3610 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3611
3612 // Check that VNI is live-out of all predecessors.
3613 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3614 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3615 // Predecessor of landing pad live-out on last call.
3616 if (MFI->isEHPad()) {
3617 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3618 if (MI.isCall()) {
3619 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3620 break;
3621 }
3622 }
3623 }
3624 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3625
3626 // All predecessors must have a live-out value. However for a phi
3627 // instruction with subregister intervals
3628 // only one of the subregisters (not necessarily the current one) needs to
3629 // be defined.
3630 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3631 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3632 continue;
3633 report("Register not marked live out of predecessor", Pred);
3634 report_context(LR, Reg, LaneMask);
3635 report_context(*VNI);
3636 errs() << " live into " << printMBBReference(*MFI) << '@'
3637 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3638 << PEnd << '\n';
3639 continue;
3640 }
3641
3642 // Only PHI-defs can take different predecessor values.
3643 if (!IsPHI && PVNI != VNI) {
3644 report("Different value live out of predecessor", Pred);
3645 report_context(LR, Reg, LaneMask);
3646 errs() << "Valno #" << PVNI->id << " live out of "
3647 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3648 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3649 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3650 }
3651 }
3652 if (&*MFI == EndMBB)
3653 break;
3654 ++MFI;
3655 }
3656}
3657
3658void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3659 LaneBitmask LaneMask) {
3660 for (const VNInfo *VNI : LR.valnos)
3661 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3662
3663 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3664 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3665}
3666
3667void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3668 Register Reg = LI.reg();
3669 assert(Reg.isVirtual());
3670 verifyLiveRange(LI, Reg);
3671
3672 if (LI.hasSubRanges()) {
3674 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3675 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3676 if ((Mask & SR.LaneMask).any()) {
3677 report("Lane masks of sub ranges overlap in live interval", MF);
3678 report_context(LI);
3679 }
3680 if ((SR.LaneMask & ~MaxMask).any()) {
3681 report("Subrange lanemask is invalid", MF);
3682 report_context(LI);
3683 }
3684 if (SR.empty()) {
3685 report("Subrange must not be empty", MF);
3686 report_context(SR, LI.reg(), SR.LaneMask);
3687 }
3688 Mask |= SR.LaneMask;
3689 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3690 if (!LI.covers(SR)) {
3691 report("A Subrange is not covered by the main range", MF);
3692 report_context(LI);
3693 }
3694 }
3695 }
3696
3697 // Check the LI only has one connected component.
3698 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3699 unsigned NumComp = ConEQ.Classify(LI);
3700 if (NumComp > 1) {
3701 report("Multiple connected components in live interval", MF);
3702 report_context(LI);
3703 for (unsigned comp = 0; comp != NumComp; ++comp) {
3704 errs() << comp << ": valnos";
3705 for (const VNInfo *I : LI.valnos)
3706 if (comp == ConEQ.getEqClass(I))
3707 errs() << ' ' << I->id;
3708 errs() << '\n';
3709 }
3710 }
3711}
3712
namespace {

  // A zero stack adjustment is legal for both FrameSetup and FrameDestroy,
  // so one integer alone cannot say whether we are currently inside a
  // setup/destroy pair. Pair each adjustment value with a bool that records
  // whether a FrameSetup is pending.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EnterVal, int LeaveVal, bool EnterSetup,
                   bool LeaveSetup)
        : EntryValue(EnterVal), ExitValue(LeaveVal), EntryIsSetup(EnterSetup),
          ExitIsSetup(LeaveSetup) {}

    // Stack adjustment at block entry/exit; negative while a frame is being
    // set up.
    int EntryValue = 0;
    int ExitValue = 0;
    // True when a FrameSetup has not yet been matched by a FrameDestroy at
    // block entry/exit.
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
3733
3734/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3735/// by a FrameDestroy <n>, stack adjustments are identical on all
3736/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3737void MachineVerifier::verifyStackFrame() {
3738 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3739 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3740 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3741 return;
3742
3744 SPState.resize(MF->getNumBlockIDs());
3746
3747 // Visit the MBBs in DFS order.
3748 for (df_ext_iterator<const MachineFunction *,
3750 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3751 DFI != DFE; ++DFI) {
3752 const MachineBasicBlock *MBB = *DFI;
3753
3754 StackStateOfBB BBState;
3755 // Check the exit state of the DFS stack predecessor.
3756 if (DFI.getPathLength() >= 2) {
3757 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3758 assert(Reachable.count(StackPred) &&
3759 "DFS stack predecessor is already visited.\n");
3760 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3761 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3762 BBState.ExitValue = BBState.EntryValue;
3763 BBState.ExitIsSetup = BBState.EntryIsSetup;
3764 }
3765
3766 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3767 report("Call frame size on entry does not match value computed from "
3768 "predecessor",
3769 MBB);
3770 errs() << "Call frame size on entry " << MBB->getCallFrameSize()
3771 << " does not match value computed from predecessor "
3772 << -BBState.EntryValue << '\n';
3773 }
3774
3775 // Update stack state by checking contents of MBB.
3776 for (const auto &I : *MBB) {
3777 if (I.getOpcode() == FrameSetupOpcode) {
3778 if (BBState.ExitIsSetup)
3779 report("FrameSetup is after another FrameSetup", &I);
3780 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3781 report("AdjustsStack not set in presence of a frame pseudo "
3782 "instruction.", &I);
3783 BBState.ExitValue -= TII->getFrameTotalSize(I);
3784 BBState.ExitIsSetup = true;
3785 }
3786
3787 if (I.getOpcode() == FrameDestroyOpcode) {
3788 int Size = TII->getFrameTotalSize(I);
3789 if (!BBState.ExitIsSetup)
3790 report("FrameDestroy is not after a FrameSetup", &I);
3791 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3792 BBState.ExitValue;
3793 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3794 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3795 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3796 << AbsSPAdj << ">.\n";
3797 }
3798 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3799 report("AdjustsStack not set in presence of a frame pseudo "
3800 "instruction.", &I);
3801 BBState.ExitValue += Size;
3802 BBState.ExitIsSetup = false;
3803 }
3804 }
3805 SPState[MBB->getNumber()] = BBState;
3806
3807 // Make sure the exit state of any predecessor is consistent with the entry
3808 // state.
3809 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3810 if (Reachable.count(Pred) &&
3811 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3812 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3813 report("The exit stack state of a predecessor is inconsistent.", MBB);
3814 errs() << "Predecessor " << printMBBReference(*Pred)
3815 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3816 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3817 << printMBBReference(*MBB) << " has entry state ("
3818 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3819 }
3820 }
3821
3822 // Make sure the entry state of any successor is consistent with the exit
3823 // state.
3824 for (const MachineBasicBlock *Succ : MBB->successors()) {
3825 if (Reachable.count(Succ) &&
3826 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3827 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3828 report("The entry stack state of a successor is inconsistent.", MBB);
3829 errs() << "Successor " << printMBBReference(*Succ)
3830 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3831 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3832 << printMBBReference(*MBB) << " has exit state ("
3833 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3834 }
3835 }
3836
3837 // Make sure a basic block with return ends with zero stack adjustment.
3838 if (!MBB->empty() && MBB->back().isReturn()) {
3839 if (BBState.ExitIsSetup)
3840 report("A return block ends with a FrameSetup.", MBB);
3841 if (BBState.ExitValue)
3842 report("A return block ends with a nonzero stack adjustment.", MBB);
3843 }
3844 }
3845}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB)
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1356
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:648
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:149
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:355
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:280
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:203
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:780
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:940
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:974
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:965
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:64
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:175
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:208
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:241
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:211
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:215
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:230
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:271
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:236
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:218
SlotIndexes pass.
Definition: SlotIndexes.h:296
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:458
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:493
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:498
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:469
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:367
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:448
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:362
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:361
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
iterator begin() const
Definition: SmallPtrSet.h:432
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:215
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:222
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:236
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2067
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:97
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:34
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:356
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.