LLVM 19.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
28#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
63#include "llvm/IR/BasicBlock.h"
64#include "llvm/IR/Constants.h"
66#include "llvm/IR/Function.h"
67#include "llvm/IR/InlineAsm.h"
70#include "llvm/MC/LaneBitmask.h"
71#include "llvm/MC/MCAsmInfo.h"
72#include "llvm/MC/MCDwarf.h"
73#include "llvm/MC/MCInstrDesc.h"
76#include "llvm/Pass.h"
80#include "llvm/Support/ModRef.h"
83#include <algorithm>
84#include <cassert>
85#include <cstddef>
86#include <cstdint>
87#include <iterator>
88#include <string>
89#include <utility>
90
91using namespace llvm;
92
93namespace {
94
// NOTE(review): doxygen-rendered capture — each line keeps its doxygen line
// number, and several member declarations are missing from this view (the
// RegMap/BlockSet typedefs near line 125-126, the per-MBB BBInfo map near
// line 209, a member near line 229, and parts of two verifyLiveRange*
// declarations). Comments below describe only what is visible here.
95 struct MachineVerifier {
// Constructor used when running under a legacy pass; analyses are queried
// from PASS later, inside verify().
96 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
97
// Constructor used when the caller supplies the analyses directly; PASS
// stays null so verify() skips the getAnalysisIfAvailable path.
98 MachineVerifier(const char *b, LiveVariables *LiveVars,
99 LiveIntervals *LiveInts, LiveStacks *LiveStks,
100 SlotIndexes *Indexes)
101 : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
102 Indexes(Indexes) {}
103
// Run every check on MF. Returns the number of errors found (0 == clean).
104 unsigned verify(const MachineFunction &MF);
105
106 Pass *const PASS = nullptr;
// Optional banner printed before the first error report.
107 const char *Banner;
// Cached per-function context, set at the top of verify().
108 const MachineFunction *MF = nullptr;
109 const TargetMachine *TM = nullptr;
110 const TargetInstrInfo *TII = nullptr;
111 const TargetRegisterInfo *TRI = nullptr;
112 const MachineRegisterInfo *MRI = nullptr;
113 const RegisterBankInfo *RBI = nullptr;
114
// Running error count; incremented by the report() overloads.
115 unsigned foundErrors = 0;
116
117 // Avoid querying the MachineFunctionProperties for each operand.
118 bool isFunctionRegBankSelected = false;
119 bool isFunctionSelected = false;
120 bool isFunctionTracksDebugUserValues = false;
121
122 using RegVector = SmallVector<Register, 16>;
123 using RegMaskVector = SmallVector<const uint32_t *, 4>;
124 using RegSet = DenseSet<Register>;
// NOTE(review): doxygen lines 125-126 (presumably the RegMap and BlockSet
// typedefs used below) are missing from this capture.
127
// Per-block scan state, reset by the basic-block visitors.
128 const MachineInstr *FirstNonPHI = nullptr;
129 const MachineInstr *FirstTerminator = nullptr;
130 BlockSet FunctionBlocks;
131
132 BitVector regsReserved;
133 RegSet regsLive;
134 RegVector regsDefined, regsDead, regsKilled;
135 RegMaskVector regMasks;
136
// Index of the last instruction visited; used to check index ordering.
137 SlotIndex lastIndex;
138
139 // Add Reg and any sub-registers to RV
140 void addRegWithSubRegs(RegVector &RV, Register Reg) {
141 RV.push_back(Reg);
142 if (Reg.isPhysical())
143 append_range(RV, TRI->subregs(Reg.asMCReg()));
144 }
145
146 struct BBInfo {
147 // Is this MBB reachable from the MF entry point?
148 bool reachable = false;
149
150 // Vregs that must be live in because they are used without being
151 // defined. Map value is the user. vregsLiveIn doesn't include regs
152 // that only are used by PHI nodes.
153 RegMap vregsLiveIn;
154
155 // Regs killed in MBB. They may be defined again, and will then be in both
156 // regsKilled and regsLiveOut.
157 RegSet regsKilled;
158
159 // Regs defined in MBB and live out. Note that vregs passing through may
160 // be live out without being mentioned here.
161 RegSet regsLiveOut;
162
163 // Vregs that pass through MBB untouched. This set is disjoint from
164 // regsKilled and regsLiveOut.
165 RegSet vregsPassed;
166
167 // Vregs that must pass through MBB because they are needed by a successor
168 // block. This set is disjoint from regsLiveOut.
169 RegSet vregsRequired;
170
171 // Set versions of block's predecessor and successor lists.
172 BlockSet Preds, Succs;
173
174 BBInfo() = default;
175
176 // Add register to vregsRequired if it belongs there. Return true if
177 // anything changed.
178 bool addRequired(Register Reg) {
179 if (!Reg.isVirtual())
180 return false;
181 if (regsLiveOut.count(Reg))
182 return false;
183 return vregsRequired.insert(Reg).second;
184 }
185
186 // Same for a full set.
187 bool addRequired(const RegSet &RS) {
188 bool Changed = false;
189 for (Register Reg : RS)
190 Changed |= addRequired(Reg);
191 return Changed;
192 }
193
194 // Same for a full map.
195 bool addRequired(const RegMap &RM) {
196 bool Changed = false;
197 for (const auto &I : RM)
198 Changed |= addRequired(I.first);
199 return Changed;
200 }
201
202 // Live-out registers are either in regsLiveOut or vregsPassed.
203 bool isLiveOut(Register Reg) const {
204 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
205 }
206 };
207
208 // Extra register info per MBB.
// NOTE(review): doxygen line 209 (presumably the MBBInfoMap declaration
// referenced elsewhere in this file) is missing from this capture.
210
// A register counts as reserved only if it fits inside the frozen
// regsReserved BitVector and its bit is set.
211 bool isReserved(Register Reg) {
212 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
213 }
214
// Allocatable = a known physreg, in an allocatable class, and not reserved.
215 bool isAllocatable(Register Reg) const {
216 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
217 !regsReserved.test(Reg.id());
218 }
219
220 // Analysis information if available
221 LiveVariables *LiveVars = nullptr;
222 LiveIntervals *LiveInts = nullptr;
223 LiveStacks *LiveStks = nullptr;
224 SlotIndexes *Indexes = nullptr;
225
226 // This is calculated only when trying to verify convergence control tokens.
227 // Similar to the LLVM IR verifier, we calculate this locally instead of
228 // relying on the pass manager.
// NOTE(review): doxygen line 229 (the member this comment describes) is
// missing from this capture.
230
231 void visitMachineFunctionBefore();
232 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
233 void visitMachineBundleBefore(const MachineInstr *MI);
234
235 /// Verify that all of \p MI's virtual register operands are scalars.
236 /// \returns True if all virtual register operands are scalar. False
237 /// otherwise.
238 bool verifyAllRegOpsScalar(const MachineInstr &MI,
239 const MachineRegisterInfo &MRI);
240 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
241
242 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
243 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
244 void verifyPreISelGenericInstruction(const MachineInstr *MI);
245
246 void visitMachineInstrBefore(const MachineInstr *MI);
247 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
248 void visitMachineBundleAfter(const MachineInstr *MI);
249 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
250 void visitMachineFunctionAfter();
251
// Error reporting: each overload prints increasing amounts of context and
// bumps foundErrors (via the MachineFunction overload).
252 void report(const char *msg, const MachineFunction *MF);
253 void report(const char *msg, const MachineBasicBlock *MBB);
254 void report(const char *msg, const MachineInstr *MI);
255 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
256 LLT MOVRegType = LLT{});
257 void report(const Twine &Msg, const MachineInstr *MI);
258
// Helpers that append extra context lines after a report().
259 void report_context(const LiveInterval &LI) const;
260 void report_context(const LiveRange &LR, Register VRegUnit,
261 LaneBitmask LaneMask) const;
262 void report_context(const LiveRange::Segment &S) const;
263 void report_context(const VNInfo &VNI) const;
264 void report_context(SlotIndex Pos) const;
265 void report_context(MCPhysReg PhysReg) const;
266 void report_context_liverange(const LiveRange &LR) const;
267 void report_context_lanemask(LaneBitmask LaneMask) const;
268 void report_context_vreg(Register VReg) const;
269 void report_context_vreg_regunit(Register VRegOrUnit) const;
270
271 void verifyInlineAsm(const MachineInstr *MI);
272
273 void checkLiveness(const MachineOperand *MO, unsigned MONum);
274 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
275 SlotIndex UseIdx, const LiveRange &LR,
276 Register VRegOrUnit,
277 LaneBitmask LaneMask = LaneBitmask::getNone());
278 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
279 SlotIndex DefIdx, const LiveRange &LR,
280 Register VRegOrUnit, bool SubRangeCheck = false,
281 LaneBitmask LaneMask = LaneBitmask::getNone());
282
283 void markReachable(const MachineBasicBlock *MBB);
284 void calcRegsPassed();
285 void checkPHIOps(const MachineBasicBlock &MBB);
286
287 void calcRegsRequired();
288 void verifyLiveVariables();
289 void verifyLiveIntervals();
290 void verifyLiveInterval(const LiveInterval&);
// NOTE(review): the parameter lists of the next two declarations are cut
// short (doxygen lines 292 and 294-295 are missing from this capture).
291 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
293 void verifyLiveRangeSegment(const LiveRange &,
296 void verifyLiveRange(const LiveRange &, Register,
297 LaneBitmask LaneMask = LaneBitmask::getNone());
298
299 void verifyStackFrame();
300
301 void verifySlotIndexes() const;
302 void verifyProperties(const MachineFunction &MF);
303 };
304
// Legacy-PM wrapper pass that runs MachineVerifier on every machine function
// and aborts compilation if any error is found.
305 struct MachineVerifierPass : public MachineFunctionPass {
306 static char ID; // Pass ID, replacement for typeid
307
// Banner text forwarded to the verifier for its error output.
308 const std::string Banner;
309
310 MachineVerifierPass(std::string banner = std::string())
311 : MachineFunctionPass(ID), Banner(std::move(banner)) {
// NOTE(review): doxygen line 312 (presumably the pass-registry
// initialization call) is missing from this capture.
313 }
314
315 void getAnalysisUsage(AnalysisUsage &AU) const override {
// NOTE(review): doxygen lines 316-319 and 321 (additional analysis-usage
// declarations) are missing from this capture.
320 AU.setPreservesAll();
322 }
323
324 bool runOnMachineFunction(MachineFunction &MF) override {
325 // Skip functions that have known verification problems.
326 // FIXME: Remove this mechanism when all problematic passes have been
327 // fixed.
328 if (MF.getProperties().hasProperty(
329 MachineFunctionProperties::Property::FailsVerification))
330 return false;
331
// The pass never mutates the function; it only reports (or aborts on)
// errors, so runOnMachineFunction always returns false.
332 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
333 if (FoundErrors)
334 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
335 return false;
336 }
337 };
338
339} // end anonymous namespace
340
341char MachineVerifierPass::ID = 0;
342
// Register the wrapper pass under the "machineverifier" command-line name.
343INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
344 "Verify generated machine code", false, false)
345
// NOTE(review): the signature line of createMachineVerifierPass (doxygen
// line 346) is missing from this capture; only the body remains.
347 return new MachineVerifierPass(Banner);
348}
349
350void llvm::verifyMachineFunction(const std::string &Banner,
351 const MachineFunction &MF) {
352 // TODO: Use MFAM after porting below analyses.
353 // LiveVariables *LiveVars;
354 // LiveIntervals *LiveInts;
355 // LiveStacks *LiveStks;
356 // SlotIndexes *Indexes;
357 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
358 if (FoundErrors)
359 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
360}
361
362bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
363 const {
364 MachineFunction &MF = const_cast<MachineFunction&>(*this);
365 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
366 if (AbortOnErrors && FoundErrors)
367 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
368 return FoundErrors == 0;
369}
370
// NOTE(review): the first signature line of this MachineFunction::verify
// overload (doxygen line 371) is missing from this capture; judging from the
// body it verifies against caller-supplied LiveInts and Indexes analyses —
// confirm against the header.
372 const char *Banner, bool AbortOnErrors) const {
373 MachineFunction &MF = const_cast<MachineFunction &>(*this);
374 unsigned FoundErrors =
375 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
376 if (AbortOnErrors && FoundErrors)
377 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
378 return FoundErrors == 0;
379}
380
// Checks that the SlotIndexes MBB index list is strictly increasing; a no-op
// when no SlotIndexes analysis is available.
381void MachineVerifier::verifySlotIndexes() const {
382 if (Indexes == nullptr)
383 return;
384
385 // Ensure the IdxMBB list is sorted by slot indexes.
// NOTE(review): doxygen lines 386-387 (the 'Last' variable declaration and
// the start of the iterator-loop header) are missing from this capture.
388 E = Indexes->MBBIndexEnd(); I != E; ++I) {
389 assert(!Last.isValid() || I->first > Last);
390 Last = I->first;
391 }
392}
393
394void MachineVerifier::verifyProperties(const MachineFunction &MF) {
395 // If a pass has introduced virtual registers without clearing the
396 // NoVRegs property (or set it without allocating the vregs)
397 // then report an error.
398 if (MF.getProperties().hasProperty(
// NOTE(review): doxygen line 399 (presumably the NoVRegs property
// enumerator and the '&&' joining the vreg-count check) is missing from
// this capture.
400 MRI->getNumVirtRegs())
401 report("Function has NoVRegs property but there are VReg operands", &MF);
402}
403
// Main driver: caches per-function context, fetches available analyses, then
// walks every block/bundle/instruction/operand through the visit* hooks.
// Returns the number of errors found.
404unsigned MachineVerifier::verify(const MachineFunction &MF) {
405 foundErrors = 0;
406
407 this->MF = &MF;
408 TM = &MF.getTarget();
// NOTE(review): doxygen lines 409-410 (presumably the TII/TRI subtarget
// initializations) are missing from this capture.
411 RBI = MF.getSubtarget().getRegBankInfo();
412 MRI = &MF.getRegInfo();
413
414 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
// NOTE(review): doxygen line 415 (the FailedISel property enumerator) is
// missing from this capture.
416
417 // If we're mid-GlobalISel and we already triggered the fallback path then
418 // it's expected that the MIR is somewhat broken but that's ok since we'll
419 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
420 if (isFunctionFailedISel)
421 return foundErrors;
422
// Cache the properties queried per-operand; the enumerator lines (doxygen
// 424, 426, 428) are missing from this capture.
423 isFunctionRegBankSelected = MF.getProperties().hasProperty(
425 isFunctionSelected = MF.getProperties().hasProperty(
427 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
429
430 if (PASS) {
431 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
432 // We don't want to verify LiveVariables if LiveIntervals is available.
433 if (!LiveInts)
434 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
435 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
436 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
437 }
438
439 verifySlotIndexes();
440
441 verifyProperties(MF);
442
443 visitMachineFunctionBefore();
444 for (const MachineBasicBlock &MBB : MF) {
445 visitMachineBasicBlockBefore(&MBB);
446 // Keep track of the current bundle header.
447 const MachineInstr *CurBundle = nullptr;
448 // Do we expect the next instruction to be part of the same bundle?
449 bool InBundle = false;
450
451 for (const MachineInstr &MI : MBB.instrs()) {
452 if (MI.getParent() != &MBB) {
453 report("Bad instruction parent pointer", &MBB);
454 errs() << "Instruction: " << MI;
455 continue;
456 }
457
458 // Check for consistent bundle flags.
459 if (InBundle && !MI.isBundledWithPred())
460 report("Missing BundledPred flag, "
461 "BundledSucc was set on predecessor",
462 &MI);
463 if (!InBundle && MI.isBundledWithPred())
464 report("BundledPred flag is set, "
465 "but BundledSucc not set on predecessor",
466 &MI);
467
468 // Is this a bundle header?
469 if (!MI.isInsideBundle()) {
470 if (CurBundle)
471 visitMachineBundleAfter(CurBundle);
472 CurBundle = &MI;
473 visitMachineBundleBefore(CurBundle);
474 } else if (!CurBundle)
475 report("No bundle header", &MI);
476 visitMachineInstrBefore(&MI);
477 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
478 const MachineOperand &Op = MI.getOperand(I);
479 if (Op.getParent() != &MI) {
480 // Make sure to use correct addOperand / removeOperand / ChangeTo
481 // functions when replacing operands of a MachineInstr.
482 report("Instruction has operand with wrong parent set", &MI);
483 }
484
485 visitMachineOperand(&Op, I);
486 }
487
488 // Was this the last bundled instruction?
489 InBundle = MI.isBundledWithSucc();
490 }
491 if (CurBundle)
492 visitMachineBundleAfter(CurBundle);
493 if (InBundle)
494 report("BundledSucc flag set on last instruction in block", &MBB.back());
495 visitMachineBasicBlockAfter(&MBB);
496 }
497 visitMachineFunctionAfter();
498
499 // Clean up.
500 regsLive.clear();
501 regsDefined.clear();
502 regsDead.clear();
503 regsKilled.clear();
504 regMasks.clear();
505 MBBInfoMap.clear();
506
507 return foundErrors;
508}
509
510void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
511 assert(MF);
512 errs() << '\n';
513 if (!foundErrors++) {
514 if (Banner)
515 errs() << "# " << Banner << '\n';
516 if (LiveInts != nullptr)
517 LiveInts->print(errs());
518 else
519 MF->print(errs(), Indexes);
520 }
521 errs() << "*** Bad machine code: " << msg << " ***\n"
522 << "- function: " << MF->getName() << "\n";
523}
524
525void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
526 assert(MBB);
527 report(msg, MBB->getParent());
528 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
529 << MBB->getName() << " (" << (const void *)MBB << ')';
530 if (Indexes)
531 errs() << " [" << Indexes->getMBBStartIdx(MBB)
532 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
533 errs() << '\n';
534}
535
536void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
537 assert(MI);
538 report(msg, MI->getParent());
539 errs() << "- instruction: ";
540 if (Indexes && Indexes->hasIndex(*MI))
541 errs() << Indexes->getInstructionIndex(*MI) << '\t';
542 MI->print(errs(), /*IsStandalone=*/true);
543}
544
545void MachineVerifier::report(const char *msg, const MachineOperand *MO,
546 unsigned MONum, LLT MOVRegType) {
547 assert(MO);
548 report(msg, MO->getParent());
549 errs() << "- operand " << MONum << ": ";
550 MO->print(errs(), MOVRegType, TRI);
551 errs() << "\n";
552}
553
554void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
555 report(Msg.str().c_str(), MI);
556}
557
558void MachineVerifier::report_context(SlotIndex Pos) const {
559 errs() << "- at: " << Pos << '\n';
560}
561
562void MachineVerifier::report_context(const LiveInterval &LI) const {
563 errs() << "- interval: " << LI << '\n';
564}
565
566void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
567 LaneBitmask LaneMask) const {
568 report_context_liverange(LR);
569 report_context_vreg_regunit(VRegUnit);
570 if (LaneMask.any())
571 report_context_lanemask(LaneMask);
572}
573
574void MachineVerifier::report_context(const LiveRange::Segment &S) const {
575 errs() << "- segment: " << S << '\n';
576}
577
578void MachineVerifier::report_context(const VNInfo &VNI) const {
579 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
580}
581
582void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
583 errs() << "- liverange: " << LR << '\n';
584}
585
586void MachineVerifier::report_context(MCPhysReg PReg) const {
587 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
588}
589
590void MachineVerifier::report_context_vreg(Register VReg) const {
591 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
592}
593
594void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
595 if (VRegOrUnit.isVirtual()) {
596 report_context_vreg(VRegOrUnit);
597 } else {
598 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
599 }
600}
601
602void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
603 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
604}
605
606void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
607 BBInfo &MInfo = MBBInfoMap[MBB];
608 if (!MInfo.reachable) {
609 MInfo.reachable = true;
610 for (const MachineBasicBlock *Succ : MBB->successors())
611 markReachable(Succ);
612 }
613}
614
615void MachineVerifier::visitMachineFunctionBefore() {
616 lastIndex = SlotIndex();
617 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
618 : TRI->getReservedRegs(*MF);
619
620 if (!MF->empty())
621 markReachable(&MF->front());
622
623 // Build a set of the basic blocks in the function.
624 FunctionBlocks.clear();
625 for (const auto &MBB : *MF) {
626 FunctionBlocks.insert(&MBB);
627 BBInfo &MInfo = MBBInfoMap[&MBB];
628
629 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
630 if (MInfo.Preds.size() != MBB.pred_size())
631 report("MBB has duplicate entries in its predecessor list.", &MBB);
632
633 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
634 if (MInfo.Succs.size() != MBB.succ_size())
635 report("MBB has duplicate entries in its successor list.", &MBB);
636 }
637
638 // Check that the register use lists are sane.
639 MRI->verifyUseLists();
640
641 if (!MF->empty())
642 verifyStackFrame();
643}
644
// Per-block checks: live-in sanity, CFG consistency with the pred/succ sets,
// landing-pad successor counts, and agreement between analyzeBranch's answer
// and the actual CFG; finally seeds regsLive from live-ins and pristines.
// NOTE(review): several doxygen lines are missing from this capture (657,
// 667, 674, 703, 710, 791) — conditions and local declarations referenced
// below are therefore incomplete here.
645void
646MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
647 FirstTerminator = nullptr;
648 FirstNonPHI = nullptr;
649
650 if (!MF->getProperties().hasProperty(
651 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
652 // If this block has allocatable physical registers live-in, check that
653 // it is an entry block or landing pad.
654 for (const auto &LI : MBB->liveins()) {
655 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
656 MBB->getIterator() != MBB->getParent()->begin() &&
// NOTE(review): doxygen line 657 (the last conjunct of this condition) is
// missing from this capture.
658 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
659 "inlineasm-br-indirect-target.",
660 MBB);
661 report_context(LI.PhysReg);
662 }
663 }
664 }
665
666 if (MBB->isIRBlockAddressTaken()) {
// NOTE(review): doxygen line 667 (the inner condition guarding this report)
// is missing from this capture.
668 report("ir-block-address-taken is associated with basic block not used by "
669 "a blockaddress.",
670 MBB);
671 }
672
673 // Count the number of landing pad successors.
// NOTE(review): doxygen line 674 (the LandingPadSuccs set declaration) is
// missing from this capture.
675 for (const auto *succ : MBB->successors()) {
676 if (succ->isEHPad())
677 LandingPadSuccs.insert(succ);
678 if (!FunctionBlocks.count(succ))
679 report("MBB has successor that isn't part of the function.", MBB);
680 if (!MBBInfoMap[succ].Preds.count(MBB)) {
681 report("Inconsistent CFG", MBB);
682 errs() << "MBB is not in the predecessor list of the successor "
683 << printMBBReference(*succ) << ".\n";
684 }
685 }
686
687 // Check the predecessor list.
688 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
689 if (!FunctionBlocks.count(Pred))
690 report("MBB has predecessor that isn't part of the function.", MBB);
691 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
692 report("Inconsistent CFG", MBB);
693 errs() << "MBB is not in the successor list of the predecessor "
694 << printMBBReference(*Pred) << ".\n";
695 }
696 }
697
698 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
699 const BasicBlock *BB = MBB->getBasicBlock();
700 const Function &F = MF->getFunction();
701 if (LandingPadSuccs.size() > 1 &&
702 !(AsmInfo &&
// NOTE(review): doxygen line 703 (a conjunct of this condition) is missing
// from this capture.
704 BB && isa<SwitchInst>(BB->getTerminator())) &&
705 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
706 report("MBB has more than one landing pad successor", MBB);
707
708 // Call analyzeBranch. If it succeeds, there several more conditions to check.
709 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
// NOTE(review): doxygen line 710 (the Cond operand-vector declaration) is
// missing from this capture.
711 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
712 Cond)) {
713 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
714 // check whether its answers match up with reality.
715 if (!TBB && !FBB) {
716 // Block falls through to its successor.
717 if (!MBB->empty() && MBB->back().isBarrier() &&
718 !TII->isPredicated(MBB->back())) {
719 report("MBB exits via unconditional fall-through but ends with a "
720 "barrier instruction!", MBB);
721 }
722 if (!Cond.empty()) {
723 report("MBB exits via unconditional fall-through but has a condition!",
724 MBB);
725 }
726 } else if (TBB && !FBB && Cond.empty()) {
727 // Block unconditionally branches somewhere.
728 if (MBB->empty()) {
729 report("MBB exits via unconditional branch but doesn't contain "
730 "any instructions!", MBB);
731 } else if (!MBB->back().isBarrier()) {
732 report("MBB exits via unconditional branch but doesn't end with a "
733 "barrier instruction!", MBB);
734 } else if (!MBB->back().isTerminator()) {
735 report("MBB exits via unconditional branch but the branch isn't a "
736 "terminator instruction!", MBB);
737 }
738 } else if (TBB && !FBB && !Cond.empty()) {
739 // Block conditionally branches somewhere, otherwise falls through.
740 if (MBB->empty()) {
741 report("MBB exits via conditional branch/fall-through but doesn't "
742 "contain any instructions!", MBB);
743 } else if (MBB->back().isBarrier()) {
744 report("MBB exits via conditional branch/fall-through but ends with a "
745 "barrier instruction!", MBB);
746 } else if (!MBB->back().isTerminator()) {
747 report("MBB exits via conditional branch/fall-through but the branch "
748 "isn't a terminator instruction!", MBB);
749 }
750 } else if (TBB && FBB) {
751 // Block conditionally branches somewhere, otherwise branches
752 // somewhere else.
753 if (MBB->empty()) {
754 report("MBB exits via conditional branch/branch but doesn't "
755 "contain any instructions!", MBB);
756 } else if (!MBB->back().isBarrier()) {
757 report("MBB exits via conditional branch/branch but doesn't end with a "
758 "barrier instruction!", MBB);
759 } else if (!MBB->back().isTerminator()) {
760 report("MBB exits via conditional branch/branch but the branch "
761 "isn't a terminator instruction!", MBB);
762 }
763 if (Cond.empty()) {
764 report("MBB exits via conditional branch/branch but there's no "
765 "condition!", MBB);
766 }
767 } else {
768 report("analyzeBranch returned invalid data!", MBB);
769 }
770
771 // Now check that the successors match up with the answers reported by
772 // analyzeBranch.
773 if (TBB && !MBB->isSuccessor(TBB))
774 report("MBB exits via jump or conditional branch, but its target isn't a "
775 "CFG successor!",
776 MBB);
777 if (FBB && !MBB->isSuccessor(FBB))
778 report("MBB exits via conditional branch, but its target isn't a CFG "
779 "successor!",
780 MBB);
781
782 // There might be a fallthrough to the next block if there's either no
783 // unconditional true branch, or if there's a condition, and one of the
784 // branches is missing.
785 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
786
787 // A conditional fallthrough must be an actual CFG successor, not
788 // unreachable. (Conversely, an unconditional fallthrough might not really
789 // be a successor, because the block might end in unreachable.)
790 if (!Cond.empty() && !FBB) {
// NOTE(review): doxygen line 791 (the MBBI iterator declaration) is missing
// from this capture.
792 if (MBBI == MF->end()) {
793 report("MBB conditionally falls through out of function!", MBB);
794 } else if (!MBB->isSuccessor(&*MBBI))
795 report("MBB exits via conditional branch/fall-through but the CFG "
796 "successors don't match the actual successors!",
797 MBB);
798 }
799
800 // Verify that there aren't any extra un-accounted-for successors.
801 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
802 // If this successor is one of the branch targets, it's okay.
803 if (SuccMBB == TBB || SuccMBB == FBB)
804 continue;
805 // If we might have a fallthrough, and the successor is the fallthrough
806 // block, that's also ok.
807 if (Fallthrough && SuccMBB == MBB->getNextNode())
808 continue;
809 // Also accept successors which are for exception-handling or might be
810 // inlineasm_br targets.
811 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
812 continue;
813 report("MBB has unexpected successors which are not branch targets, "
814 "fallthrough, EHPads, or inlineasm_br targets.",
815 MBB);
816 }
817 }
818
// Seed regsLive with the block's live-ins (plus all sub-registers) and the
// pristine callee-saved registers.
819 regsLive.clear();
820 if (MRI->tracksLiveness()) {
821 for (const auto &LI : MBB->liveins()) {
822 if (!Register::isPhysicalRegister(LI.PhysReg)) {
823 report("MBB live-in list contains non-physical register", MBB);
824 continue;
825 }
826 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
827 regsLive.insert(SubReg);
828 }
829 }
830
831 const MachineFrameInfo &MFI = MF->getFrameInfo();
832 BitVector PR = MFI.getPristineRegs(*MF);
833 for (unsigned I : PR.set_bits()) {
834 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
835 regsLive.insert(SubReg);
836 }
837
838 regsKilled.clear();
839 regsDefined.clear();
840
841 if (Indexes)
842 lastIndex = Indexes->getMBBStartIdx(MBB);
843}
844
845// This function gets called for all bundle headers, including normal
846// stand-alone unbundled instructions.
847void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
848 if (Indexes && Indexes->hasIndex(*MI)) {
849 SlotIndex idx = Indexes->getInstructionIndex(*MI);
850 if (!(idx > lastIndex)) {
851 report("Instruction index out of order", MI);
852 errs() << "Last instruction was at " << lastIndex << '\n';
853 }
854 lastIndex = idx;
855 }
856
857 // Ensure non-terminators don't follow terminators.
858 if (MI->isTerminator()) {
859 if (!FirstTerminator)
860 FirstTerminator = MI;
861 } else if (FirstTerminator) {
862 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
863 // precede non-terminators.
864 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
865 report("Non-terminator instruction after the first terminator", MI);
866 errs() << "First terminator was:\t" << *FirstTerminator;
867 }
868 }
869}
870
871// The operands on an INLINEASM instruction must follow a template.
872// Verify that the flag operands make sense.
873void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
874 // The first two operands on INLINEASM are the asm string and global flags.
875 if (MI->getNumOperands() < 2) {
876 report("Too few operands on inline asm", MI);
877 return;
878 }
879 if (!MI->getOperand(0).isSymbol())
880 report("Asm string must be an external symbol", MI);
881 if (!MI->getOperand(1).isImm())
882 report("Asm flags must be an immediate", MI);
883 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
884 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
885 // and Extra_IsConvergent = 32.
886 if (!isUInt<6>(MI->getOperand(1).getImm()))
887 report("Unknown asm flags", &MI->getOperand(1), 1);
888
889 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
890
891 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
892 unsigned NumOps;
893 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
894 const MachineOperand &MO = MI->getOperand(OpNo);
895 // There may be implicit ops after the fixed operands.
896 if (!MO.isImm())
897 break;
898 const InlineAsm::Flag F(MO.getImm());
899 NumOps = 1 + F.getNumOperandRegisters();
900 }
901
902 if (OpNo > MI->getNumOperands())
903 report("Missing operands in last group", MI);
904
905 // An optional MDNode follows the groups.
906 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
907 ++OpNo;
908
909 // All trailing operands must be implicit registers.
910 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
911 const MachineOperand &MO = MI->getOperand(OpNo);
912 if (!MO.isReg() || !MO.isImplicit())
913 report("Expected implicit register after groups", &MO, OpNo);
914 }
915
916 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
917 const MachineBasicBlock *MBB = MI->getParent();
918
919 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
920 i != e; ++i) {
921 const MachineOperand &MO = MI->getOperand(i);
922
923 if (!MO.isMBB())
924 continue;
925
926 // Check the successor & predecessor lists look ok, assume they are
927 // not. Find the indirect target without going through the successors.
928 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
929 if (!IndirectTargetMBB) {
930 report("INLINEASM_BR indirect target does not exist", &MO, i);
931 break;
932 }
933
934 if (!MBB->isSuccessor(IndirectTargetMBB))
935 report("INLINEASM_BR indirect target missing from successor list", &MO,
936 i);
937
938 if (!IndirectTargetMBB->isPredecessor(MBB))
939 report("INLINEASM_BR indirect target predecessor list missing parent",
940 &MO, i);
941 }
942 }
943}
944
945bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
946 const MachineRegisterInfo &MRI) {
947 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
948 if (!Op.isReg())
949 return false;
950 const auto Reg = Op.getReg();
951 if (Reg.isPhysical())
952 return false;
953 return !MRI.getType(Reg).isScalar();
954 }))
955 return true;
956 report("All register operands must have scalar types", &MI);
957 return false;
958}
959
960/// Check that types are consistent when two operands need to have the same
961/// number of vector elements.
962/// \return true if the types are valid.
963bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
964 const MachineInstr *MI) {
965 if (Ty0.isVector() != Ty1.isVector()) {
966 report("operand types must be all-vector or all-scalar", MI);
967 // Generally we try to report as many issues as possible at once, but in
968 // this case it's not clear what should we be comparing the size of the
969 // scalar with: the size of the whole vector or its lane. Instead of
970 // making an arbitrary choice and emitting not so helpful message, let's
971 // avoid the extra noise and stop here.
972 return false;
973 }
974
975 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
976 report("operand types must preserve number of vector elements", MI);
977 return false;
978 }
979
980 return true;
981}
982
983bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
984 auto Opcode = MI->getOpcode();
985 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
986 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
987 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
988 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
990 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
991 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
992 if (NoSideEffects && DeclHasSideEffects) {
993 report(Twine(TII->getName(Opcode),
994 " used with intrinsic that accesses memory"),
995 MI);
996 return false;
997 }
998 if (!NoSideEffects && !DeclHasSideEffects) {
999 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
1000 return false;
1001 }
1002 }
1003
1004 return true;
1005}
1006
1007bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1008 auto Opcode = MI->getOpcode();
1009 bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1010 Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1011 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1012 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1014 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1015 bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1016 if (NotConvergent && DeclIsConvergent) {
1017 report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1018 MI);
1019 return false;
1020 }
1021 if (!NotConvergent && !DeclIsConvergent) {
1022 report(
1023 Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1024 MI);
1025 return false;
1026 }
1027 }
1028
1029 return true;
1030}
1031
1032void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1033 if (isFunctionSelected)
1034 report("Unexpected generic instruction in a Selected function", MI);
1035
1036 const MCInstrDesc &MCID = MI->getDesc();
1037 unsigned NumOps = MI->getNumOperands();
1038
1039 // Branches must reference a basic block if they are not indirect
1040 if (MI->isBranch() && !MI->isIndirectBranch()) {
1041 bool HasMBB = false;
1042 for (const MachineOperand &Op : MI->operands()) {
1043 if (Op.isMBB()) {
1044 HasMBB = true;
1045 break;
1046 }
1047 }
1048
1049 if (!HasMBB) {
1050 report("Branch instruction is missing a basic block operand or "
1051 "isIndirectBranch property",
1052 MI);
1053 }
1054 }
1055
1056 // Check types.
1058 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1059 I != E; ++I) {
1060 if (!MCID.operands()[I].isGenericType())
1061 continue;
1062 // Generic instructions specify type equality constraints between some of
1063 // their operands. Make sure these are consistent.
1064 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1065 Types.resize(std::max(TypeIdx + 1, Types.size()));
1066
1067 const MachineOperand *MO = &MI->getOperand(I);
1068 if (!MO->isReg()) {
1069 report("generic instruction must use register operands", MI);
1070 continue;
1071 }
1072
1073 LLT OpTy = MRI->getType(MO->getReg());
1074 // Don't report a type mismatch if there is no actual mismatch, only a
1075 // type missing, to reduce noise:
1076 if (OpTy.isValid()) {
1077 // Only the first valid type for a type index will be printed: don't
1078 // overwrite it later so it's always clear which type was expected:
1079 if (!Types[TypeIdx].isValid())
1080 Types[TypeIdx] = OpTy;
1081 else if (Types[TypeIdx] != OpTy)
1082 report("Type mismatch in generic instruction", MO, I, OpTy);
1083 } else {
1084 // Generic instructions must have types attached to their operands.
1085 report("Generic instruction is missing a virtual register type", MO, I);
1086 }
1087 }
1088
1089 // Generic opcodes must not have physical register operands.
1090 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1091 const MachineOperand *MO = &MI->getOperand(I);
1092 if (MO->isReg() && MO->getReg().isPhysical())
1093 report("Generic instruction cannot have physical register", MO, I);
1094 }
1095
1096 // Avoid out of bounds in checks below. This was already reported earlier.
1097 if (MI->getNumOperands() < MCID.getNumOperands())
1098 return;
1099
1101 if (!TII->verifyInstruction(*MI, ErrorInfo))
1102 report(ErrorInfo.data(), MI);
1103
1104 // Verify properties of various specific instruction types
1105 unsigned Opc = MI->getOpcode();
1106 switch (Opc) {
1107 case TargetOpcode::G_ASSERT_SEXT:
1108 case TargetOpcode::G_ASSERT_ZEXT: {
1109 std::string OpcName =
1110 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1111 if (!MI->getOperand(2).isImm()) {
1112 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1113 break;
1114 }
1115
1116 Register Dst = MI->getOperand(0).getReg();
1117 Register Src = MI->getOperand(1).getReg();
1118 LLT SrcTy = MRI->getType(Src);
1119 int64_t Imm = MI->getOperand(2).getImm();
1120 if (Imm <= 0) {
1121 report(Twine(OpcName, " size must be >= 1"), MI);
1122 break;
1123 }
1124
1125 if (Imm >= SrcTy.getScalarSizeInBits()) {
1126 report(Twine(OpcName, " size must be less than source bit width"), MI);
1127 break;
1128 }
1129
1130 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1131 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1132
1133 // Allow only the source bank to be set.
1134 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1135 report(Twine(OpcName, " cannot change register bank"), MI);
1136 break;
1137 }
1138
1139 // Don't allow a class change. Do allow member class->regbank.
1140 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1141 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1142 report(
1143 Twine(OpcName, " source and destination register classes must match"),
1144 MI);
1145 break;
1146 }
1147
1148 break;
1149 }
1150
1151 case TargetOpcode::G_CONSTANT:
1152 case TargetOpcode::G_FCONSTANT: {
1153 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1154 if (DstTy.isVector())
1155 report("Instruction cannot use a vector result type", MI);
1156
1157 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1158 if (!MI->getOperand(1).isCImm()) {
1159 report("G_CONSTANT operand must be cimm", MI);
1160 break;
1161 }
1162
1163 const ConstantInt *CI = MI->getOperand(1).getCImm();
1164 if (CI->getBitWidth() != DstTy.getSizeInBits())
1165 report("inconsistent constant size", MI);
1166 } else {
1167 if (!MI->getOperand(1).isFPImm()) {
1168 report("G_FCONSTANT operand must be fpimm", MI);
1169 break;
1170 }
1171 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1172
1174 DstTy.getSizeInBits()) {
1175 report("inconsistent constant size", MI);
1176 }
1177 }
1178
1179 break;
1180 }
1181 case TargetOpcode::G_LOAD:
1182 case TargetOpcode::G_STORE:
1183 case TargetOpcode::G_ZEXTLOAD:
1184 case TargetOpcode::G_SEXTLOAD: {
1185 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1186 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1187 if (!PtrTy.isPointer())
1188 report("Generic memory instruction must access a pointer", MI);
1189
1190 // Generic loads and stores must have a single MachineMemOperand
1191 // describing that access.
1192 if (!MI->hasOneMemOperand()) {
1193 report("Generic instruction accessing memory must have one mem operand",
1194 MI);
1195 } else {
1196 const MachineMemOperand &MMO = **MI->memoperands_begin();
1197 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1198 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1200 ValTy.getSizeInBits()))
1201 report("Generic extload must have a narrower memory type", MI);
1202 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1204 ValTy.getSizeInBytes()))
1205 report("load memory size cannot exceed result size", MI);
1206 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1208 MMO.getSize().getValue()))
1209 report("store memory size cannot exceed value size", MI);
1210 }
1211
1212 const AtomicOrdering Order = MMO.getSuccessOrdering();
1213 if (Opc == TargetOpcode::G_STORE) {
1214 if (Order == AtomicOrdering::Acquire ||
1216 report("atomic store cannot use acquire ordering", MI);
1217
1218 } else {
1219 if (Order == AtomicOrdering::Release ||
1221 report("atomic load cannot use release ordering", MI);
1222 }
1223 }
1224
1225 break;
1226 }
1227 case TargetOpcode::G_PHI: {
1228 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1229 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1230 [this, &DstTy](const MachineOperand &MO) {
1231 if (!MO.isReg())
1232 return true;
1233 LLT Ty = MRI->getType(MO.getReg());
1234 if (!Ty.isValid() || (Ty != DstTy))
1235 return false;
1236 return true;
1237 }))
1238 report("Generic Instruction G_PHI has operands with incompatible/missing "
1239 "types",
1240 MI);
1241 break;
1242 }
1243 case TargetOpcode::G_BITCAST: {
1244 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1245 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1246 if (!DstTy.isValid() || !SrcTy.isValid())
1247 break;
1248
1249 if (SrcTy.isPointer() != DstTy.isPointer())
1250 report("bitcast cannot convert between pointers and other types", MI);
1251
1252 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1253 report("bitcast sizes must match", MI);
1254
1255 if (SrcTy == DstTy)
1256 report("bitcast must change the type", MI);
1257
1258 break;
1259 }
1260 case TargetOpcode::G_INTTOPTR:
1261 case TargetOpcode::G_PTRTOINT:
1262 case TargetOpcode::G_ADDRSPACE_CAST: {
1263 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1264 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1265 if (!DstTy.isValid() || !SrcTy.isValid())
1266 break;
1267
1268 verifyVectorElementMatch(DstTy, SrcTy, MI);
1269
1270 DstTy = DstTy.getScalarType();
1271 SrcTy = SrcTy.getScalarType();
1272
1273 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1274 if (!DstTy.isPointer())
1275 report("inttoptr result type must be a pointer", MI);
1276 if (SrcTy.isPointer())
1277 report("inttoptr source type must not be a pointer", MI);
1278 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1279 if (!SrcTy.isPointer())
1280 report("ptrtoint source type must be a pointer", MI);
1281 if (DstTy.isPointer())
1282 report("ptrtoint result type must not be a pointer", MI);
1283 } else {
1284 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1285 if (!SrcTy.isPointer() || !DstTy.isPointer())
1286 report("addrspacecast types must be pointers", MI);
1287 else {
1288 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1289 report("addrspacecast must convert different address spaces", MI);
1290 }
1291 }
1292
1293 break;
1294 }
1295 case TargetOpcode::G_PTR_ADD: {
1296 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1297 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1298 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1299 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1300 break;
1301
1302 if (!PtrTy.isPointerOrPointerVector())
1303 report("gep first operand must be a pointer", MI);
1304
1305 if (OffsetTy.isPointerOrPointerVector())
1306 report("gep offset operand must not be a pointer", MI);
1307
1308 if (PtrTy.isPointerOrPointerVector()) {
1309 const DataLayout &DL = MF->getDataLayout();
1310 unsigned AS = PtrTy.getAddressSpace();
1311 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1312 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1313 report("gep offset operand must match index size for address space",
1314 MI);
1315 }
1316 }
1317
1318 // TODO: Is the offset allowed to be a scalar with a vector?
1319 break;
1320 }
1321 case TargetOpcode::G_PTRMASK: {
1322 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1323 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1324 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1325 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1326 break;
1327
1328 if (!DstTy.isPointerOrPointerVector())
1329 report("ptrmask result type must be a pointer", MI);
1330
1331 if (!MaskTy.getScalarType().isScalar())
1332 report("ptrmask mask type must be an integer", MI);
1333
1334 verifyVectorElementMatch(DstTy, MaskTy, MI);
1335 break;
1336 }
1337 case TargetOpcode::G_SEXT:
1338 case TargetOpcode::G_ZEXT:
1339 case TargetOpcode::G_ANYEXT:
1340 case TargetOpcode::G_TRUNC:
1341 case TargetOpcode::G_FPEXT:
1342 case TargetOpcode::G_FPTRUNC: {
1343 // Number of operands and presense of types is already checked (and
1344 // reported in case of any issues), so no need to report them again. As
1345 // we're trying to report as many issues as possible at once, however, the
1346 // instructions aren't guaranteed to have the right number of operands or
1347 // types attached to them at this point
1348 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1349 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1350 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1351 if (!DstTy.isValid() || !SrcTy.isValid())
1352 break;
1353
1355 report("Generic extend/truncate can not operate on pointers", MI);
1356
1357 verifyVectorElementMatch(DstTy, SrcTy, MI);
1358
1359 unsigned DstSize = DstTy.getScalarSizeInBits();
1360 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1361 switch (MI->getOpcode()) {
1362 default:
1363 if (DstSize <= SrcSize)
1364 report("Generic extend has destination type no larger than source", MI);
1365 break;
1366 case TargetOpcode::G_TRUNC:
1367 case TargetOpcode::G_FPTRUNC:
1368 if (DstSize >= SrcSize)
1369 report("Generic truncate has destination type no smaller than source",
1370 MI);
1371 break;
1372 }
1373 break;
1374 }
1375 case TargetOpcode::G_SELECT: {
1376 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1377 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1378 if (!SelTy.isValid() || !CondTy.isValid())
1379 break;
1380
1381 // Scalar condition select on a vector is valid.
1382 if (CondTy.isVector())
1383 verifyVectorElementMatch(SelTy, CondTy, MI);
1384 break;
1385 }
1386 case TargetOpcode::G_MERGE_VALUES: {
1387 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1388 // e.g. s2N = MERGE sN, sN
1389 // Merging multiple scalars into a vector is not allowed, should use
1390 // G_BUILD_VECTOR for that.
1391 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1392 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1393 if (DstTy.isVector() || SrcTy.isVector())
1394 report("G_MERGE_VALUES cannot operate on vectors", MI);
1395
1396 const unsigned NumOps = MI->getNumOperands();
1397 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1398 report("G_MERGE_VALUES result size is inconsistent", MI);
1399
1400 for (unsigned I = 2; I != NumOps; ++I) {
1401 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1402 report("G_MERGE_VALUES source types do not match", MI);
1403 }
1404
1405 break;
1406 }
1407 case TargetOpcode::G_UNMERGE_VALUES: {
1408 unsigned NumDsts = MI->getNumOperands() - 1;
1409 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1410 for (unsigned i = 1; i < NumDsts; ++i) {
1411 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1412 report("G_UNMERGE_VALUES destination types do not match", MI);
1413 break;
1414 }
1415 }
1416
1417 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1418 if (DstTy.isVector()) {
1419 // This case is the converse of G_CONCAT_VECTORS.
1420 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1421 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1422 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1423 report("G_UNMERGE_VALUES source operand does not match vector "
1424 "destination operands",
1425 MI);
1426 } else if (SrcTy.isVector()) {
1427 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1428 // mismatched types as long as the total size matches:
1429 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1430 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1431 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1432 "destination operands",
1433 MI);
1434 } else {
1435 // This case is the converse of G_MERGE_VALUES.
1436 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1437 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1438 "destination operands",
1439 MI);
1440 }
1441 }
1442 break;
1443 }
1444 case TargetOpcode::G_BUILD_VECTOR: {
1445 // Source types must be scalars, dest type a vector. Total size of scalars
1446 // must match the dest vector size.
1447 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1448 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1449 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1450 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1451 break;
1452 }
1453
1454 if (DstTy.getElementType() != SrcEltTy)
1455 report("G_BUILD_VECTOR result element type must match source type", MI);
1456
1457 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1458 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1459
1460 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1461 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1462 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1463
1464 break;
1465 }
1466 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1467 // Source types must be scalars, dest type a vector. Scalar types must be
1468 // larger than the dest vector elt type, as this is a truncating operation.
1469 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1470 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1471 if (!DstTy.isVector() || SrcEltTy.isVector())
1472 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1473 MI);
1474 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1475 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1476 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1477 MI);
1478 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1479 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1480 "dest elt type",
1481 MI);
1482 break;
1483 }
1484 case TargetOpcode::G_CONCAT_VECTORS: {
1485 // Source types should be vectors, and total size should match the dest
1486 // vector size.
1487 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1488 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1489 if (!DstTy.isVector() || !SrcTy.isVector())
1490 report("G_CONCAT_VECTOR requires vector source and destination operands",
1491 MI);
1492
1493 if (MI->getNumOperands() < 3)
1494 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1495
1496 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1497 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1498 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1499 if (DstTy.getElementCount() !=
1500 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1501 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1502 break;
1503 }
1504 case TargetOpcode::G_ICMP:
1505 case TargetOpcode::G_FCMP: {
1506 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1507 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1508
1509 if ((DstTy.isVector() != SrcTy.isVector()) ||
1510 (DstTy.isVector() &&
1511 DstTy.getElementCount() != SrcTy.getElementCount()))
1512 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1513
1514 break;
1515 }
1516 case TargetOpcode::G_EXTRACT: {
1517 const MachineOperand &SrcOp = MI->getOperand(1);
1518 if (!SrcOp.isReg()) {
1519 report("extract source must be a register", MI);
1520 break;
1521 }
1522
1523 const MachineOperand &OffsetOp = MI->getOperand(2);
1524 if (!OffsetOp.isImm()) {
1525 report("extract offset must be a constant", MI);
1526 break;
1527 }
1528
1529 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1530 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1531 if (SrcSize == DstSize)
1532 report("extract source must be larger than result", MI);
1533
1534 if (DstSize + OffsetOp.getImm() > SrcSize)
1535 report("extract reads past end of register", MI);
1536 break;
1537 }
1538 case TargetOpcode::G_INSERT: {
1539 const MachineOperand &SrcOp = MI->getOperand(2);
1540 if (!SrcOp.isReg()) {
1541 report("insert source must be a register", MI);
1542 break;
1543 }
1544
1545 const MachineOperand &OffsetOp = MI->getOperand(3);
1546 if (!OffsetOp.isImm()) {
1547 report("insert offset must be a constant", MI);
1548 break;
1549 }
1550
1551 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1552 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1553
1554 if (DstSize <= SrcSize)
1555 report("inserted size must be smaller than total register", MI);
1556
1557 if (SrcSize + OffsetOp.getImm() > DstSize)
1558 report("insert writes past end of register", MI);
1559
1560 break;
1561 }
1562 case TargetOpcode::G_JUMP_TABLE: {
1563 if (!MI->getOperand(1).isJTI())
1564 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1565 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1566 if (!DstTy.isPointer())
1567 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1568 break;
1569 }
1570 case TargetOpcode::G_BRJT: {
1571 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1572 report("G_BRJT src operand 0 must be a pointer type", MI);
1573
1574 if (!MI->getOperand(1).isJTI())
1575 report("G_BRJT src operand 1 must be a jump table index", MI);
1576
1577 const auto &IdxOp = MI->getOperand(2);
1578 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1579 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1580 break;
1581 }
1582 case TargetOpcode::G_INTRINSIC:
1583 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1584 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1585 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1586 // TODO: Should verify number of def and use operands, but the current
1587 // interface requires passing in IR types for mangling.
1588 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1589 if (!IntrIDOp.isIntrinsicID()) {
1590 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1591 break;
1592 }
1593
1594 if (!verifyGIntrinsicSideEffects(MI))
1595 break;
1596 if (!verifyGIntrinsicConvergence(MI))
1597 break;
1598
1599 break;
1600 }
1601 case TargetOpcode::G_SEXT_INREG: {
1602 if (!MI->getOperand(2).isImm()) {
1603 report("G_SEXT_INREG expects an immediate operand #2", MI);
1604 break;
1605 }
1606
1607 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1608 int64_t Imm = MI->getOperand(2).getImm();
1609 if (Imm <= 0)
1610 report("G_SEXT_INREG size must be >= 1", MI);
1611 if (Imm >= SrcTy.getScalarSizeInBits())
1612 report("G_SEXT_INREG size must be less than source bit width", MI);
1613 break;
1614 }
1615 case TargetOpcode::G_BSWAP: {
1616 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1617 if (DstTy.getScalarSizeInBits() % 16 != 0)
1618 report("G_BSWAP size must be a multiple of 16 bits", MI);
1619 break;
1620 }
1621 case TargetOpcode::G_VSCALE: {
1622 if (!MI->getOperand(1).isCImm()) {
1623 report("G_VSCALE operand must be cimm", MI);
1624 break;
1625 }
1626 if (MI->getOperand(1).getCImm()->isZero()) {
1627 report("G_VSCALE immediate cannot be zero", MI);
1628 break;
1629 }
1630 break;
1631 }
1632 case TargetOpcode::G_INSERT_SUBVECTOR: {
1633 const MachineOperand &Src0Op = MI->getOperand(1);
1634 if (!Src0Op.isReg()) {
1635 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1636 break;
1637 }
1638
1639 const MachineOperand &Src1Op = MI->getOperand(2);
1640 if (!Src1Op.isReg()) {
1641 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1642 break;
1643 }
1644
1645 const MachineOperand &IndexOp = MI->getOperand(3);
1646 if (!IndexOp.isImm()) {
1647 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1648 break;
1649 }
1650
1651 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1652 LLT Src0Ty = MRI->getType(Src0Op.getReg());
1653 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1654
1655 if (!DstTy.isVector()) {
1656 report("Destination type must be a vector", MI);
1657 break;
1658 }
1659
1660 if (!Src0Ty.isVector()) {
1661 report("First source must be a vector", MI);
1662 break;
1663 }
1664
1665 if (!Src1Ty.isVector()) {
1666 report("Second source must be a vector", MI);
1667 break;
1668 }
1669
1670 if (DstTy != Src0Ty) {
1671 report("Destination type must match the first source vector type", MI);
1672 break;
1673 }
1674
1675 if (Src0Ty.getElementType() != Src1Ty.getElementType()) {
1676 report("Element type of source vectors must be the same", MI);
1677 break;
1678 }
1679
1680 if (IndexOp.getImm() != 0 &&
1681 Src1Ty.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1682 report("Index must be a multiple of the second source vector's "
1683 "minimum vector length",
1684 MI);
1685 break;
1686 }
1687 break;
1688 }
1689 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1690 const MachineOperand &SrcOp = MI->getOperand(1);
1691 if (!SrcOp.isReg()) {
1692 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1693 break;
1694 }
1695
1696 const MachineOperand &IndexOp = MI->getOperand(2);
1697 if (!IndexOp.isImm()) {
1698 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1699 break;
1700 }
1701
1702 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1703 LLT SrcTy = MRI->getType(SrcOp.getReg());
1704
1705 if (!DstTy.isVector()) {
1706 report("Destination type must be a vector", MI);
1707 break;
1708 }
1709
1710 if (!SrcTy.isVector()) {
1711 report("First source must be a vector", MI);
1712 break;
1713 }
1714
1715 if (DstTy.getElementType() != SrcTy.getElementType()) {
1716 report("Element type of vectors must be the same", MI);
1717 break;
1718 }
1719
1720 if (IndexOp.getImm() != 0 &&
1721 SrcTy.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1722 report("Index must be a multiple of the source vector's minimum vector "
1723 "length",
1724 MI);
1725 break;
1726 }
1727
1728 break;
1729 }
1730 case TargetOpcode::G_SHUFFLE_VECTOR: {
1731 const MachineOperand &MaskOp = MI->getOperand(3);
1732 if (!MaskOp.isShuffleMask()) {
1733 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1734 break;
1735 }
1736
1737 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1738 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1739 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1740
1741 if (Src0Ty != Src1Ty)
1742 report("Source operands must be the same type", MI);
1743
1744 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1745 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1746
1747 // Don't check that all operands are vector because scalars are used in
1748 // place of 1 element vectors.
1749 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1750 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1751
1752 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1753
1754 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1755 report("Wrong result type for shufflemask", MI);
1756
1757 for (int Idx : MaskIdxes) {
1758 if (Idx < 0)
1759 continue;
1760
1761 if (Idx >= 2 * SrcNumElts)
1762 report("Out of bounds shuffle index", MI);
1763 }
1764
1765 break;
1766 }
1767
1768 case TargetOpcode::G_SPLAT_VECTOR: {
1769 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1770 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1771
1772 if (!DstTy.isScalableVector()) {
1773 report("Destination type must be a scalable vector", MI);
1774 break;
1775 }
1776
1777 if (!SrcTy.isScalar()) {
1778 report("Source type must be a scalar", MI);
1779 break;
1780 }
1781
1783 SrcTy.getSizeInBits())) {
1784 report("Element type of the destination must be the same size or smaller "
1785 "than the source type",
1786 MI);
1787 break;
1788 }
1789
1790 break;
1791 }
1792 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1793 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1794 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1795 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1796
1797 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1798 report("Destination type must be a scalar or pointer", MI);
1799 break;
1800 }
1801
1802 if (!SrcTy.isVector()) {
1803 report("First source must be a vector", MI);
1804 break;
1805 }
1806
1807 auto TLI = MF->getSubtarget().getTargetLowering();
1808 if (IdxTy.getSizeInBits() !=
1809 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1810 report("Index type must match VectorIdxTy", MI);
1811 break;
1812 }
1813
1814 break;
1815 }
1816 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1817 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1818 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1819 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1820 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1821
1822 if (!DstTy.isVector()) {
1823 report("Destination type must be a vector", MI);
1824 break;
1825 }
1826
1827 if (VecTy != DstTy) {
1828 report("Destination type and vector type must match", MI);
1829 break;
1830 }
1831
1832 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1833 report("Inserted element must be a scalar or pointer", MI);
1834 break;
1835 }
1836
1837 auto TLI = MF->getSubtarget().getTargetLowering();
1838 if (IdxTy.getSizeInBits() !=
1839 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1840 report("Index type must match VectorIdxTy", MI);
1841 break;
1842 }
1843
1844 break;
1845 }
1846 case TargetOpcode::G_DYN_STACKALLOC: {
1847 const MachineOperand &DstOp = MI->getOperand(0);
1848 const MachineOperand &AllocOp = MI->getOperand(1);
1849 const MachineOperand &AlignOp = MI->getOperand(2);
1850
1851 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1852 report("dst operand 0 must be a pointer type", MI);
1853 break;
1854 }
1855
1856 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1857 report("src operand 1 must be a scalar reg type", MI);
1858 break;
1859 }
1860
1861 if (!AlignOp.isImm()) {
1862 report("src operand 2 must be an immediate type", MI);
1863 break;
1864 }
1865 break;
1866 }
1867 case TargetOpcode::G_MEMCPY_INLINE:
1868 case TargetOpcode::G_MEMCPY:
1869 case TargetOpcode::G_MEMMOVE: {
1870 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1871 if (MMOs.size() != 2) {
1872 report("memcpy/memmove must have 2 memory operands", MI);
1873 break;
1874 }
1875
1876 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1877 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1878 report("wrong memory operand types", MI);
1879 break;
1880 }
1881
1882 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1883 report("inconsistent memory operand sizes", MI);
1884
1885 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1886 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1887
1888 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1889 report("memory instruction operand must be a pointer", MI);
1890 break;
1891 }
1892
1893 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1894 report("inconsistent store address space", MI);
1895 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1896 report("inconsistent load address space", MI);
1897
1898 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1899 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1900 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1901
1902 break;
1903 }
1904 case TargetOpcode::G_BZERO:
1905 case TargetOpcode::G_MEMSET: {
1906 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1907 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1908 if (MMOs.size() != 1) {
1909 report(Twine(Name, " must have 1 memory operand"), MI);
1910 break;
1911 }
1912
1913 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1914 report(Twine(Name, " memory operand must be a store"), MI);
1915 break;
1916 }
1917
1918 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1919 if (!DstPtrTy.isPointer()) {
1920 report(Twine(Name, " operand must be a pointer"), MI);
1921 break;
1922 }
1923
1924 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1925 report("inconsistent " + Twine(Name, " address space"), MI);
1926
1927 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1928 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1929 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1930
1931 break;
1932 }
1933 case TargetOpcode::G_UBSANTRAP: {
1934 const MachineOperand &KindOp = MI->getOperand(0);
1935 if (!MI->getOperand(0).isImm()) {
1936 report("Crash kind must be an immediate", &KindOp, 0);
1937 break;
1938 }
1939 int64_t Kind = MI->getOperand(0).getImm();
1940 if (!isInt<8>(Kind))
1941 report("Crash kind must be 8 bit wide", &KindOp, 0);
1942 break;
1943 }
1944 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1945 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1946 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1947 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1948 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1949 if (!DstTy.isScalar())
1950 report("Vector reduction requires a scalar destination type", MI);
1951 if (!Src1Ty.isScalar())
1952 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1953 if (!Src2Ty.isVector())
1954 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1955 break;
1956 }
1957 case TargetOpcode::G_VECREDUCE_FADD:
1958 case TargetOpcode::G_VECREDUCE_FMUL:
1959 case TargetOpcode::G_VECREDUCE_FMAX:
1960 case TargetOpcode::G_VECREDUCE_FMIN:
1961 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1962 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1963 case TargetOpcode::G_VECREDUCE_ADD:
1964 case TargetOpcode::G_VECREDUCE_MUL:
1965 case TargetOpcode::G_VECREDUCE_AND:
1966 case TargetOpcode::G_VECREDUCE_OR:
1967 case TargetOpcode::G_VECREDUCE_XOR:
1968 case TargetOpcode::G_VECREDUCE_SMAX:
1969 case TargetOpcode::G_VECREDUCE_SMIN:
1970 case TargetOpcode::G_VECREDUCE_UMAX:
1971 case TargetOpcode::G_VECREDUCE_UMIN: {
1972 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1973 if (!DstTy.isScalar())
1974 report("Vector reduction requires a scalar destination type", MI);
1975 break;
1976 }
1977
1978 case TargetOpcode::G_SBFX:
1979 case TargetOpcode::G_UBFX: {
1980 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1981 if (DstTy.isVector()) {
1982 report("Bitfield extraction is not supported on vectors", MI);
1983 break;
1984 }
1985 break;
1986 }
1987 case TargetOpcode::G_SHL:
1988 case TargetOpcode::G_LSHR:
1989 case TargetOpcode::G_ASHR:
1990 case TargetOpcode::G_ROTR:
1991 case TargetOpcode::G_ROTL: {
1992 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1993 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1994 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1995 report("Shifts and rotates require operands to be either all scalars or "
1996 "all vectors",
1997 MI);
1998 break;
1999 }
2000 break;
2001 }
2002 case TargetOpcode::G_LLROUND:
2003 case TargetOpcode::G_LROUND: {
2004 verifyAllRegOpsScalar(*MI, *MRI);
2005 break;
2006 }
2007 case TargetOpcode::G_IS_FPCLASS: {
2008 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2009 LLT DestEltTy = DestTy.getScalarType();
2010 if (!DestEltTy.isScalar()) {
2011 report("Destination must be a scalar or vector of scalars", MI);
2012 break;
2013 }
2014 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2015 LLT SrcEltTy = SrcTy.getScalarType();
2016 if (!SrcEltTy.isScalar()) {
2017 report("Source must be a scalar or vector of scalars", MI);
2018 break;
2019 }
2020 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2021 break;
2022 const MachineOperand &TestMO = MI->getOperand(2);
2023 if (!TestMO.isImm()) {
2024 report("floating-point class set (operand 2) must be an immediate", MI);
2025 break;
2026 }
2027 int64_t Test = TestMO.getImm();
2028 if (Test < 0 || Test > fcAllFlags) {
2029 report("Incorrect floating-point class set (operand 2)", MI);
2030 break;
2031 }
2032 break;
2033 }
2034 case TargetOpcode::G_PREFETCH: {
2035 const MachineOperand &AddrOp = MI->getOperand(0);
2036 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2037 report("addr operand must be a pointer", &AddrOp, 0);
2038 break;
2039 }
2040 const MachineOperand &RWOp = MI->getOperand(1);
2041 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2042 report("rw operand must be an immediate 0-1", &RWOp, 1);
2043 break;
2044 }
2045 const MachineOperand &LocalityOp = MI->getOperand(2);
2046 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2047 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2048 break;
2049 }
2050 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2051 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2052 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2053 break;
2054 }
2055 break;
2056 }
2057 case TargetOpcode::G_ASSERT_ALIGN: {
2058 if (MI->getOperand(2).getImm() < 1)
2059 report("alignment immediate must be >= 1", MI);
2060 break;
2061 }
2062 case TargetOpcode::G_CONSTANT_POOL: {
2063 if (!MI->getOperand(1).isCPI())
2064 report("Src operand 1 must be a constant pool index", MI);
2065 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2066 report("Dst operand 0 must be a pointer", MI);
2067 break;
2068 }
2069 default:
2070 break;
2071 }
2072}
2073
// Per-instruction checks that run before any operand-level verification:
// operand count vs. MCInstrDesc, flag/PHI placement rules, inline-asm tied
// operands, unspillable terminators, DBG_VALUE locations, memory-operand
// flags, slot-index mapping, then opcode-specific checks for COPY,
// STATEPOINT, INSERT_SUBREG and REG_SEQUENCE.
2074 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2075   const MCInstrDesc &MCID = MI->getDesc();
       // Every explicit operand declared by the descriptor must be present.
2076   if (MI->getNumOperands() < MCID.getNumOperands()) {
2077     report("Too few operands", MI);
2078     errs() << MCID.getNumOperands() << " operands expected, but "
2079            << MI->getNumOperands() << " given.\n";
2080   }
2081 
       // The NoConvergent flag only makes sense on instructions the
       // descriptor marks convergent in the first place.
2082   if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2083     report("NoConvergent flag expected only on convergent instructions.", MI);
2084 
2085   if (MI->isPHI()) {
       // NOTE(review): the hasProperty() argument line (original line 2087,
       // presumably MachineFunctionProperties::Property::NoPHIs) was lost in
       // extraction — confirm against upstream before relying on this text.
2086     if (MF->getProperties().hasProperty(
2088       report("Found PHI instruction with NoPHIs property set", MI);
2089 
       // PHIs must form a contiguous prefix of the basic block.
2090     if (FirstNonPHI)
2091       report("Found PHI instruction after non-PHI", MI);
2092   } else if (FirstNonPHI == nullptr)
2093     FirstNonPHI = MI;
2094 
2095   // Check the tied operands.
2096   if (MI->isInlineAsm())
2097     verifyInlineAsm(MI);
2098 
2099   // Check that unspillable terminators define a reg and have at most one use.
2100   if (TII->isUnspillableTerminator(MI)) {
2101     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2102       report("Unspillable Terminator does not define a reg", MI);
2103     Register Def = MI->getOperand(0).getReg();
       // NOTE(review): the property-enum argument line (original line 2106)
       // is missing here due to extraction — verify against upstream.
2104     if (Def.isVirtual() &&
2105         !MF->getProperties().hasProperty(
2107         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2108       report("Unspillable Terminator expected to have at most one use!", MI);
2109   }
2110 
2111   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2112   // DBG_VALUEs: these are convenient to use in tests, but should never get
2113   // generated.
2114   if (MI->isDebugValue() && MI->getNumOperands() == 4)
2115     if (!MI->getDebugLoc())
2116       report("Missing DebugLoc for debug instruction", MI);
2117 
2118   // Meta instructions should never be the subject of debug value tracking,
2119   // they don't create a value in the output program at all.
2120   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2121     report("Metadata instruction should not have a value tracking number", MI);
2122 
2123   // Check the MachineMemOperands for basic consistency.
       // A memory operand describing a load/store implies the corresponding
       // mayLoad/mayStore flag must be set on the instruction.
2124   for (MachineMemOperand *Op : MI->memoperands()) {
2125     if (Op->isLoad() && !MI->mayLoad())
2126       report("Missing mayLoad flag", MI);
2127     if (Op->isStore() && !MI->mayStore())
2128       report("Missing mayStore flag", MI);
2129   }
2130 
2131   // Debug values must not have a slot index.
2132   // Other instructions must have one, unless they are inside a bundle.
2133   if (LiveInts) {
2134     bool mapped = !LiveInts->isNotInMIMap(*MI);
2135     if (MI->isDebugOrPseudoInstr()) {
2136       if (mapped)
2137         report("Debug instruction has a slot index", MI);
2138     } else if (MI->isInsideBundle()) {
2139       if (mapped)
2140         report("Instruction inside bundle has a slot index", MI);
2141     } else {
2142       if (!mapped)
2143         report("Missing slot index", MI);
2144     }
2145   }
2146 
       // Generic (pre-isel) opcodes are verified separately and we return early.
       // NOTE(review): the guard line (original line 2148, presumably
       // "if (isPreISelGenericOpcode(Opc)) {") was lost in extraction —
       // restore from upstream; the closing brace below belongs to it.
2147   unsigned Opc = MCID.getOpcode();
2149     verifyPreISelGenericInstruction(MI);
2150     return;
2151   }
2152 
       // NOTE(review): the "StringRef ErrorInfo;" declaration (original line
       // 2153) is missing here due to extraction — verify against upstream.
2154   if (!TII->verifyInstruction(*MI, ErrorInfo))
2155     report(ErrorInfo.data(), MI);
2156 
2157   // Verify properties of various specific instruction types
2158   switch (MI->getOpcode()) {
2159   case TargetOpcode::COPY: {
2160     const MachineOperand &DstOp = MI->getOperand(0);
2161     const MachineOperand &SrcOp = MI->getOperand(1);
2162     const Register SrcReg = SrcOp.getReg();
2163     const Register DstReg = DstOp.getReg();
2164 
2165     LLT DstTy = MRI->getType(DstReg);
2166     LLT SrcTy = MRI->getType(SrcReg);
2167     if (SrcTy.isValid() && DstTy.isValid()) {
2168       // If both types are valid, check that the types are the same.
2169       if (SrcTy != DstTy) {
2170         report("Copy Instruction is illegal with mismatching types", MI);
2171         errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
2172       }
2173 
2174       break;
2175     }
2176 
2177     if (!SrcTy.isValid() && !DstTy.isValid())
2178       break;
2179 
2180     // If we have only one valid type, this is likely a copy between a virtual
2181     // and physical register.
2182     TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2183     TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
       // Prefer the size of the minimal physical register class matching the
       // other side's LLT, when one exists, over the raw register size.
2184     if (SrcReg.isPhysical() && DstTy.isValid()) {
2185       const TargetRegisterClass *SrcRC =
2186           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2187       if (SrcRC)
2188         SrcSize = TRI->getRegSizeInBits(*SrcRC);
2189     }
2190 
2191     if (DstReg.isPhysical() && SrcTy.isValid()) {
2192       const TargetRegisterClass *DstRC =
2193           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2194       if (DstRC)
2195         DstSize = TRI->getRegSizeInBits(*DstRC);
2196     }
2197 
2198     // The next two checks allow COPY between physical and virtual registers,
2199     // when the virtual register has a scalable size and the physical register
2200     // has a fixed size. These checks allow COPY between *potentialy* mismatched
2201     // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2202     // be able to resolve a fixed size for the scalable vector, and at that
2203     // point this function will know for sure whether the sizes are mismatched
2204     // and correctly report a size mismatch.
2205     if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2206         !SrcSize.isScalable())
2207       break;
2208     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2209         !DstSize.isScalable())
2210       break;
2211 
       // Only flag a size mismatch when neither side uses a subregister index;
       // subreg copies legitimately move differently-sized values.
2212     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2213       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2214         report("Copy Instruction is illegal with mismatching sizes", MI);
2215         errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
2216                << "\n";
2217       }
2218     }
2219     break;
2220   }
2221   case TargetOpcode::STATEPOINT: {
2222     StatepointOpers SO(MI);
       // The ID / number-of-bytes / number-of-call-args meta operands must all
       // be immediates.
2223     if (!MI->getOperand(SO.getIDPos()).isImm() ||
2224         !MI->getOperand(SO.getNBytesPos()).isImm() ||
2225         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2226       report("meta operands to STATEPOINT not constant!", MI);
2227       break;
2228     }
2229 
       // A stack-map constant is encoded as a ConstantOp marker immediately
       // followed by the immediate payload; Offset names the payload slot.
2230     auto VerifyStackMapConstant = [&](unsigned Offset) {
2231       if (Offset >= MI->getNumOperands()) {
2232         report("stack map constant to STATEPOINT is out of range!", MI);
2233         return;
2234       }
2235       if (!MI->getOperand(Offset - 1).isImm() ||
2236           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2237           !MI->getOperand(Offset).isImm())
2238         report("stack map constant to STATEPOINT not well formed!", MI);
2239     };
2240     VerifyStackMapConstant(SO.getCCIdx());
2241     VerifyStackMapConstant(SO.getFlagsIdx());
2242     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2243     VerifyStackMapConstant(SO.getNumGCPtrIdx());
2244     VerifyStackMapConstant(SO.getNumAllocaIdx());
2245     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2246 
2247     // Verify that all explicit statepoint defs are tied to gc operands as
2248     // they are expected to be a relocation of gc operands.
2249     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2250     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2251     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2252       unsigned UseOpIdx;
2253       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2254         report("STATEPOINT defs expected to be tied", MI);
2255         break;
2256       }
2257       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2258         report("STATEPOINT def tied to non-gc operand", MI);
2259         break;
2260       }
2261     }
2262 
2263     // TODO: verify we have properly encoded deopt arguments
2264   } break;
2265   case TargetOpcode::INSERT_SUBREG: {
       // The inserted value (operand 2, possibly via a subreg index) must fit
       // in the destination subregister named by operand 3.
2266     unsigned InsertedSize;
2267     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2268       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2269     else
2270       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2271     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2272     if (SubRegSize < InsertedSize) {
2273       report("INSERT_SUBREG expected inserted value to have equal or lesser "
2274              "size than the subreg it was inserted into", MI);
2275       break;
2276     }
2277   } break;
2278   case TargetOpcode::REG_SEQUENCE: {
       // Operands come as (reg, subreg-index) pairs after the def, so the
       // total count must be odd.
2279     unsigned NumOps = MI->getNumOperands();
2280     if (!(NumOps & 1)) {
2281       report("Invalid number of operands for REG_SEQUENCE", MI);
2282       break;
2283     }
2284 
2285     for (unsigned I = 1; I != NumOps; I += 2) {
2286       const MachineOperand &RegOp = MI->getOperand(I);
2287       const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2288 
2289       if (!RegOp.isReg())
2290         report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2291 
2292       if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2293           SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2294         report("Invalid subregister index operand for REG_SEQUENCE",
2295                &SubRegOp, I + 1);
2296       }
2297     }
2298 
2299     Register DstReg = MI->getOperand(0).getReg();
2300     if (DstReg.isPhysical())
2301       report("REG_SEQUENCE does not support physical register results", MI);
2302 
2303     if (MI->getOperand(0).getSubReg())
2304       report("Invalid subreg result for REG_SEQUENCE", MI);
2305 
2306     break;
2307   }
2308   }
2309 }
2310
// Verifies a single operand of an instruction: explicit-def placement,
// tied-operand consistency, register-class/bank constraints for physical,
// virtual and generic-virtual registers, and type-specific checks for
// register-mask, basic-block, frame-index and CFI-index operands.
2311 void
2312 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2313   const MachineInstr *MI = MO->getParent();
2314   const MCInstrDesc &MCID = MI->getDesc();
2315   unsigned NumDefs = MCID.getNumDefs();
       // PATCHPOINT may or may not produce a result; only operand 0 can be a
       // def, and only when it is a register operand.
2316   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2317     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2318 
2319   // The first MCID.NumDefs operands must be explicit register defines
2320   if (MONum < NumDefs) {
2321     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2322     if (!MO->isReg())
2323       report("Explicit definition must be a register", MO, MONum);
2324     else if (!MO->isDef() && !MCOI.isOptionalDef())
2325       report("Explicit definition marked as use", MO, MONum);
2326     else if (MO->isImplicit())
2327       report("Explicit definition marked as implicit", MO, MONum);
2328   } else if (MONum < MCID.getNumOperands()) {
2329     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2330     // Don't check if it's the last operand in a variadic instruction. See,
2331     // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2332     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2333     if (!IsOptional) {
2334       if (MO->isReg()) {
2335         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2336           report("Explicit operand marked as def", MO, MONum);
2337         if (MO->isImplicit())
2338           report("Explicit operand marked as implicit", MO, MONum);
2339       }
2340 
2341       // Check that an instruction has register operands only as expected.
2342       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2343           !MO->isReg() && !MO->isFI())
2344         report("Expected a register operand.", MO, MONum);
       // NOTE(review): the condition lines (original lines 2346-2347, the
       // OperandType immediate/PC-rel test) were lost in extraction — restore
       // from upstream before relying on this text.
2345       if (MO->isReg()) {
2348             !TII->isPCRelRegisterOperandLegal(*MO)))
2349           report("Expected a non-register operand.", MO, MONum);
2350       }
2351     }
2352 
       // An operand the descriptor declares as TIED_TO must be a register,
       // carry the tie flag, and agree with the instruction's own tie links.
2353     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2354     if (TiedTo != -1) {
2355       if (!MO->isReg())
2356         report("Tied use must be a register", MO, MONum);
2357       else if (!MO->isTied())
2358         report("Operand should be tied", MO, MONum);
2359       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2360         report("Tied def doesn't match MCInstrDesc", MO, MONum);
2361       else if (MO->getReg().isPhysical()) {
2362         const MachineOperand &MOTied = MI->getOperand(TiedTo);
2363         if (!MOTied.isReg())
2364           report("Tied counterpart must be a register", &MOTied, TiedTo);
2365         else if (MOTied.getReg().isPhysical() &&
2366                  MO->getReg() != MOTied.getReg())
2367           report("Tied physical registers must match.", &MOTied, TiedTo);
2368       }
2369     } else if (MO->isReg() && MO->isTied())
2370       report("Explicit operand should not be tied", MO, MONum);
2371   } else if (!MI->isVariadic()) {
2372     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2373     if (!MO->isValidExcessOperand())
2374       report("Extra explicit operand on non-variadic instruction", MO, MONum);
2375   }
2376 
2377   switch (MO->getType()) {
       // NOTE(review): the "case MachineOperand::MO_Register:" label (original
       // line 2378) was lost in extraction — the code below is the register
       // case; confirm against upstream.
2379     // Verify debug flag on debug instructions. Check this first because reg0
2380     // indicates an undefined debug value.
2381     if (MI->isDebugInstr() && MO->isUse()) {
2382       if (!MO->isDebug())
2383         report("Register operand must be marked debug", MO, MONum);
2384     } else if (MO->isDebug()) {
2385       report("Register operand must not be marked debug", MO, MONum);
2386     }
2387 
2388     const Register Reg = MO->getReg();
2389     if (!Reg)
2390       return;
2391     if (MRI->tracksLiveness() && !MI->isDebugInstr())
2392       checkLiveness(MO, MONum);
2393 
2394     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2395         MO->getReg().isVirtual()) // TODO: Apply to physregs too
2396       report("Undef virtual register def operands require a subregister", MO, MONum);
2397 
2398     // Verify the consistency of tied operands.
       // Tie links are symmetric: following the link from the other operand
       // must come straight back here.
2399     if (MO->isTied()) {
2400       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2401       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2402       if (!OtherMO.isReg())
2403         report("Must be tied to a register", MO, MONum);
2404       if (!OtherMO.isTied())
2405         report("Missing tie flags on tied operand", MO, MONum);
2406       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2407         report("Inconsistent tie links", MO, MONum);
2408       if (MONum < MCID.getNumDefs()) {
2409         if (OtherIdx < MCID.getNumOperands()) {
2410           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2411             report("Explicit def tied to explicit use without tie constraint",
2412                    MO, MONum);
2413         } else {
2414           if (!OtherMO.isImplicit())
2415             report("Explicit def should be tied to implicit use", MO, MONum);
2416         }
2417       }
2418     }
2419 
2420     // Verify two-address constraints after the twoaddressinstruction pass.
2421     // Both twoaddressinstruction pass and phi-node-elimination pass call
2422     // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2423     // after twoaddressinstruction pass not after phi-node-elimination pass. So
2424     // we shouldn't use the IsSSA as the condition, we should based on
2425     // TiedOpsRewritten property to verify two-address constraints, this
2426     // property will be set in twoaddressinstruction pass.
       // NOTE(review): the property-enum argument line (original line 2429,
       // presumably Property::TiedOpsRewritten) was lost in extraction —
       // verify against upstream.
2427     unsigned DefIdx;
2428     if (MF->getProperties().hasProperty(
2430         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2431         Reg != MI->getOperand(DefIdx).getReg())
2432       report("Two-address instruction operands must be identical", MO, MONum);
2433 
2434     // Check register classes.
2435     unsigned SubIdx = MO->getSubReg();
2436 
2437     if (Reg.isPhysical()) {
2438       if (SubIdx) {
2439         report("Illegal subregister index for physical register", MO, MONum);
2440         return;
2441       }
       // A physical register must belong to the class the descriptor demands
       // for this operand slot, when one is specified.
2442       if (MONum < MCID.getNumOperands()) {
2443         if (const TargetRegisterClass *DRC =
2444               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2445           if (!DRC->contains(Reg)) {
2446             report("Illegal physical register for instruction", MO, MONum);
2447             errs() << printReg(Reg, TRI) << " is not a "
2448                    << TRI->getRegClassName(DRC) << " register.\n";
2449           }
2450         }
2451       }
2452       if (MO->isRenamable()) {
2453         if (MRI->isReserved(Reg)) {
2454           report("isRenamable set on reserved register", MO, MONum);
2455           return;
2456         }
2457       }
2458     } else {
2459       // Virtual register.
2460       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2461       if (!RC) {
2462         // This is a generic virtual register.
2463 
2464         // Do not allow undef uses for generic virtual registers. This ensures
2465         // getVRegDef can never fail and return null on a generic register.
2466         //
2467         // FIXME: This restriction should probably be broadened to all SSA
2468         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2469         // run on the SSA function just before phi elimination.
2470         if (MO->isUndef())
2471           report("Generic virtual register use cannot be undef", MO, MONum);
2472 
2473         // Debug value instruction is permitted to use undefined vregs.
2474         // This is a performance measure to skip the overhead of immediately
2475         // pruning unused debug operands. The final undef substitution occurs
2476         // when debug values are allocated in LDVImpl::handleDebugValue, so
2477         // these verifications always apply after this pass.
2478         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2479             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2480           // If we're post-Select, we can't have gvregs anymore.
2481           if (isFunctionSelected) {
2482             report("Generic virtual register invalid in a Selected function",
2483                    MO, MONum);
2484             return;
2485           }
2486 
2487           // The gvreg must have a type and it must not have a SubIdx.
2488           LLT Ty = MRI->getType(Reg);
2489           if (!Ty.isValid()) {
2490             report("Generic virtual register must have a valid type", MO,
2491                    MONum);
2492             return;
2493           }
2494 
2495           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2496           const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2497 
2498           // If we're post-RegBankSelect, the gvreg must have a bank.
2499           if (!RegBank && isFunctionRegBankSelected) {
2500             report("Generic virtual register must have a bank in a "
2501                    "RegBankSelected function",
2502                    MO, MONum);
2503             return;
2504           }
2505 
2506           // Make sure the register fits into its register bank if any.
2507           if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2508               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2509             report("Register bank is too small for virtual register", MO,
2510                    MONum);
2511             errs() << "Register bank " << RegBank->getName() << " too small("
2512                    << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2513                    << Ty.getSizeInBits() << "-bits\n";
2514             return;
2515           }
2516         }
2517 
2518         if (SubIdx) {
2519           report("Generic virtual register does not allow subregister index", MO,
2520                  MONum);
2521           return;
2522         }
2523 
2524         // If this is a target specific instruction and this operand
2525         // has register class constraint, the virtual register must
2526         // comply to it.
2527         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2528             MONum < MCID.getNumOperands() &&
2529             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2530           report("Virtual register does not match instruction constraint", MO,
2531                  MONum);
2532           errs() << "Expect register class "
2533                  << TRI->getRegClassName(
2534                         TII->getRegClass(MCID, MONum, TRI, *MF))
2535                  << " but got nothing\n";
2536           return;
2537         }
2538 
2539         break;
2540       }
       // A class-constrained vreg with a subreg index must use a class that
       // fully supports that index.
2541       if (SubIdx) {
2542         const TargetRegisterClass *SRC =
2543           TRI->getSubClassWithSubReg(RC, SubIdx);
2544         if (!SRC) {
2545           report("Invalid subregister index for virtual register", MO, MONum);
2546           errs() << "Register class " << TRI->getRegClassName(RC)
2547                  << " does not support subreg index " << SubIdx << "\n";
2548           return;
2549         }
2550         if (RC != SRC) {
2551           report("Invalid register class for subregister index", MO, MONum);
2552           errs() << "Register class " << TRI->getRegClassName(RC)
2553                  << " does not fully support subreg index " << SubIdx << "\n";
2554           return;
2555         }
2556       }
2557       if (MONum < MCID.getNumOperands()) {
2558         if (const TargetRegisterClass *DRC =
2559               TII->getRegClass(MCID, MONum, TRI, *MF)) {
       // With a subreg index, the descriptor's class applies to the
       // subregister, so lift it to the matching super-register class first.
2560           if (SubIdx) {
2561             const TargetRegisterClass *SuperRC =
2562                 TRI->getLargestLegalSuperClass(RC, *MF);
2563             if (!SuperRC) {
2564               report("No largest legal super class exists.", MO, MONum);
2565               return;
2566             }
2567             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2568             if (!DRC) {
2569               report("No matching super-reg register class.", MO, MONum);
2570               return;
2571             }
2572           }
2573           if (!RC->hasSuperClassEq(DRC)) {
2574             report("Illegal virtual register for instruction", MO, MONum);
2575             errs() << "Expected a " << TRI->getRegClassName(DRC)
2576                    << " register, but got a " << TRI->getRegClassName(RC)
2577                    << " register\n";
2578           }
2579         }
2580       }
2581     }
2582     break;
2583   }
2584 
       // NOTE(review): the "case MachineOperand::MO_RegisterMask:" label
       // (original line 2585) was lost in extraction.
2586     regMasks.push_back(MO->getRegMask());
2587     break;
2588 
       // NOTE(review): the "case MachineOperand::MO_MachineBasicBlock:" label
       // (original line 2589) was lost in extraction.
2590     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2591       report("PHI operand is not in the CFG", MO, MONum);
2592     break;
2593 
       // NOTE(review): the "case MachineOperand::MO_FrameIndex:" label
       // (original line 2594) was lost in extraction.
2595     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2596         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2597       int FI = MO->getIndex();
2598       LiveInterval &LI = LiveStks->getInterval(FI);
2599       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2600 
2601       bool stores = MI->mayStore();
2602       bool loads = MI->mayLoad();
2603       // For a memory-to-memory move, we need to check if the frame
2604       // index is used for storing or loading, by inspecting the
2605       // memory operands.
2606       if (stores && loads) {
2607         for (auto *MMO : MI->memoperands()) {
2608           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2609           if (PSV == nullptr) continue;
       // NOTE(review): the declaration line (original line 2610, presumably
       // "const FixedStackPseudoSourceValue *Value =") was lost in extraction.
2611               dyn_cast<FixedStackPseudoSourceValue>(PSV);
2612           if (Value == nullptr) continue;
2613           if (Value->getFrameIndex() != FI) continue;
2614 
2615           if (MMO->isStore())
2616             loads = false;
2617           else
2618             stores = false;
2619           break;
2620         }
       // If no memoperand named this frame index, we cannot tell which
       // direction the access goes.
2621         if (loads == stores)
2622           report("Missing fixed stack memoperand.", MI);
2623       }
       // Loads read at the early (use) slot, stores write at the normal slot.
2624       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2625         report("Instruction loads from dead spill slot", MO, MONum);
2626         errs() << "Live stack: " << LI << '\n';
2627       }
2628       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2629         report("Instruction stores to dead spill slot", MO, MONum);
2630         errs() << "Live stack: " << LI << '\n';
2631       }
2632     }
2633     break;
2634 
       // NOTE(review): the "case MachineOperand::MO_CFIIndex:" label
       // (original line 2635) was lost in extraction.
2636     if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2637       report("CFI instruction has invalid index", MO, MONum);
2638     break;
2639 
2640   default:
2641     break;
2642   }
2643 }
2644
2645void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2646 unsigned MONum, SlotIndex UseIdx,
2647 const LiveRange &LR,
2648 Register VRegOrUnit,
2649 LaneBitmask LaneMask) {
2650 const MachineInstr *MI = MO->getParent();
2651 LiveQueryResult LRQ = LR.Query(UseIdx);
2652 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2653 // Check if we have a segment at the use, note however that we only need one
2654 // live subregister range, the others may be dead.
2655 if (!HasValue && LaneMask.none()) {
2656 report("No live segment at use", MO, MONum);
2657 report_context_liverange(LR);
2658 report_context_vreg_regunit(VRegOrUnit);
2659 report_context(UseIdx);
2660 }
2661 if (MO->isKill() && !LRQ.isKill()) {
2662 report("Live range continues after kill flag", MO, MONum);
2663 report_context_liverange(LR);
2664 report_context_vreg_regunit(VRegOrUnit);
2665 if (LaneMask.any())
2666 report_context_lanemask(LaneMask);
2667 report_context(UseIdx);
2668 }
2669}
2670
// Verifies that a def operand at DefIdx is consistent with live range LR:
// the range must contain a value number whose definition matches this slot
// (modulo the early-clobber exception below), and a dead flag on the operand
// must agree with the range actually ending here. SubRangeCheck/LaneMask are
// set when LR is a subregister lane range rather than the main range.
2671 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2672                                          unsigned MONum, SlotIndex DefIdx,
2673                                          const LiveRange &LR,
2674                                          Register VRegOrUnit,
2675                                          bool SubRangeCheck,
2676                                          LaneBitmask LaneMask) {
2677   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2678     // The LR can correspond to the whole reg and its def slot is not obliged
2679     // to be the same as the MO' def slot. E.g. when we check here "normal"
2680     // subreg MO but there is other EC subreg MO in the same instruction so the
2681     // whole reg has EC def slot and differs from the currently checked MO' def
2682     // slot. For example:
2683     // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2684     // Check that there is an early-clobber def of the same superregister
2685     // somewhere is performed in visitMachineFunctionAfter()
       // The value number's def slot must match this operand's def slot; a
       // mismatch is tolerated only within the same instruction when the
       // range's def is early-clobber and this def is a normal register slot.
2686     if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2687         !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2688         (VNI->def != DefIdx &&
2689          (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2690       report("Inconsistent valno->def", MO, MONum);
2691       report_context_liverange(LR);
2692       report_context_vreg_regunit(VRegOrUnit);
2693       if (LaneMask.any())
2694         report_context_lanemask(LaneMask);
2695       report_context(*VNI);
2696       report_context(DefIdx);
2697     }
2698   } else {
       // No value number at the def slot at all: the live range is missing a
       // segment for this def.
2699     report("No live segment at def", MO, MONum);
2700     report_context_liverange(LR);
2701     report_context_vreg_regunit(VRegOrUnit);
2702     if (LaneMask.any())
2703       report_context_lanemask(LaneMask);
2704     report_context(DefIdx);
2705   }
2706   // Check that, if the dead def flag is present, LiveInts agree.
2707   if (MO->isDead()) {
2708     LiveQueryResult LRQ = LR.Query(DefIdx);
2709     if (!LRQ.isDeadDef()) {
2710       assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2711       // A dead subreg def only tells us that the specific subreg is dead. There
2712       // could be other non-dead defs of other subregs, or we could have other
2713       // parts of the register being live through the instruction. So unless we
2714       // are checking liveness for a subrange it is ok for the live range to
2715       // continue, given that we have a dead def of a subregister.
2716       if (SubRangeCheck || MO->getSubReg() == 0) {
2717         report("Live range continues after dead def flag", MO, MONum);
2718         report_context_liverange(LR);
2719         report_context_vreg_regunit(VRegOrUnit);
2720         if (LaneMask.any())
2721           report_context_lanemask(LaneMask);
2722       }
2723     }
2724   }
2725 }
2726
// Check liveness-related invariants for operand \p MO (operand index \p MONum
// of its parent instruction): agreement with LiveVariables kill info,
// LiveIntervals liveness at the operand's use/def slot, and the verifier's own
// running regsLive set. Reports any violation via report().
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      // A subreg def, or a subreg use that actually reads the register,
      // requires subranges when the target tracks subregister liveness.
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
          MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          // Collect which of the operand's lanes have a live-in value at the
          // use slot; at least one of them must be live.
          LaneBitmask MOMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      // Early-clobber defs live at the use slot of the instruction.
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
2895
2896// This function gets called after visiting all instructions in a bundle. The
2897// argument points to the bundle header.
2898// Normal stand-alone instructions are also considered 'bundles', and this
2899// function is called for all of them.
2900void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2901 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2902 set_union(MInfo.regsKilled, regsKilled);
2903 set_subtract(regsLive, regsKilled); regsKilled.clear();
2904 // Kill any masked registers.
2905 while (!regMasks.empty()) {
2906 const uint32_t *Mask = regMasks.pop_back_val();
2907 for (Register Reg : regsLive)
2908 if (Reg.isPhysical() &&
2909 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2910 regsDead.push_back(Reg);
2911 }
2912 set_subtract(regsLive, regsDead); regsDead.clear();
2913 set_union(regsLive, regsDefined); regsDefined.clear();
2914}
2915
2916void
2917MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2918 MBBInfoMap[MBB].regsLiveOut = regsLive;
2919 regsLive.clear();
2920
2921 if (Indexes) {
2922 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2923 if (!(stop > lastIndex)) {
2924 report("Block ends before last instruction index", MBB);
2925 errs() << "Block ends at " << stop
2926 << " last instruction was at " << lastIndex << '\n';
2927 }
2928 lastIndex = stop;
2929 }
2930}
2931
2932namespace {
2933// This implements a set of registers that serves as a filter: can filter other
2934// sets by passing through elements not in the filter and blocking those that
2935// are. Any filter implicitly includes the full set of physical registers upon
2936// creation, thus filtering them all out. The filter itself as a set only grows,
2937// and needs to be as efficient as possible.
2938struct VRegFilter {
2939 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2940 // no duplicates. Both virtual and physical registers are fine.
2941 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2942 SmallVector<Register, 0> VRegsBuffer;
2943 filterAndAdd(FromRegSet, VRegsBuffer);
2944 }
2945 // Filter \p FromRegSet through the filter and append passed elements into \p
2946 // ToVRegs. All elements appended are then added to the filter itself.
2947 // \returns true if anything changed.
2948 template <typename RegSetT>
2949 bool filterAndAdd(const RegSetT &FromRegSet,
2950 SmallVectorImpl<Register> &ToVRegs) {
2951 unsigned SparseUniverse = Sparse.size();
2952 unsigned NewSparseUniverse = SparseUniverse;
2953 unsigned NewDenseSize = Dense.size();
2954 size_t Begin = ToVRegs.size();
2955 for (Register Reg : FromRegSet) {
2956 if (!Reg.isVirtual())
2957 continue;
2958 unsigned Index = Register::virtReg2Index(Reg);
2959 if (Index < SparseUniverseMax) {
2960 if (Index < SparseUniverse && Sparse.test(Index))
2961 continue;
2962 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2963 } else {
2964 if (Dense.count(Reg))
2965 continue;
2966 ++NewDenseSize;
2967 }
2968 ToVRegs.push_back(Reg);
2969 }
2970 size_t End = ToVRegs.size();
2971 if (Begin == End)
2972 return false;
2973 // Reserving space in sets once performs better than doing so continuously
2974 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2975 // tuned all the way down) and double iteration (the second one is over a
2976 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2977 Sparse.resize(NewSparseUniverse);
2978 Dense.reserve(NewDenseSize);
2979 for (unsigned I = Begin; I < End; ++I) {
2980 Register Reg = ToVRegs[I];
2981 unsigned Index = Register::virtReg2Index(Reg);
2982 if (Index < SparseUniverseMax)
2983 Sparse.set(Index);
2984 else
2985 Dense.insert(Reg);
2986 }
2987 return true;
2988 }
2989
2990private:
2991 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2992 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2993 // are tracked by Dense. The only purpose of the threashold and the Dense set
2994 // is to have a reasonably growing memory usage in pathological cases (large
2995 // number of very sparse VRegFilter instances live at the same time). In
2996 // practice even in the worst-by-execution time cases having all elements
2997 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2998 // space efficient than if tracked by Dense. The threashold is set to keep the
2999 // worst-case memory usage within 2x of figures determined empirically for
3000 // "all Dense" scenario in such worst-by-execution-time cases.
3001 BitVector Sparse;
3003};
3004
3005// Implements both a transfer function and a (binary, in-place) join operator
3006// for a dataflow over register sets with set union join and filtering transfer
3007// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3008// Maintains out_b as its state, allowing for O(n) iteration over it at any
3009// time, where n is the size of the set (as opposed to O(U) where U is the
3010// universe). filter_b implicitly contains all physical registers at all times.
3011class FilteringVRegSet {
3012 VRegFilter Filter;
3014
3015public:
3016 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3017 // Both virtual and physical registers are fine.
3018 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3019 Filter.add(RS);
3020 }
3021 // Passes \p RS through the filter_b (transfer function) and adds what's left
3022 // to itself (out_b).
3023 template <typename RegSetT> bool add(const RegSetT &RS) {
3024 // Double-duty the Filter: to maintain VRegs a set (and the join operation
3025 // a set union) just add everything being added here to the Filter as well.
3026 return Filter.filterAndAdd(RS, VRegs);
3027 }
3028 using const_iterator = decltype(VRegs)::const_iterator;
3029 const_iterator begin() const { return VRegs.begin(); }
3030 const_iterator end() const { return VRegs.end(); }
3031 size_t size() const { return VRegs.size(); }
3032};
3033} // namespace
3034
3035// Calculate the largest possible vregsPassed sets. These are the registers that
3036// can pass through an MBB live, but may not be live every time. It is assumed
3037// that all vregsPassed sets are empty before the call.
3038void MachineVerifier::calcRegsPassed() {
3039 if (MF->empty())
3040 // ReversePostOrderTraversal doesn't handle empty functions.
3041 return;
3042
3043 for (const MachineBasicBlock *MB :
3045 FilteringVRegSet VRegs;
3046 BBInfo &Info = MBBInfoMap[MB];
3047 assert(Info.reachable);
3048
3049 VRegs.addToFilter(Info.regsKilled);
3050 VRegs.addToFilter(Info.regsLiveOut);
3051 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3052 const BBInfo &PredInfo = MBBInfoMap[Pred];
3053 if (!PredInfo.reachable)
3054 continue;
3055
3056 VRegs.add(PredInfo.regsLiveOut);
3057 VRegs.add(PredInfo.vregsPassed);
3058 }
3059 Info.vregsPassed.reserve(VRegs.size());
3060 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3061 }
3062}
3063
3064// Calculate the set of virtual registers that must be passed through each basic
3065// block in order to satisfy the requirements of successor blocks. This is very
3066// similar to calcRegsPassed, only backwards.
3067void MachineVerifier::calcRegsRequired() {
3068 // First push live-in regs to predecessors' vregsRequired.
3070 for (const auto &MBB : *MF) {
3071 BBInfo &MInfo = MBBInfoMap[&MBB];
3072 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3073 BBInfo &PInfo = MBBInfoMap[Pred];
3074 if (PInfo.addRequired(MInfo.vregsLiveIn))
3075 todo.insert(Pred);
3076 }
3077
3078 // Handle the PHI node.
3079 for (const MachineInstr &MI : MBB.phis()) {
3080 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3081 // Skip those Operands which are undef regs or not regs.
3082 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3083 continue;
3084
3085 // Get register and predecessor for one PHI edge.
3086 Register Reg = MI.getOperand(i).getReg();
3087 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3088
3089 BBInfo &PInfo = MBBInfoMap[Pred];
3090 if (PInfo.addRequired(Reg))
3091 todo.insert(Pred);
3092 }
3093 }
3094 }
3095
3096 // Iteratively push vregsRequired to predecessors. This will converge to the
3097 // same final state regardless of DenseSet iteration order.
3098 while (!todo.empty()) {
3099 const MachineBasicBlock *MBB = *todo.begin();
3100 todo.erase(MBB);
3101 BBInfo &MInfo = MBBInfoMap[MBB];
3102 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3103 if (Pred == MBB)
3104 continue;
3105 BBInfo &SInfo = MBBInfoMap[Pred];
3106 if (SInfo.addRequired(MInfo.vregsRequired))
3107 todo.insert(Pred);
3108 }
3109 }
3110}
3111
3112// Check PHI instructions at the beginning of MBB. It is assumed that
3113// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3114void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3115 BBInfo &MInfo = MBBInfoMap[&MBB];
3116
3118 for (const MachineInstr &Phi : MBB) {
3119 if (!Phi.isPHI())
3120 break;
3121 seen.clear();
3122
3123 const MachineOperand &MODef = Phi.getOperand(0);
3124 if (!MODef.isReg() || !MODef.isDef()) {
3125 report("Expected first PHI operand to be a register def", &MODef, 0);
3126 continue;
3127 }
3128 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3129 MODef.isEarlyClobber() || MODef.isDebug())
3130 report("Unexpected flag on PHI operand", &MODef, 0);
3131 Register DefReg = MODef.getReg();
3132 if (!DefReg.isVirtual())
3133 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3134
3135 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3136 const MachineOperand &MO0 = Phi.getOperand(I);
3137 if (!MO0.isReg()) {
3138 report("Expected PHI operand to be a register", &MO0, I);
3139 continue;
3140 }
3141 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3142 MO0.isDebug() || MO0.isTied())
3143 report("Unexpected flag on PHI operand", &MO0, I);
3144
3145 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3146 if (!MO1.isMBB()) {
3147 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3148 continue;
3149 }
3150
3151 const MachineBasicBlock &Pre = *MO1.getMBB();
3152 if (!Pre.isSuccessor(&MBB)) {
3153 report("PHI input is not a predecessor block", &MO1, I + 1);
3154 continue;
3155 }
3156
3157 if (MInfo.reachable) {
3158 seen.insert(&Pre);
3159 BBInfo &PrInfo = MBBInfoMap[&Pre];
3160 if (!MO0.isUndef() && PrInfo.reachable &&
3161 !PrInfo.isLiveOut(MO0.getReg()))
3162 report("PHI operand is not live-out from predecessor", &MO0, I);
3163 }
3164 }
3165
3166 // Did we see all predecessors?
3167 if (MInfo.reachable) {
3168 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3169 if (!seen.count(Pred)) {
3170 report("Missing PHI operand", &Phi);
3171 errs() << printMBBReference(*Pred)
3172 << " is a predecessor according to the CFG.\n";
3173 }
3174 }
3175 }
3176 }
3177}
3178
3179static void
3181 std::function<void(const Twine &Message)> FailureCB) {
3183 CV.initialize(&errs(), FailureCB, MF);
3184
3185 for (const auto &MBB : MF) {
3186 CV.visit(MBB);
3187 for (const auto &MI : MBB.instrs())
3188 CV.visit(MI);
3189 }
3190
3191 if (CV.sawTokens()) {
3192 DT.recalculate(const_cast<MachineFunction &>(MF));
3193 CV.verify(DT);
3194 }
3195}
3196
3197void MachineVerifier::visitMachineFunctionAfter() {
3198 auto FailureCB = [this](const Twine &Message) {
3199 report(Message.str().c_str(), MF);
3200 };
3201 verifyConvergenceControl(*MF, DT, FailureCB);
3202
3203 calcRegsPassed();
3204
3205 for (const MachineBasicBlock &MBB : *MF)
3206 checkPHIOps(MBB);
3207
3208 // Now check liveness info if available
3209 calcRegsRequired();
3210
3211 // Check for killed virtual registers that should be live out.
3212 for (const auto &MBB : *MF) {
3213 BBInfo &MInfo = MBBInfoMap[&MBB];
3214 for (Register VReg : MInfo.vregsRequired)
3215 if (MInfo.regsKilled.count(VReg)) {
3216 report("Virtual register killed in block, but needed live out.", &MBB);
3217 errs() << "Virtual register " << printReg(VReg)
3218 << " is used after the block.\n";
3219 }
3220 }
3221
3222 if (!MF->empty()) {
3223 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3224 for (Register VReg : MInfo.vregsRequired) {
3225 report("Virtual register defs don't dominate all uses.", MF);
3226 report_context_vreg(VReg);
3227 }
3228 }
3229
3230 if (LiveVars)
3231 verifyLiveVariables();
3232 if (LiveInts)
3233 verifyLiveIntervals();
3234
3235 // Check live-in list of each MBB. If a register is live into MBB, check
3236 // that the register is in regsLiveOut of each predecessor block. Since
3237 // this must come from a definition in the predecesssor or its live-in
3238 // list, this will catch a live-through case where the predecessor does not
3239 // have the register in its live-in list. This currently only checks
3240 // registers that have no aliases, are not allocatable and are not
3241 // reserved, which could mean a condition code register for instance.
3242 if (MRI->tracksLiveness())
3243 for (const auto &MBB : *MF)
3245 MCPhysReg LiveInReg = P.PhysReg;
3246 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3247 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3248 continue;
3249 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3250 BBInfo &PInfo = MBBInfoMap[Pred];
3251 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3252 report("Live in register not found to be live out from predecessor.",
3253 &MBB);
3254 errs() << TRI->getName(LiveInReg)
3255 << " not found to be live out from "
3256 << printMBBReference(*Pred) << "\n";
3257 }
3258 }
3259 }
3260
3261 for (auto CSInfo : MF->getCallSitesInfo())
3262 if (!CSInfo.first->isCall())
3263 report("Call site info referencing instruction that is not call", MF);
3264
3265 // If there's debug-info, check that we don't have any duplicate value
3266 // tracking numbers.
3267 if (MF->getFunction().getSubprogram()) {
3268 DenseSet<unsigned> SeenNumbers;
3269 for (const auto &MBB : *MF) {
3270 for (const auto &MI : MBB) {
3271 if (auto Num = MI.peekDebugInstrNum()) {
3272 auto Result = SeenNumbers.insert((unsigned)Num);
3273 if (!Result.second)
3274 report("Instruction has a duplicated value tracking number", &MI);
3275 }
3276 }
3277 }
3278 }
3279}
3280
3281void MachineVerifier::verifyLiveVariables() {
3282 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3283 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3285 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3286 for (const auto &MBB : *MF) {
3287 BBInfo &MInfo = MBBInfoMap[&MBB];
3288
3289 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3290 if (MInfo.vregsRequired.count(Reg)) {
3291 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3292 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3293 errs() << "Virtual register " << printReg(Reg)
3294 << " must be live through the block.\n";
3295 }
3296 } else {
3297 if (VI.AliveBlocks.test(MBB.getNumber())) {
3298 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3299 errs() << "Virtual register " << printReg(Reg)
3300 << " is not needed live through the block.\n";
3301 }
3302 }
3303 }
3304 }
3305}
3306
3307void MachineVerifier::verifyLiveIntervals() {
3308 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3309 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3311
3312 // Spilling and splitting may leave unused registers around. Skip them.
3313 if (MRI->reg_nodbg_empty(Reg))
3314 continue;
3315
3316 if (!LiveInts->hasInterval(Reg)) {
3317 report("Missing live interval for virtual register", MF);
3318 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
3319 continue;
3320 }
3321
3322 const LiveInterval &LI = LiveInts->getInterval(Reg);
3323 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3324 verifyLiveInterval(LI);
3325 }
3326
3327 // Verify all the cached regunit intervals.
3328 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3329 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3330 verifyLiveRange(*LR, i);
3331}
3332
// Verify a single value number \p VNI of live range \p LR: its def slot must
// lie inside the range and map back to \p VNI itself, and (for non-PHI
// values) must correspond to an instruction that actually defines \p Reg at a
// slot kind consistent with its early-clobber status.
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  // Unused value numbers carry no constraints.
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    // PHI-defined values must start at the beginning of their block.
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    // Scan the whole bundle for an operand defining Reg (for a physical Reg,
    // any def covering one of its regunits), optionally restricted to
    // LaneMask for subranges.
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
3423
3424void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3426 Register Reg,
3427 LaneBitmask LaneMask) {
3428 const LiveRange::Segment &S = *I;
3429 const VNInfo *VNI = S.valno;
3430 assert(VNI && "Live segment has no valno");
3431
3432 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3433 report("Foreign valno in live segment", MF);
3434 report_context(LR, Reg, LaneMask);
3435 report_context(S);
3436 report_context(*VNI);
3437 }
3438
3439 if (VNI->isUnused()) {
3440 report("Live segment valno is marked unused", MF);
3441 report_context(LR, Reg, LaneMask);
3442 report_context(S);
3443 }
3444
3445 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3446 if (!MBB) {
3447 report("Bad start of live segment, no basic block", MF);
3448 report_context(LR, Reg, LaneMask);
3449 report_context(S);
3450 return;
3451 }
3452 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3453 if (S.start != MBBStartIdx && S.start != VNI->def) {
3454 report("Live segment must begin at MBB entry or valno def", MBB);
3455 report_context(LR, Reg, LaneMask);
3456 report_context(S);
3457 }
3458
3459 const MachineBasicBlock *EndMBB =
3460 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3461 if (!EndMBB) {
3462 report("Bad end of live segment, no basic block", MF);
3463 report_context(LR, Reg, LaneMask);
3464 report_context(S);
3465 return;
3466 }
3467
3468 // Checks for non-live-out segments.
3469 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3470 // RegUnit intervals are allowed dead phis.
3471 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3472 S.end == VNI->def.getDeadSlot())
3473 return;
3474
3475 // The live segment is ending inside EndMBB
3476 const MachineInstr *MI =
3478 if (!MI) {
3479 report("Live segment doesn't end at a valid instruction", EndMBB);
3480 report_context(LR, Reg, LaneMask);
3481 report_context(S);
3482 return;
3483 }
3484
3485 // The block slot must refer to a basic block boundary.
3486 if (S.end.isBlock()) {
3487 report("Live segment ends at B slot of an instruction", EndMBB);
3488 report_context(LR, Reg, LaneMask);
3489 report_context(S);
3490 }
3491
3492 if (S.end.isDead()) {
3493 // Segment ends on the dead slot.
3494 // That means there must be a dead def.
3495 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3496 report("Live segment ending at dead slot spans instructions", EndMBB);
3497 report_context(LR, Reg, LaneMask);
3498 report_context(S);
3499 }
3500 }
3501
3502 // After tied operands are rewritten, a live segment can only end at an
3503 // early-clobber slot if it is being redefined by an early-clobber def.
3504 // TODO: Before tied operands are rewritten, a live segment can only end at
3505 // an early-clobber slot if the last use is tied to an early-clobber def.
3506 if (MF->getProperties().hasProperty(
3508 S.end.isEarlyClobber()) {
3509 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3510 report("Live segment ending at early clobber slot must be "
3511 "redefined by an EC def in the same instruction",
3512 EndMBB);
3513 report_context(LR, Reg, LaneMask);
3514 report_context(S);
3515 }
3516 }
3517
3518 // The following checks only apply to virtual registers. Physreg liveness
3519 // is too weird to check.
3520 if (Reg.isVirtual()) {
3521 // A live segment can end with either a redefinition, a kill flag on a
3522 // use, or a dead flag on a def.
3523 bool hasRead = false;
3524 bool hasSubRegDef = false;
3525 bool hasDeadDef = false;
3526 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3527 if (!MOI->isReg() || MOI->getReg() != Reg)
3528 continue;
3529 unsigned Sub = MOI->getSubReg();
3530 LaneBitmask SLM =
3531 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3532 if (MOI->isDef()) {
3533 if (Sub != 0) {
3534 hasSubRegDef = true;
3535 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3536 // mask for subregister defs. Read-undef defs will be handled by
3537 // readsReg below.
3538 SLM = ~SLM;
3539 }
3540 if (MOI->isDead())
3541 hasDeadDef = true;
3542 }
3543 if (LaneMask.any() && (LaneMask & SLM).none())
3544 continue;
3545 if (MOI->readsReg())
3546 hasRead = true;
3547 }
3548 if (S.end.isDead()) {
3549 // Make sure that the corresponding machine operand for a "dead" live
3550 // range has the dead flag. We cannot perform this check for subregister
3551 // liveranges as partially dead values are allowed.
3552 if (LaneMask.none() && !hasDeadDef) {
3553 report(
3554 "Instruction ending live segment on dead slot has no dead flag",
3555 MI);
3556 report_context(LR, Reg, LaneMask);
3557 report_context(S);
3558 }
3559 } else {
3560 if (!hasRead) {
3561 // When tracking subregister liveness, the main range must start new
3562 // values on partial register writes, even if there is no read.
3563 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3564 !hasSubRegDef) {
3565 report("Instruction ending live segment doesn't read the register",
3566 MI);
3567 report_context(LR, Reg, LaneMask);
3568 report_context(S);
3569 }
3570 }
3571 }
3572 }
3573 }
3574
3575 // Now check all the basic blocks in this live segment.
3577 // Is this live segment the beginning of a non-PHIDef VN?
3578 if (S.start == VNI->def && !VNI->isPHIDef()) {
3579 // Not live-in to any blocks.
3580 if (MBB == EndMBB)
3581 return;
3582 // Skip this block.
3583 ++MFI;
3584 }
3585
3587 if (LaneMask.any()) {
3588 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3589 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3590 }
3591
3592 while (true) {
3593 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3594 // We don't know how to track physregs into a landing pad.
3595 if (!Reg.isVirtual() && MFI->isEHPad()) {
3596 if (&*MFI == EndMBB)
3597 break;
3598 ++MFI;
3599 continue;
3600 }
3601
3602 // Is VNI a PHI-def in the current block?
3603 bool IsPHI = VNI->isPHIDef() &&
3604 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3605
3606 // Check that VNI is live-out of all predecessors.
3607 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3608 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3609 // Predecessor of landing pad live-out on last call.
3610 if (MFI->isEHPad()) {
3611 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3612 if (MI.isCall()) {
3613 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3614 break;
3615 }
3616 }
3617 }
3618 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3619
3620 // All predecessors must have a live-out value. However for a phi
3621 // instruction with subregister intervals
3622 // only one of the subregisters (not necessarily the current one) needs to
3623 // be defined.
3624 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3625 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3626 continue;
3627 report("Register not marked live out of predecessor", Pred);
3628 report_context(LR, Reg, LaneMask);
3629 report_context(*VNI);
3630 errs() << " live into " << printMBBReference(*MFI) << '@'
3631 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3632 << PEnd << '\n';
3633 continue;
3634 }
3635
3636 // Only PHI-defs can take different predecessor values.
3637 if (!IsPHI && PVNI != VNI) {
3638 report("Different value live out of predecessor", Pred);
3639 report_context(LR, Reg, LaneMask);
3640 errs() << "Valno #" << PVNI->id << " live out of "
3641 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3642 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3643 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3644 }
3645 }
3646 if (&*MFI == EndMBB)
3647 break;
3648 ++MFI;
3649 }
3650}
3651
3652void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3653 LaneBitmask LaneMask) {
3654 for (const VNInfo *VNI : LR.valnos)
3655 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3656
3657 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3658 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3659}
3660
3661void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3662 Register Reg = LI.reg();
3663 assert(Reg.isVirtual());
3664 verifyLiveRange(LI, Reg);
3665
3666 if (LI.hasSubRanges()) {
3668 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3669 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3670 if ((Mask & SR.LaneMask).any()) {
3671 report("Lane masks of sub ranges overlap in live interval", MF);
3672 report_context(LI);
3673 }
3674 if ((SR.LaneMask & ~MaxMask).any()) {
3675 report("Subrange lanemask is invalid", MF);
3676 report_context(LI);
3677 }
3678 if (SR.empty()) {
3679 report("Subrange must not be empty", MF);
3680 report_context(SR, LI.reg(), SR.LaneMask);
3681 }
3682 Mask |= SR.LaneMask;
3683 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3684 if (!LI.covers(SR)) {
3685 report("A Subrange is not covered by the main range", MF);
3686 report_context(LI);
3687 }
3688 }
3689 }
3690
3691 // Check the LI only has one connected component.
3692 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3693 unsigned NumComp = ConEQ.Classify(LI);
3694 if (NumComp > 1) {
3695 report("Multiple connected components in live interval", MF);
3696 report_context(LI);
3697 for (unsigned comp = 0; comp != NumComp; ++comp) {
3698 errs() << comp << ": valnos";
3699 for (const VNInfo *I : LI.valnos)
3700 if (comp == ConEQ.getEqClass(I))
3701 errs() << ' ' << I->id;
3702 errs() << '\n';
3703 }
3704 }
3705}
3706
namespace {

  // FrameSetup and FrameDestroy can have zero adjustment, so a lone integer
  // cannot tell a FrameSetup apart from a FrameDestroy when the value is
  // zero. Pair each integer with a bool to capture the full stack state.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
        : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
          ExitIsSetup(ExitSetup) {}

    /// Stack adjustment on entry to the block. Can be negative, which means
    /// we are setting up a frame.
    int EntryValue = 0;
    /// Stack adjustment on exit from the block. Can be negative, which means
    /// we are setting up a frame.
    int ExitValue = 0;
    /// Whether a FrameSetup is still open (not yet destroyed) on entry.
    bool EntryIsSetup = false;
    /// Whether a FrameSetup is still open (not yet destroyed) on exit.
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
3727
3728/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3729/// by a FrameDestroy <n>, stack adjustments are identical on all
3730/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3731void MachineVerifier::verifyStackFrame() {
3732 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3733 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3734 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3735 return;
3736
3738 SPState.resize(MF->getNumBlockIDs());
3740
3741 // Visit the MBBs in DFS order.
3742 for (df_ext_iterator<const MachineFunction *,
3744 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3745 DFI != DFE; ++DFI) {
3746 const MachineBasicBlock *MBB = *DFI;
3747
3748 StackStateOfBB BBState;
3749 // Check the exit state of the DFS stack predecessor.
3750 if (DFI.getPathLength() >= 2) {
3751 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3752 assert(Reachable.count(StackPred) &&
3753 "DFS stack predecessor is already visited.\n");
3754 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3755 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3756 BBState.ExitValue = BBState.EntryValue;
3757 BBState.ExitIsSetup = BBState.EntryIsSetup;
3758 }
3759
3760 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3761 report("Call frame size on entry does not match value computed from "
3762 "predecessor",
3763 MBB);
3764 errs() << "Call frame size on entry " << MBB->getCallFrameSize()
3765 << " does not match value computed from predecessor "
3766 << -BBState.EntryValue << '\n';
3767 }
3768
3769 // Update stack state by checking contents of MBB.
3770 for (const auto &I : *MBB) {
3771 if (I.getOpcode() == FrameSetupOpcode) {
3772 if (BBState.ExitIsSetup)
3773 report("FrameSetup is after another FrameSetup", &I);
3774 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3775 report("AdjustsStack not set in presence of a frame pseudo "
3776 "instruction.", &I);
3777 BBState.ExitValue -= TII->getFrameTotalSize(I);
3778 BBState.ExitIsSetup = true;
3779 }
3780
3781 if (I.getOpcode() == FrameDestroyOpcode) {
3782 int Size = TII->getFrameTotalSize(I);
3783 if (!BBState.ExitIsSetup)
3784 report("FrameDestroy is not after a FrameSetup", &I);
3785 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3786 BBState.ExitValue;
3787 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3788 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3789 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3790 << AbsSPAdj << ">.\n";
3791 }
3792 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3793 report("AdjustsStack not set in presence of a frame pseudo "
3794 "instruction.", &I);
3795 BBState.ExitValue += Size;
3796 BBState.ExitIsSetup = false;
3797 }
3798 }
3799 SPState[MBB->getNumber()] = BBState;
3800
3801 // Make sure the exit state of any predecessor is consistent with the entry
3802 // state.
3803 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3804 if (Reachable.count(Pred) &&
3805 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3806 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3807 report("The exit stack state of a predecessor is inconsistent.", MBB);
3808 errs() << "Predecessor " << printMBBReference(*Pred)
3809 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3810 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3811 << printMBBReference(*MBB) << " has entry state ("
3812 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3813 }
3814 }
3815
3816 // Make sure the entry state of any successor is consistent with the exit
3817 // state.
3818 for (const MachineBasicBlock *Succ : MBB->successors()) {
3819 if (Reachable.count(Succ) &&
3820 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3821 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3822 report("The entry stack state of a successor is inconsistent.", MBB);
3823 errs() << "Successor " << printMBBReference(*Succ)
3824 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3825 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3826 << printMBBReference(*MBB) << " has exit state ("
3827 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3828 }
3829 }
3830
3831 // Make sure a basic block with return ends with zero stack adjustment.
3832 if (!MBB->empty() && MBB->back().isReturn()) {
3833 if (BBState.ExitIsSetup)
3834 report("A return block ends with a FrameSetup.", MBB);
3835 if (BBState.ExitValue)
3836 report("A return block ends with a nonzero stack adjustment.", MBB);
3837 }
3838 }
3839}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDomTree &DT, std::function< void(const Twine &Message)> FailureCB)
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:640
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:221
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:148
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Core dominator tree base class.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:352
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:280
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:203
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:780
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:558
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:929
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:963
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:954
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:68
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:179
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:212
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:245
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:215
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:219
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:234
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:275
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:240
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:222
SlotIndexes pass.
Definition: SlotIndexes.h:300
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:462
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:497
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:502
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:473
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:371
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:452
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:366
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:356
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
iterator begin() const
Definition: SmallPtrSet.h:380
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:210
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:224
self_iterator getIterator()
Definition: ilist_node.h:109
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2073
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.