// Doxygen-export header (non-source residue): LLVM 19.0.0git, MachineVerifier.cpp.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineDominatorTree.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ModRef.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
89
90using namespace llvm;
91
92namespace {
93
94 struct MachineVerifier {
95 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
96
97 MachineVerifier(const char *b, LiveVariables *LiveVars,
98 LiveIntervals *LiveInts, LiveStacks *LiveStks,
99 SlotIndexes *Indexes)
100 : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
101 Indexes(Indexes) {}
102
103 unsigned verify(const MachineFunction &MF);
104
105 Pass *const PASS = nullptr;
106 const char *Banner;
107 const MachineFunction *MF = nullptr;
108 const TargetMachine *TM = nullptr;
109 const TargetInstrInfo *TII = nullptr;
110 const TargetRegisterInfo *TRI = nullptr;
111 const MachineRegisterInfo *MRI = nullptr;
112 const RegisterBankInfo *RBI = nullptr;
113
114 unsigned foundErrors = 0;
115
116 // Avoid querying the MachineFunctionProperties for each operand.
117 bool isFunctionRegBankSelected = false;
118 bool isFunctionSelected = false;
119 bool isFunctionTracksDebugUserValues = false;
120
121 using RegVector = SmallVector<Register, 16>;
122 using RegMaskVector = SmallVector<const uint32_t *, 4>;
123 using RegSet = DenseSet<Register>;
126
127 const MachineInstr *FirstNonPHI = nullptr;
128 const MachineInstr *FirstTerminator = nullptr;
129 BlockSet FunctionBlocks;
130
131 BitVector regsReserved;
132 RegSet regsLive;
133 RegVector regsDefined, regsDead, regsKilled;
134 RegMaskVector regMasks;
135
136 SlotIndex lastIndex;
137
138 // Add Reg and any sub-registers to RV
139 void addRegWithSubRegs(RegVector &RV, Register Reg) {
140 RV.push_back(Reg);
141 if (Reg.isPhysical())
142 append_range(RV, TRI->subregs(Reg.asMCReg()));
143 }
144
145 struct BBInfo {
146 // Is this MBB reachable from the MF entry point?
147 bool reachable = false;
148
149 // Vregs that must be live in because they are used without being
150 // defined. Map value is the user. vregsLiveIn doesn't include regs
151 // that only are used by PHI nodes.
152 RegMap vregsLiveIn;
153
154 // Regs killed in MBB. They may be defined again, and will then be in both
155 // regsKilled and regsLiveOut.
156 RegSet regsKilled;
157
158 // Regs defined in MBB and live out. Note that vregs passing through may
159 // be live out without being mentioned here.
160 RegSet regsLiveOut;
161
162 // Vregs that pass through MBB untouched. This set is disjoint from
163 // regsKilled and regsLiveOut.
164 RegSet vregsPassed;
165
166 // Vregs that must pass through MBB because they are needed by a successor
167 // block. This set is disjoint from regsLiveOut.
168 RegSet vregsRequired;
169
170 // Set versions of block's predecessor and successor lists.
171 BlockSet Preds, Succs;
172
173 BBInfo() = default;
174
175 // Add register to vregsRequired if it belongs there. Return true if
176 // anything changed.
177 bool addRequired(Register Reg) {
178 if (!Reg.isVirtual())
179 return false;
180 if (regsLiveOut.count(Reg))
181 return false;
182 return vregsRequired.insert(Reg).second;
183 }
184
185 // Same for a full set.
186 bool addRequired(const RegSet &RS) {
187 bool Changed = false;
188 for (Register Reg : RS)
189 Changed |= addRequired(Reg);
190 return Changed;
191 }
192
193 // Same for a full map.
194 bool addRequired(const RegMap &RM) {
195 bool Changed = false;
196 for (const auto &I : RM)
197 Changed |= addRequired(I.first);
198 return Changed;
199 }
200
201 // Live-out registers are either in regsLiveOut or vregsPassed.
202 bool isLiveOut(Register Reg) const {
203 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
204 }
205 };
206
207 // Extra register info per MBB.
209
210 bool isReserved(Register Reg) {
211 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
212 }
213
214 bool isAllocatable(Register Reg) const {
215 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
216 !regsReserved.test(Reg.id());
217 }
218
219 // Analysis information if available
220 LiveVariables *LiveVars = nullptr;
221 LiveIntervals *LiveInts = nullptr;
222 LiveStacks *LiveStks = nullptr;
223 SlotIndexes *Indexes = nullptr;
224
225 // This is calculated only when trying to verify convergence control tokens.
226 // Similar to the LLVM IR verifier, we calculate this locally instead of
227 // relying on the pass manager.
229
230 void visitMachineFunctionBefore();
231 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
232 void visitMachineBundleBefore(const MachineInstr *MI);
233
234 /// Verify that all of \p MI's virtual register operands are scalars.
235 /// \returns True if all virtual register operands are scalar. False
236 /// otherwise.
237 bool verifyAllRegOpsScalar(const MachineInstr &MI,
238 const MachineRegisterInfo &MRI);
239 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
240
241 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
242 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
243 void verifyPreISelGenericInstruction(const MachineInstr *MI);
244
245 void visitMachineInstrBefore(const MachineInstr *MI);
246 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
247 void visitMachineBundleAfter(const MachineInstr *MI);
248 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
249 void visitMachineFunctionAfter();
250
251 void report(const char *msg, const MachineFunction *MF);
252 void report(const char *msg, const MachineBasicBlock *MBB);
253 void report(const char *msg, const MachineInstr *MI);
254 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
255 LLT MOVRegType = LLT{});
256 void report(const Twine &Msg, const MachineInstr *MI);
257
258 void report_context(const LiveInterval &LI) const;
259 void report_context(const LiveRange &LR, Register VRegUnit,
260 LaneBitmask LaneMask) const;
261 void report_context(const LiveRange::Segment &S) const;
262 void report_context(const VNInfo &VNI) const;
263 void report_context(SlotIndex Pos) const;
264 void report_context(MCPhysReg PhysReg) const;
265 void report_context_liverange(const LiveRange &LR) const;
266 void report_context_lanemask(LaneBitmask LaneMask) const;
267 void report_context_vreg(Register VReg) const;
268 void report_context_vreg_regunit(Register VRegOrUnit) const;
269
270 void verifyInlineAsm(const MachineInstr *MI);
271
272 void checkLiveness(const MachineOperand *MO, unsigned MONum);
273 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
274 SlotIndex UseIdx, const LiveRange &LR,
275 Register VRegOrUnit,
276 LaneBitmask LaneMask = LaneBitmask::getNone());
277 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
278 SlotIndex DefIdx, const LiveRange &LR,
279 Register VRegOrUnit, bool SubRangeCheck = false,
280 LaneBitmask LaneMask = LaneBitmask::getNone());
281
282 void markReachable(const MachineBasicBlock *MBB);
283 void calcRegsPassed();
284 void checkPHIOps(const MachineBasicBlock &MBB);
285
286 void calcRegsRequired();
287 void verifyLiveVariables();
288 void verifyLiveIntervals();
289 void verifyLiveInterval(const LiveInterval&);
290 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
292 void verifyLiveRangeSegment(const LiveRange &,
295 void verifyLiveRange(const LiveRange &, Register,
296 LaneBitmask LaneMask = LaneBitmask::getNone());
297
298 void verifyStackFrame();
299
300 void verifySlotIndexes() const;
301 void verifyProperties(const MachineFunction &MF);
302 };
303
304 struct MachineVerifierPass : public MachineFunctionPass {
305 static char ID; // Pass ID, replacement for typeid
306
307 const std::string Banner;
308
309 MachineVerifierPass(std::string banner = std::string())
310 : MachineFunctionPass(ID), Banner(std::move(banner)) {
312 }
313
314 void getAnalysisUsage(AnalysisUsage &AU) const override {
319 AU.setPreservesAll();
321 }
322
323 bool runOnMachineFunction(MachineFunction &MF) override {
324 // Skip functions that have known verification problems.
325 // FIXME: Remove this mechanism when all problematic passes have been
326 // fixed.
327 if (MF.getProperties().hasProperty(
328 MachineFunctionProperties::Property::FailsVerification))
329 return false;
330
331 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
332 if (FoundErrors)
333 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
334 return false;
335 }
336 };
337
338} // end anonymous namespace
339
char MachineVerifierPass::ID = 0;

// Register the pass so "-machineverifier" is usable from the command line.
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)
344
346 return new MachineVerifierPass(Banner);
347}
348
349void llvm::verifyMachineFunction(const std::string &Banner,
350 const MachineFunction &MF) {
351 // TODO: Use MFAM after porting below analyses.
352 // LiveVariables *LiveVars;
353 // LiveIntervals *LiveInts;
354 // LiveStacks *LiveStks;
355 // SlotIndexes *Indexes;
356 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
357 if (FoundErrors)
358 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
359}
360
361bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
362 const {
363 MachineFunction &MF = const_cast<MachineFunction&>(*this);
364 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
365 if (AbortOnErrors && FoundErrors)
366 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
367 return FoundErrors == 0;
368}
369
371 const char *Banner, bool AbortOnErrors) const {
372 MachineFunction &MF = const_cast<MachineFunction &>(*this);
373 unsigned FoundErrors =
374 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
375 if (AbortOnErrors && FoundErrors)
376 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
377 return FoundErrors == 0;
378}
379
380void MachineVerifier::verifySlotIndexes() const {
381 if (Indexes == nullptr)
382 return;
383
384 // Ensure the IdxMBB list is sorted by slot indexes.
387 E = Indexes->MBBIndexEnd(); I != E; ++I) {
388 assert(!Last.isValid() || I->first > Last);
389 Last = I->first;
390 }
391}
392
393void MachineVerifier::verifyProperties(const MachineFunction &MF) {
394 // If a pass has introduced virtual registers without clearing the
395 // NoVRegs property (or set it without allocating the vregs)
396 // then report an error.
397 if (MF.getProperties().hasProperty(
399 MRI->getNumVirtRegs())
400 report("Function has NoVRegs property but there are VReg operands", &MF);
401}
402
403unsigned MachineVerifier::verify(const MachineFunction &MF) {
404 foundErrors = 0;
405
406 this->MF = &MF;
407 TM = &MF.getTarget();
410 RBI = MF.getSubtarget().getRegBankInfo();
411 MRI = &MF.getRegInfo();
412
413 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
415
416 // If we're mid-GlobalISel and we already triggered the fallback path then
417 // it's expected that the MIR is somewhat broken but that's ok since we'll
418 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
419 if (isFunctionFailedISel)
420 return foundErrors;
421
422 isFunctionRegBankSelected = MF.getProperties().hasProperty(
424 isFunctionSelected = MF.getProperties().hasProperty(
426 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
428
429 if (PASS) {
430 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
431 // We don't want to verify LiveVariables if LiveIntervals is available.
432 if (!LiveInts)
433 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
434 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
435 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
436 }
437
438 verifySlotIndexes();
439
440 verifyProperties(MF);
441
442 visitMachineFunctionBefore();
443 for (const MachineBasicBlock &MBB : MF) {
444 visitMachineBasicBlockBefore(&MBB);
445 // Keep track of the current bundle header.
446 const MachineInstr *CurBundle = nullptr;
447 // Do we expect the next instruction to be part of the same bundle?
448 bool InBundle = false;
449
450 for (const MachineInstr &MI : MBB.instrs()) {
451 if (MI.getParent() != &MBB) {
452 report("Bad instruction parent pointer", &MBB);
453 errs() << "Instruction: " << MI;
454 continue;
455 }
456
457 // Check for consistent bundle flags.
458 if (InBundle && !MI.isBundledWithPred())
459 report("Missing BundledPred flag, "
460 "BundledSucc was set on predecessor",
461 &MI);
462 if (!InBundle && MI.isBundledWithPred())
463 report("BundledPred flag is set, "
464 "but BundledSucc not set on predecessor",
465 &MI);
466
467 // Is this a bundle header?
468 if (!MI.isInsideBundle()) {
469 if (CurBundle)
470 visitMachineBundleAfter(CurBundle);
471 CurBundle = &MI;
472 visitMachineBundleBefore(CurBundle);
473 } else if (!CurBundle)
474 report("No bundle header", &MI);
475 visitMachineInstrBefore(&MI);
476 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
477 const MachineOperand &Op = MI.getOperand(I);
478 if (Op.getParent() != &MI) {
479 // Make sure to use correct addOperand / removeOperand / ChangeTo
480 // functions when replacing operands of a MachineInstr.
481 report("Instruction has operand with wrong parent set", &MI);
482 }
483
484 visitMachineOperand(&Op, I);
485 }
486
487 // Was this the last bundled instruction?
488 InBundle = MI.isBundledWithSucc();
489 }
490 if (CurBundle)
491 visitMachineBundleAfter(CurBundle);
492 if (InBundle)
493 report("BundledSucc flag set on last instruction in block", &MBB.back());
494 visitMachineBasicBlockAfter(&MBB);
495 }
496 visitMachineFunctionAfter();
497
498 // Clean up.
499 regsLive.clear();
500 regsDefined.clear();
501 regsDead.clear();
502 regsKilled.clear();
503 regMasks.clear();
504 MBBInfoMap.clear();
505
506 return foundErrors;
507}
508
509void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
510 assert(MF);
511 errs() << '\n';
512 if (!foundErrors++) {
513 if (Banner)
514 errs() << "# " << Banner << '\n';
515 if (LiveInts != nullptr)
516 LiveInts->print(errs());
517 else
518 MF->print(errs(), Indexes);
519 }
520 errs() << "*** Bad machine code: " << msg << " ***\n"
521 << "- function: " << MF->getName() << "\n";
522}
523
524void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
525 assert(MBB);
526 report(msg, MBB->getParent());
527 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
528 << MBB->getName() << " (" << (const void *)MBB << ')';
529 if (Indexes)
530 errs() << " [" << Indexes->getMBBStartIdx(MBB)
531 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
532 errs() << '\n';
533}
534
535void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
536 assert(MI);
537 report(msg, MI->getParent());
538 errs() << "- instruction: ";
539 if (Indexes && Indexes->hasIndex(*MI))
540 errs() << Indexes->getInstructionIndex(*MI) << '\t';
541 MI->print(errs(), /*IsStandalone=*/true);
542}
543
544void MachineVerifier::report(const char *msg, const MachineOperand *MO,
545 unsigned MONum, LLT MOVRegType) {
546 assert(MO);
547 report(msg, MO->getParent());
548 errs() << "- operand " << MONum << ": ";
549 MO->print(errs(), MOVRegType, TRI);
550 errs() << "\n";
551}
552
553void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
554 report(Msg.str().c_str(), MI);
555}
556
// Error-context helpers: each appends one "- <what>: <value>" line that
// annotates the most recently emitted report() message.

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  // Only mention the lane mask when it actually restricts the range.
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  // The value may encode either a virtual register or a register unit;
  // print whichever it actually is.
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}
604
605void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
606 BBInfo &MInfo = MBBInfoMap[MBB];
607 if (!MInfo.reachable) {
608 MInfo.reachable = true;
609 for (const MachineBasicBlock *Succ : MBB->successors())
610 markReachable(Succ);
611 }
612}
613
614void MachineVerifier::visitMachineFunctionBefore() {
615 lastIndex = SlotIndex();
616 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
617 : TRI->getReservedRegs(*MF);
618
619 if (!MF->empty())
620 markReachable(&MF->front());
621
622 // Build a set of the basic blocks in the function.
623 FunctionBlocks.clear();
624 for (const auto &MBB : *MF) {
625 FunctionBlocks.insert(&MBB);
626 BBInfo &MInfo = MBBInfoMap[&MBB];
627
628 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
629 if (MInfo.Preds.size() != MBB.pred_size())
630 report("MBB has duplicate entries in its predecessor list.", &MBB);
631
632 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
633 if (MInfo.Succs.size() != MBB.succ_size())
634 report("MBB has duplicate entries in its successor list.", &MBB);
635 }
636
637 // Check that the register use lists are sane.
638 MRI->verifyUseLists();
639
640 if (!MF->empty())
641 verifyStackFrame();
642}
643
644void
645MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
646 FirstTerminator = nullptr;
647 FirstNonPHI = nullptr;
648
649 if (!MF->getProperties().hasProperty(
650 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
651 // If this block has allocatable physical registers live-in, check that
652 // it is an entry block or landing pad.
653 for (const auto &LI : MBB->liveins()) {
654 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
655 MBB->getIterator() != MBB->getParent()->begin() &&
657 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
658 "inlineasm-br-indirect-target.",
659 MBB);
660 report_context(LI.PhysReg);
661 }
662 }
663 }
664
665 if (MBB->isIRBlockAddressTaken()) {
667 report("ir-block-address-taken is associated with basic block not used by "
668 "a blockaddress.",
669 MBB);
670 }
671
672 // Count the number of landing pad successors.
674 for (const auto *succ : MBB->successors()) {
675 if (succ->isEHPad())
676 LandingPadSuccs.insert(succ);
677 if (!FunctionBlocks.count(succ))
678 report("MBB has successor that isn't part of the function.", MBB);
679 if (!MBBInfoMap[succ].Preds.count(MBB)) {
680 report("Inconsistent CFG", MBB);
681 errs() << "MBB is not in the predecessor list of the successor "
682 << printMBBReference(*succ) << ".\n";
683 }
684 }
685
686 // Check the predecessor list.
687 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
688 if (!FunctionBlocks.count(Pred))
689 report("MBB has predecessor that isn't part of the function.", MBB);
690 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
691 report("Inconsistent CFG", MBB);
692 errs() << "MBB is not in the successor list of the predecessor "
693 << printMBBReference(*Pred) << ".\n";
694 }
695 }
696
697 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
698 const BasicBlock *BB = MBB->getBasicBlock();
699 const Function &F = MF->getFunction();
700 if (LandingPadSuccs.size() > 1 &&
701 !(AsmInfo &&
703 BB && isa<SwitchInst>(BB->getTerminator())) &&
704 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
705 report("MBB has more than one landing pad successor", MBB);
706
707 // Call analyzeBranch. If it succeeds, there several more conditions to check.
708 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
710 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
711 Cond)) {
712 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
713 // check whether its answers match up with reality.
714 if (!TBB && !FBB) {
715 // Block falls through to its successor.
716 if (!MBB->empty() && MBB->back().isBarrier() &&
717 !TII->isPredicated(MBB->back())) {
718 report("MBB exits via unconditional fall-through but ends with a "
719 "barrier instruction!", MBB);
720 }
721 if (!Cond.empty()) {
722 report("MBB exits via unconditional fall-through but has a condition!",
723 MBB);
724 }
725 } else if (TBB && !FBB && Cond.empty()) {
726 // Block unconditionally branches somewhere.
727 if (MBB->empty()) {
728 report("MBB exits via unconditional branch but doesn't contain "
729 "any instructions!", MBB);
730 } else if (!MBB->back().isBarrier()) {
731 report("MBB exits via unconditional branch but doesn't end with a "
732 "barrier instruction!", MBB);
733 } else if (!MBB->back().isTerminator()) {
734 report("MBB exits via unconditional branch but the branch isn't a "
735 "terminator instruction!", MBB);
736 }
737 } else if (TBB && !FBB && !Cond.empty()) {
738 // Block conditionally branches somewhere, otherwise falls through.
739 if (MBB->empty()) {
740 report("MBB exits via conditional branch/fall-through but doesn't "
741 "contain any instructions!", MBB);
742 } else if (MBB->back().isBarrier()) {
743 report("MBB exits via conditional branch/fall-through but ends with a "
744 "barrier instruction!", MBB);
745 } else if (!MBB->back().isTerminator()) {
746 report("MBB exits via conditional branch/fall-through but the branch "
747 "isn't a terminator instruction!", MBB);
748 }
749 } else if (TBB && FBB) {
750 // Block conditionally branches somewhere, otherwise branches
751 // somewhere else.
752 if (MBB->empty()) {
753 report("MBB exits via conditional branch/branch but doesn't "
754 "contain any instructions!", MBB);
755 } else if (!MBB->back().isBarrier()) {
756 report("MBB exits via conditional branch/branch but doesn't end with a "
757 "barrier instruction!", MBB);
758 } else if (!MBB->back().isTerminator()) {
759 report("MBB exits via conditional branch/branch but the branch "
760 "isn't a terminator instruction!", MBB);
761 }
762 if (Cond.empty()) {
763 report("MBB exits via conditional branch/branch but there's no "
764 "condition!", MBB);
765 }
766 } else {
767 report("analyzeBranch returned invalid data!", MBB);
768 }
769
770 // Now check that the successors match up with the answers reported by
771 // analyzeBranch.
772 if (TBB && !MBB->isSuccessor(TBB))
773 report("MBB exits via jump or conditional branch, but its target isn't a "
774 "CFG successor!",
775 MBB);
776 if (FBB && !MBB->isSuccessor(FBB))
777 report("MBB exits via conditional branch, but its target isn't a CFG "
778 "successor!",
779 MBB);
780
781 // There might be a fallthrough to the next block if there's either no
782 // unconditional true branch, or if there's a condition, and one of the
783 // branches is missing.
784 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
785
786 // A conditional fallthrough must be an actual CFG successor, not
787 // unreachable. (Conversely, an unconditional fallthrough might not really
788 // be a successor, because the block might end in unreachable.)
789 if (!Cond.empty() && !FBB) {
791 if (MBBI == MF->end()) {
792 report("MBB conditionally falls through out of function!", MBB);
793 } else if (!MBB->isSuccessor(&*MBBI))
794 report("MBB exits via conditional branch/fall-through but the CFG "
795 "successors don't match the actual successors!",
796 MBB);
797 }
798
799 // Verify that there aren't any extra un-accounted-for successors.
800 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
801 // If this successor is one of the branch targets, it's okay.
802 if (SuccMBB == TBB || SuccMBB == FBB)
803 continue;
804 // If we might have a fallthrough, and the successor is the fallthrough
805 // block, that's also ok.
806 if (Fallthrough && SuccMBB == MBB->getNextNode())
807 continue;
808 // Also accept successors which are for exception-handling or might be
809 // inlineasm_br targets.
810 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
811 continue;
812 report("MBB has unexpected successors which are not branch targets, "
813 "fallthrough, EHPads, or inlineasm_br targets.",
814 MBB);
815 }
816 }
817
818 regsLive.clear();
819 if (MRI->tracksLiveness()) {
820 for (const auto &LI : MBB->liveins()) {
821 if (!Register::isPhysicalRegister(LI.PhysReg)) {
822 report("MBB live-in list contains non-physical register", MBB);
823 continue;
824 }
825 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
826 regsLive.insert(SubReg);
827 }
828 }
829
830 const MachineFrameInfo &MFI = MF->getFrameInfo();
831 BitVector PR = MFI.getPristineRegs(*MF);
832 for (unsigned I : PR.set_bits()) {
833 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
834 regsLive.insert(SubReg);
835 }
836
837 regsKilled.clear();
838 regsDefined.clear();
839
840 if (Indexes)
841 lastIndex = Indexes->getMBBStartIdx(MBB);
842}
843
844// This function gets called for all bundle headers, including normal
845// stand-alone unbundled instructions.
846void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
847 if (Indexes && Indexes->hasIndex(*MI)) {
848 SlotIndex idx = Indexes->getInstructionIndex(*MI);
849 if (!(idx > lastIndex)) {
850 report("Instruction index out of order", MI);
851 errs() << "Last instruction was at " << lastIndex << '\n';
852 }
853 lastIndex = idx;
854 }
855
856 // Ensure non-terminators don't follow terminators.
857 if (MI->isTerminator()) {
858 if (!FirstTerminator)
859 FirstTerminator = MI;
860 } else if (FirstTerminator) {
861 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
862 // precede non-terminators.
863 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
864 report("Non-terminator instruction after the first terminator", MI);
865 errs() << "First terminator was:\t" << *FirstTerminator;
866 }
867 }
868}
869
870// The operands on an INLINEASM instruction must follow a template.
871// Verify that the flag operands make sense.
872void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
873 // The first two operands on INLINEASM are the asm string and global flags.
874 if (MI->getNumOperands() < 2) {
875 report("Too few operands on inline asm", MI);
876 return;
877 }
878 if (!MI->getOperand(0).isSymbol())
879 report("Asm string must be an external symbol", MI);
880 if (!MI->getOperand(1).isImm())
881 report("Asm flags must be an immediate", MI);
882 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
883 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
884 // and Extra_IsConvergent = 32.
885 if (!isUInt<6>(MI->getOperand(1).getImm()))
886 report("Unknown asm flags", &MI->getOperand(1), 1);
887
888 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
889
890 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
891 unsigned NumOps;
892 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
893 const MachineOperand &MO = MI->getOperand(OpNo);
894 // There may be implicit ops after the fixed operands.
895 if (!MO.isImm())
896 break;
897 const InlineAsm::Flag F(MO.getImm());
898 NumOps = 1 + F.getNumOperandRegisters();
899 }
900
901 if (OpNo > MI->getNumOperands())
902 report("Missing operands in last group", MI);
903
904 // An optional MDNode follows the groups.
905 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
906 ++OpNo;
907
908 // All trailing operands must be implicit registers.
909 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
910 const MachineOperand &MO = MI->getOperand(OpNo);
911 if (!MO.isReg() || !MO.isImplicit())
912 report("Expected implicit register after groups", &MO, OpNo);
913 }
914
915 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
916 const MachineBasicBlock *MBB = MI->getParent();
917
918 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
919 i != e; ++i) {
920 const MachineOperand &MO = MI->getOperand(i);
921
922 if (!MO.isMBB())
923 continue;
924
925 // Check the successor & predecessor lists look ok, assume they are
926 // not. Find the indirect target without going through the successors.
927 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
928 if (!IndirectTargetMBB) {
929 report("INLINEASM_BR indirect target does not exist", &MO, i);
930 break;
931 }
932
933 if (!MBB->isSuccessor(IndirectTargetMBB))
934 report("INLINEASM_BR indirect target missing from successor list", &MO,
935 i);
936
937 if (!IndirectTargetMBB->isPredecessor(MBB))
938 report("INLINEASM_BR indirect target predecessor list missing parent",
939 &MO, i);
940 }
941 }
942}
943
944bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
945 const MachineRegisterInfo &MRI) {
946 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
947 if (!Op.isReg())
948 return false;
949 const auto Reg = Op.getReg();
950 if (Reg.isPhysical())
951 return false;
952 return !MRI.getType(Reg).isScalar();
953 }))
954 return true;
955 report("All register operands must have scalar types", &MI);
956 return false;
957}
958
959/// Check that types are consistent when two operands need to have the same
960/// number of vector elements.
961/// \return true if the types are valid.
962bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
963 const MachineInstr *MI) {
964 if (Ty0.isVector() != Ty1.isVector()) {
965 report("operand types must be all-vector or all-scalar", MI);
966 // Generally we try to report as many issues as possible at once, but in
967 // this case it's not clear what should we be comparing the size of the
968 // scalar with: the size of the whole vector or its lane. Instead of
969 // making an arbitrary choice and emitting not so helpful message, let's
970 // avoid the extra noise and stop here.
971 return false;
972 }
973
974 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
975 report("operand types must preserve number of vector elements", MI);
976 return false;
977 }
978
979 return true;
980}
981
982bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
983 auto Opcode = MI->getOpcode();
984 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
985 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
986 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
987 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
989 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
990 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
991 if (NoSideEffects && DeclHasSideEffects) {
992 report(Twine(TII->getName(Opcode),
993 " used with intrinsic that accesses memory"),
994 MI);
995 return false;
996 }
997 if (!NoSideEffects && !DeclHasSideEffects) {
998 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
999 return false;
1000 }
1001 }
1002
1003 return true;
1004}
1005
1006bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1007 auto Opcode = MI->getOpcode();
1008 bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1009 Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1010 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1011 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1013 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1014 bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1015 if (NotConvergent && DeclIsConvergent) {
1016 report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1017 MI);
1018 return false;
1019 }
1020 if (!NotConvergent && !DeclIsConvergent) {
1021 report(
1022 Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1023 MI);
1024 return false;
1025 }
1026 }
1027
1028 return true;
1029}
1030
1031void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1032 if (isFunctionSelected)
1033 report("Unexpected generic instruction in a Selected function", MI);
1034
1035 const MCInstrDesc &MCID = MI->getDesc();
1036 unsigned NumOps = MI->getNumOperands();
1037
1038 // Branches must reference a basic block if they are not indirect
1039 if (MI->isBranch() && !MI->isIndirectBranch()) {
1040 bool HasMBB = false;
1041 for (const MachineOperand &Op : MI->operands()) {
1042 if (Op.isMBB()) {
1043 HasMBB = true;
1044 break;
1045 }
1046 }
1047
1048 if (!HasMBB) {
1049 report("Branch instruction is missing a basic block operand or "
1050 "isIndirectBranch property",
1051 MI);
1052 }
1053 }
1054
1055 // Check types.
1057 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1058 I != E; ++I) {
1059 if (!MCID.operands()[I].isGenericType())
1060 continue;
1061 // Generic instructions specify type equality constraints between some of
1062 // their operands. Make sure these are consistent.
1063 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1064 Types.resize(std::max(TypeIdx + 1, Types.size()));
1065
1066 const MachineOperand *MO = &MI->getOperand(I);
1067 if (!MO->isReg()) {
1068 report("generic instruction must use register operands", MI);
1069 continue;
1070 }
1071
1072 LLT OpTy = MRI->getType(MO->getReg());
1073 // Don't report a type mismatch if there is no actual mismatch, only a
1074 // type missing, to reduce noise:
1075 if (OpTy.isValid()) {
1076 // Only the first valid type for a type index will be printed: don't
1077 // overwrite it later so it's always clear which type was expected:
1078 if (!Types[TypeIdx].isValid())
1079 Types[TypeIdx] = OpTy;
1080 else if (Types[TypeIdx] != OpTy)
1081 report("Type mismatch in generic instruction", MO, I, OpTy);
1082 } else {
1083 // Generic instructions must have types attached to their operands.
1084 report("Generic instruction is missing a virtual register type", MO, I);
1085 }
1086 }
1087
1088 // Generic opcodes must not have physical register operands.
1089 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1090 const MachineOperand *MO = &MI->getOperand(I);
1091 if (MO->isReg() && MO->getReg().isPhysical())
1092 report("Generic instruction cannot have physical register", MO, I);
1093 }
1094
1095 // Avoid out of bounds in checks below. This was already reported earlier.
1096 if (MI->getNumOperands() < MCID.getNumOperands())
1097 return;
1098
1100 if (!TII->verifyInstruction(*MI, ErrorInfo))
1101 report(ErrorInfo.data(), MI);
1102
1103 // Verify properties of various specific instruction types
1104 unsigned Opc = MI->getOpcode();
1105 switch (Opc) {
1106 case TargetOpcode::G_ASSERT_SEXT:
1107 case TargetOpcode::G_ASSERT_ZEXT: {
1108 std::string OpcName =
1109 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1110 if (!MI->getOperand(2).isImm()) {
1111 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1112 break;
1113 }
1114
1115 Register Dst = MI->getOperand(0).getReg();
1116 Register Src = MI->getOperand(1).getReg();
1117 LLT SrcTy = MRI->getType(Src);
1118 int64_t Imm = MI->getOperand(2).getImm();
1119 if (Imm <= 0) {
1120 report(Twine(OpcName, " size must be >= 1"), MI);
1121 break;
1122 }
1123
1124 if (Imm >= SrcTy.getScalarSizeInBits()) {
1125 report(Twine(OpcName, " size must be less than source bit width"), MI);
1126 break;
1127 }
1128
1129 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1130 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1131
1132 // Allow only the source bank to be set.
1133 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1134 report(Twine(OpcName, " cannot change register bank"), MI);
1135 break;
1136 }
1137
1138 // Don't allow a class change. Do allow member class->regbank.
1139 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1140 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1141 report(
1142 Twine(OpcName, " source and destination register classes must match"),
1143 MI);
1144 break;
1145 }
1146
1147 break;
1148 }
1149
1150 case TargetOpcode::G_CONSTANT:
1151 case TargetOpcode::G_FCONSTANT: {
1152 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1153 if (DstTy.isVector())
1154 report("Instruction cannot use a vector result type", MI);
1155
1156 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1157 if (!MI->getOperand(1).isCImm()) {
1158 report("G_CONSTANT operand must be cimm", MI);
1159 break;
1160 }
1161
1162 const ConstantInt *CI = MI->getOperand(1).getCImm();
1163 if (CI->getBitWidth() != DstTy.getSizeInBits())
1164 report("inconsistent constant size", MI);
1165 } else {
1166 if (!MI->getOperand(1).isFPImm()) {
1167 report("G_FCONSTANT operand must be fpimm", MI);
1168 break;
1169 }
1170 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1171
1173 DstTy.getSizeInBits()) {
1174 report("inconsistent constant size", MI);
1175 }
1176 }
1177
1178 break;
1179 }
1180 case TargetOpcode::G_LOAD:
1181 case TargetOpcode::G_STORE:
1182 case TargetOpcode::G_ZEXTLOAD:
1183 case TargetOpcode::G_SEXTLOAD: {
1184 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1185 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1186 if (!PtrTy.isPointer())
1187 report("Generic memory instruction must access a pointer", MI);
1188
1189 // Generic loads and stores must have a single MachineMemOperand
1190 // describing that access.
1191 if (!MI->hasOneMemOperand()) {
1192 report("Generic instruction accessing memory must have one mem operand",
1193 MI);
1194 } else {
1195 const MachineMemOperand &MMO = **MI->memoperands_begin();
1196 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1197 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1199 ValTy.getSizeInBits()))
1200 report("Generic extload must have a narrower memory type", MI);
1201 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1203 ValTy.getSizeInBytes()))
1204 report("load memory size cannot exceed result size", MI);
1205 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1207 MMO.getSize().getValue()))
1208 report("store memory size cannot exceed value size", MI);
1209 }
1210
1211 const AtomicOrdering Order = MMO.getSuccessOrdering();
1212 if (Opc == TargetOpcode::G_STORE) {
1213 if (Order == AtomicOrdering::Acquire ||
1215 report("atomic store cannot use acquire ordering", MI);
1216
1217 } else {
1218 if (Order == AtomicOrdering::Release ||
1220 report("atomic load cannot use release ordering", MI);
1221 }
1222 }
1223
1224 break;
1225 }
1226 case TargetOpcode::G_PHI: {
1227 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1228 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1229 [this, &DstTy](const MachineOperand &MO) {
1230 if (!MO.isReg())
1231 return true;
1232 LLT Ty = MRI->getType(MO.getReg());
1233 if (!Ty.isValid() || (Ty != DstTy))
1234 return false;
1235 return true;
1236 }))
1237 report("Generic Instruction G_PHI has operands with incompatible/missing "
1238 "types",
1239 MI);
1240 break;
1241 }
1242 case TargetOpcode::G_BITCAST: {
1243 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1244 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1245 if (!DstTy.isValid() || !SrcTy.isValid())
1246 break;
1247
1248 if (SrcTy.isPointer() != DstTy.isPointer())
1249 report("bitcast cannot convert between pointers and other types", MI);
1250
1251 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1252 report("bitcast sizes must match", MI);
1253
1254 if (SrcTy == DstTy)
1255 report("bitcast must change the type", MI);
1256
1257 break;
1258 }
1259 case TargetOpcode::G_INTTOPTR:
1260 case TargetOpcode::G_PTRTOINT:
1261 case TargetOpcode::G_ADDRSPACE_CAST: {
1262 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1263 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1264 if (!DstTy.isValid() || !SrcTy.isValid())
1265 break;
1266
1267 verifyVectorElementMatch(DstTy, SrcTy, MI);
1268
1269 DstTy = DstTy.getScalarType();
1270 SrcTy = SrcTy.getScalarType();
1271
1272 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1273 if (!DstTy.isPointer())
1274 report("inttoptr result type must be a pointer", MI);
1275 if (SrcTy.isPointer())
1276 report("inttoptr source type must not be a pointer", MI);
1277 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1278 if (!SrcTy.isPointer())
1279 report("ptrtoint source type must be a pointer", MI);
1280 if (DstTy.isPointer())
1281 report("ptrtoint result type must not be a pointer", MI);
1282 } else {
1283 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1284 if (!SrcTy.isPointer() || !DstTy.isPointer())
1285 report("addrspacecast types must be pointers", MI);
1286 else {
1287 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1288 report("addrspacecast must convert different address spaces", MI);
1289 }
1290 }
1291
1292 break;
1293 }
1294 case TargetOpcode::G_PTR_ADD: {
1295 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1296 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1297 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1298 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1299 break;
1300
1301 if (!PtrTy.isPointerOrPointerVector())
1302 report("gep first operand must be a pointer", MI);
1303
1304 if (OffsetTy.isPointerOrPointerVector())
1305 report("gep offset operand must not be a pointer", MI);
1306
1307 if (PtrTy.isPointerOrPointerVector()) {
1308 const DataLayout &DL = MF->getDataLayout();
1309 unsigned AS = PtrTy.getAddressSpace();
1310 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1311 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1312 report("gep offset operand must match index size for address space",
1313 MI);
1314 }
1315 }
1316
1317 // TODO: Is the offset allowed to be a scalar with a vector?
1318 break;
1319 }
1320 case TargetOpcode::G_PTRMASK: {
1321 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1322 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1323 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1324 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1325 break;
1326
1327 if (!DstTy.isPointerOrPointerVector())
1328 report("ptrmask result type must be a pointer", MI);
1329
1330 if (!MaskTy.getScalarType().isScalar())
1331 report("ptrmask mask type must be an integer", MI);
1332
1333 verifyVectorElementMatch(DstTy, MaskTy, MI);
1334 break;
1335 }
1336 case TargetOpcode::G_SEXT:
1337 case TargetOpcode::G_ZEXT:
1338 case TargetOpcode::G_ANYEXT:
1339 case TargetOpcode::G_TRUNC:
1340 case TargetOpcode::G_FPEXT:
1341 case TargetOpcode::G_FPTRUNC: {
1342 // Number of operands and presense of types is already checked (and
1343 // reported in case of any issues), so no need to report them again. As
1344 // we're trying to report as many issues as possible at once, however, the
1345 // instructions aren't guaranteed to have the right number of operands or
1346 // types attached to them at this point
1347 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1348 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1349 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1350 if (!DstTy.isValid() || !SrcTy.isValid())
1351 break;
1352
1354 report("Generic extend/truncate can not operate on pointers", MI);
1355
1356 verifyVectorElementMatch(DstTy, SrcTy, MI);
1357
1358 unsigned DstSize = DstTy.getScalarSizeInBits();
1359 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1360 switch (MI->getOpcode()) {
1361 default:
1362 if (DstSize <= SrcSize)
1363 report("Generic extend has destination type no larger than source", MI);
1364 break;
1365 case TargetOpcode::G_TRUNC:
1366 case TargetOpcode::G_FPTRUNC:
1367 if (DstSize >= SrcSize)
1368 report("Generic truncate has destination type no smaller than source",
1369 MI);
1370 break;
1371 }
1372 break;
1373 }
1374 case TargetOpcode::G_SELECT: {
1375 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1376 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1377 if (!SelTy.isValid() || !CondTy.isValid())
1378 break;
1379
1380 // Scalar condition select on a vector is valid.
1381 if (CondTy.isVector())
1382 verifyVectorElementMatch(SelTy, CondTy, MI);
1383 break;
1384 }
1385 case TargetOpcode::G_MERGE_VALUES: {
1386 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1387 // e.g. s2N = MERGE sN, sN
1388 // Merging multiple scalars into a vector is not allowed, should use
1389 // G_BUILD_VECTOR for that.
1390 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1391 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1392 if (DstTy.isVector() || SrcTy.isVector())
1393 report("G_MERGE_VALUES cannot operate on vectors", MI);
1394
1395 const unsigned NumOps = MI->getNumOperands();
1396 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1397 report("G_MERGE_VALUES result size is inconsistent", MI);
1398
1399 for (unsigned I = 2; I != NumOps; ++I) {
1400 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1401 report("G_MERGE_VALUES source types do not match", MI);
1402 }
1403
1404 break;
1405 }
1406 case TargetOpcode::G_UNMERGE_VALUES: {
1407 unsigned NumDsts = MI->getNumOperands() - 1;
1408 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1409 for (unsigned i = 1; i < NumDsts; ++i) {
1410 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1411 report("G_UNMERGE_VALUES destination types do not match", MI);
1412 break;
1413 }
1414 }
1415
1416 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1417 if (DstTy.isVector()) {
1418 // This case is the converse of G_CONCAT_VECTORS.
1419 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1420 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1421 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1422 report("G_UNMERGE_VALUES source operand does not match vector "
1423 "destination operands",
1424 MI);
1425 } else if (SrcTy.isVector()) {
1426 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1427 // mismatched types as long as the total size matches:
1428 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1429 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1430 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1431 "destination operands",
1432 MI);
1433 } else {
1434 // This case is the converse of G_MERGE_VALUES.
1435 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1436 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1437 "destination operands",
1438 MI);
1439 }
1440 }
1441 break;
1442 }
1443 case TargetOpcode::G_BUILD_VECTOR: {
1444 // Source types must be scalars, dest type a vector. Total size of scalars
1445 // must match the dest vector size.
1446 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1447 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1448 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1449 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1450 break;
1451 }
1452
1453 if (DstTy.getElementType() != SrcEltTy)
1454 report("G_BUILD_VECTOR result element type must match source type", MI);
1455
1456 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1457 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1458
1459 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1460 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1461 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1462
1463 break;
1464 }
1465 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1466 // Source types must be scalars, dest type a vector. Scalar types must be
1467 // larger than the dest vector elt type, as this is a truncating operation.
1468 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1469 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1470 if (!DstTy.isVector() || SrcEltTy.isVector())
1471 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1472 MI);
1473 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1474 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1475 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1476 MI);
1477 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1478 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1479 "dest elt type",
1480 MI);
1481 break;
1482 }
1483 case TargetOpcode::G_CONCAT_VECTORS: {
1484 // Source types should be vectors, and total size should match the dest
1485 // vector size.
1486 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1487 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1488 if (!DstTy.isVector() || !SrcTy.isVector())
1489 report("G_CONCAT_VECTOR requires vector source and destination operands",
1490 MI);
1491
1492 if (MI->getNumOperands() < 3)
1493 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1494
1495 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1496 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1497 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1498 if (DstTy.getElementCount() !=
1499 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1500 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1501 break;
1502 }
1503 case TargetOpcode::G_ICMP:
1504 case TargetOpcode::G_FCMP: {
1505 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1506 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1507
1508 if ((DstTy.isVector() != SrcTy.isVector()) ||
1509 (DstTy.isVector() &&
1510 DstTy.getElementCount() != SrcTy.getElementCount()))
1511 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1512
1513 break;
1514 }
1515 case TargetOpcode::G_EXTRACT: {
1516 const MachineOperand &SrcOp = MI->getOperand(1);
1517 if (!SrcOp.isReg()) {
1518 report("extract source must be a register", MI);
1519 break;
1520 }
1521
1522 const MachineOperand &OffsetOp = MI->getOperand(2);
1523 if (!OffsetOp.isImm()) {
1524 report("extract offset must be a constant", MI);
1525 break;
1526 }
1527
1528 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1529 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1530 if (SrcSize == DstSize)
1531 report("extract source must be larger than result", MI);
1532
1533 if (DstSize + OffsetOp.getImm() > SrcSize)
1534 report("extract reads past end of register", MI);
1535 break;
1536 }
1537 case TargetOpcode::G_INSERT: {
1538 const MachineOperand &SrcOp = MI->getOperand(2);
1539 if (!SrcOp.isReg()) {
1540 report("insert source must be a register", MI);
1541 break;
1542 }
1543
1544 const MachineOperand &OffsetOp = MI->getOperand(3);
1545 if (!OffsetOp.isImm()) {
1546 report("insert offset must be a constant", MI);
1547 break;
1548 }
1549
1550 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1551 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1552
1553 if (DstSize <= SrcSize)
1554 report("inserted size must be smaller than total register", MI);
1555
1556 if (SrcSize + OffsetOp.getImm() > DstSize)
1557 report("insert writes past end of register", MI);
1558
1559 break;
1560 }
1561 case TargetOpcode::G_JUMP_TABLE: {
1562 if (!MI->getOperand(1).isJTI())
1563 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1564 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1565 if (!DstTy.isPointer())
1566 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1567 break;
1568 }
1569 case TargetOpcode::G_BRJT: {
1570 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1571 report("G_BRJT src operand 0 must be a pointer type", MI);
1572
1573 if (!MI->getOperand(1).isJTI())
1574 report("G_BRJT src operand 1 must be a jump table index", MI);
1575
1576 const auto &IdxOp = MI->getOperand(2);
1577 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1578 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1579 break;
1580 }
1581 case TargetOpcode::G_INTRINSIC:
1582 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1583 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1584 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1585 // TODO: Should verify number of def and use operands, but the current
1586 // interface requires passing in IR types for mangling.
1587 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1588 if (!IntrIDOp.isIntrinsicID()) {
1589 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1590 break;
1591 }
1592
1593 if (!verifyGIntrinsicSideEffects(MI))
1594 break;
1595 if (!verifyGIntrinsicConvergence(MI))
1596 break;
1597
1598 break;
1599 }
1600 case TargetOpcode::G_SEXT_INREG: {
1601 if (!MI->getOperand(2).isImm()) {
1602 report("G_SEXT_INREG expects an immediate operand #2", MI);
1603 break;
1604 }
1605
1606 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1607 int64_t Imm = MI->getOperand(2).getImm();
1608 if (Imm <= 0)
1609 report("G_SEXT_INREG size must be >= 1", MI);
1610 if (Imm >= SrcTy.getScalarSizeInBits())
1611 report("G_SEXT_INREG size must be less than source bit width", MI);
1612 break;
1613 }
1614 case TargetOpcode::G_BSWAP: {
1615 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1616 if (DstTy.getScalarSizeInBits() % 16 != 0)
1617 report("G_BSWAP size must be a multiple of 16 bits", MI);
1618 break;
1619 }
1620 case TargetOpcode::G_VSCALE: {
1621 if (!MI->getOperand(1).isCImm()) {
1622 report("G_VSCALE operand must be cimm", MI);
1623 break;
1624 }
1625 if (MI->getOperand(1).getCImm()->isZero()) {
1626 report("G_VSCALE immediate cannot be zero", MI);
1627 break;
1628 }
1629 break;
1630 }
1631 case TargetOpcode::G_INSERT_SUBVECTOR: {
1632 const MachineOperand &Src0Op = MI->getOperand(1);
1633 if (!Src0Op.isReg()) {
1634 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1635 break;
1636 }
1637
1638 const MachineOperand &Src1Op = MI->getOperand(2);
1639 if (!Src1Op.isReg()) {
1640 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1641 break;
1642 }
1643
1644 const MachineOperand &IndexOp = MI->getOperand(3);
1645 if (!IndexOp.isImm()) {
1646 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1647 break;
1648 }
1649
1650 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1651 LLT Src0Ty = MRI->getType(Src0Op.getReg());
1652 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1653
1654 if (!DstTy.isVector()) {
1655 report("Destination type must be a vector", MI);
1656 break;
1657 }
1658
1659 if (!Src0Ty.isVector()) {
1660 report("First source must be a vector", MI);
1661 break;
1662 }
1663
1664 if (!Src1Ty.isVector()) {
1665 report("Second source must be a vector", MI);
1666 break;
1667 }
1668
1669 if (DstTy != Src0Ty) {
1670 report("Destination type must match the first source vector type", MI);
1671 break;
1672 }
1673
1674 if (Src0Ty.getElementType() != Src1Ty.getElementType()) {
1675 report("Element type of source vectors must be the same", MI);
1676 break;
1677 }
1678
1679 if (IndexOp.getImm() != 0 &&
1680 Src1Ty.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1681 report("Index must be a multiple of the second source vector's "
1682 "minimum vector length",
1683 MI);
1684 break;
1685 }
1686 break;
1687 }
1688 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1689 const MachineOperand &SrcOp = MI->getOperand(1);
1690 if (!SrcOp.isReg()) {
1691 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1692 break;
1693 }
1694
1695 const MachineOperand &IndexOp = MI->getOperand(2);
1696 if (!IndexOp.isImm()) {
1697 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1698 break;
1699 }
1700
1701 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1702 LLT SrcTy = MRI->getType(SrcOp.getReg());
1703
1704 if (!DstTy.isVector()) {
1705 report("Destination type must be a vector", MI);
1706 break;
1707 }
1708
1709 if (!SrcTy.isVector()) {
1710 report("First source must be a vector", MI);
1711 break;
1712 }
1713
1714 if (DstTy.getElementType() != SrcTy.getElementType()) {
1715 report("Element type of vectors must be the same", MI);
1716 break;
1717 }
1718
1719 if (IndexOp.getImm() != 0 &&
1720 SrcTy.getElementCount().getKnownMinValue() % IndexOp.getImm() != 0) {
1721 report("Index must be a multiple of the source vector's minimum vector "
1722 "length",
1723 MI);
1724 break;
1725 }
1726
1727 break;
1728 }
1729 case TargetOpcode::G_SHUFFLE_VECTOR: {
1730 const MachineOperand &MaskOp = MI->getOperand(3);
1731 if (!MaskOp.isShuffleMask()) {
1732 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1733 break;
1734 }
1735
1736 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1737 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1738 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1739
1740 if (Src0Ty != Src1Ty)
1741 report("Source operands must be the same type", MI);
1742
1743 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1744 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1745
1746 // Don't check that all operands are vector because scalars are used in
1747 // place of 1 element vectors.
1748 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1749 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1750
1751 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1752
1753 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1754 report("Wrong result type for shufflemask", MI);
1755
1756 for (int Idx : MaskIdxes) {
1757 if (Idx < 0)
1758 continue;
1759
1760 if (Idx >= 2 * SrcNumElts)
1761 report("Out of bounds shuffle index", MI);
1762 }
1763
1764 break;
1765 }
1766
1767 case TargetOpcode::G_SPLAT_VECTOR: {
1768 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1769 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1770
1771 if (!DstTy.isScalableVector())
1772 report("Destination type must be a scalable vector", MI);
1773
1774 if (!SrcTy.isScalar())
1775 report("Source type must be a scalar", MI);
1776
1777 if (DstTy.getScalarType() != SrcTy)
1778 report("Element type of the destination must be the same type as the "
1779 "source type",
1780 MI);
1781
1782 break;
1783 }
1784 case TargetOpcode::G_DYN_STACKALLOC: {
1785 const MachineOperand &DstOp = MI->getOperand(0);
1786 const MachineOperand &AllocOp = MI->getOperand(1);
1787 const MachineOperand &AlignOp = MI->getOperand(2);
1788
1789 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1790 report("dst operand 0 must be a pointer type", MI);
1791 break;
1792 }
1793
1794 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1795 report("src operand 1 must be a scalar reg type", MI);
1796 break;
1797 }
1798
1799 if (!AlignOp.isImm()) {
1800 report("src operand 2 must be an immediate type", MI);
1801 break;
1802 }
1803 break;
1804 }
1805 case TargetOpcode::G_MEMCPY_INLINE:
1806 case TargetOpcode::G_MEMCPY:
1807 case TargetOpcode::G_MEMMOVE: {
1808 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1809 if (MMOs.size() != 2) {
1810 report("memcpy/memmove must have 2 memory operands", MI);
1811 break;
1812 }
1813
1814 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1815 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1816 report("wrong memory operand types", MI);
1817 break;
1818 }
1819
1820 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1821 report("inconsistent memory operand sizes", MI);
1822
1823 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1824 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1825
1826 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1827 report("memory instruction operand must be a pointer", MI);
1828 break;
1829 }
1830
1831 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1832 report("inconsistent store address space", MI);
1833 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1834 report("inconsistent load address space", MI);
1835
1836 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1837 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1838 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1839
1840 break;
1841 }
1842 case TargetOpcode::G_BZERO:
1843 case TargetOpcode::G_MEMSET: {
1844 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1845 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1846 if (MMOs.size() != 1) {
1847 report(Twine(Name, " must have 1 memory operand"), MI);
1848 break;
1849 }
1850
1851 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1852 report(Twine(Name, " memory operand must be a store"), MI);
1853 break;
1854 }
1855
1856 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1857 if (!DstPtrTy.isPointer()) {
1858 report(Twine(Name, " operand must be a pointer"), MI);
1859 break;
1860 }
1861
1862 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1863 report("inconsistent " + Twine(Name, " address space"), MI);
1864
1865 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1866 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1867 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1868
1869 break;
1870 }
1871 case TargetOpcode::G_UBSANTRAP: {
1872 const MachineOperand &KindOp = MI->getOperand(0);
1873 if (!MI->getOperand(0).isImm()) {
1874 report("Crash kind must be an immediate", &KindOp, 0);
1875 break;
1876 }
1877 int64_t Kind = MI->getOperand(0).getImm();
1878 if (!isInt<8>(Kind))
1879 report("Crash kind must be 8 bit wide", &KindOp, 0);
1880 break;
1881 }
1882 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1883 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1884 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1885 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1886 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1887 if (!DstTy.isScalar())
1888 report("Vector reduction requires a scalar destination type", MI);
1889 if (!Src1Ty.isScalar())
1890 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1891 if (!Src2Ty.isVector())
1892 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1893 break;
1894 }
1895 case TargetOpcode::G_VECREDUCE_FADD:
1896 case TargetOpcode::G_VECREDUCE_FMUL:
1897 case TargetOpcode::G_VECREDUCE_FMAX:
1898 case TargetOpcode::G_VECREDUCE_FMIN:
1899 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1900 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1901 case TargetOpcode::G_VECREDUCE_ADD:
1902 case TargetOpcode::G_VECREDUCE_MUL:
1903 case TargetOpcode::G_VECREDUCE_AND:
1904 case TargetOpcode::G_VECREDUCE_OR:
1905 case TargetOpcode::G_VECREDUCE_XOR:
1906 case TargetOpcode::G_VECREDUCE_SMAX:
1907 case TargetOpcode::G_VECREDUCE_SMIN:
1908 case TargetOpcode::G_VECREDUCE_UMAX:
1909 case TargetOpcode::G_VECREDUCE_UMIN: {
1910 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1911 if (!DstTy.isScalar())
1912 report("Vector reduction requires a scalar destination type", MI);
1913 break;
1914 }
1915
1916 case TargetOpcode::G_SBFX:
1917 case TargetOpcode::G_UBFX: {
1918 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1919 if (DstTy.isVector()) {
1920 report("Bitfield extraction is not supported on vectors", MI);
1921 break;
1922 }
1923 break;
1924 }
1925 case TargetOpcode::G_SHL:
1926 case TargetOpcode::G_LSHR:
1927 case TargetOpcode::G_ASHR:
1928 case TargetOpcode::G_ROTR:
1929 case TargetOpcode::G_ROTL: {
1930 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1931 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1932 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1933 report("Shifts and rotates require operands to be either all scalars or "
1934 "all vectors",
1935 MI);
1936 break;
1937 }
1938 break;
1939 }
1940 case TargetOpcode::G_LLROUND:
1941 case TargetOpcode::G_LROUND: {
1942 verifyAllRegOpsScalar(*MI, *MRI);
1943 break;
1944 }
1945 case TargetOpcode::G_IS_FPCLASS: {
1946 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1947 LLT DestEltTy = DestTy.getScalarType();
1948 if (!DestEltTy.isScalar()) {
1949 report("Destination must be a scalar or vector of scalars", MI);
1950 break;
1951 }
1952 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1953 LLT SrcEltTy = SrcTy.getScalarType();
1954 if (!SrcEltTy.isScalar()) {
1955 report("Source must be a scalar or vector of scalars", MI);
1956 break;
1957 }
1958 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1959 break;
1960 const MachineOperand &TestMO = MI->getOperand(2);
1961 if (!TestMO.isImm()) {
1962 report("floating-point class set (operand 2) must be an immediate", MI);
1963 break;
1964 }
1965 int64_t Test = TestMO.getImm();
1966 if (Test < 0 || Test > fcAllFlags) {
1967 report("Incorrect floating-point class set (operand 2)", MI);
1968 break;
1969 }
1970 break;
1971 }
1972 case TargetOpcode::G_PREFETCH: {
1973 const MachineOperand &AddrOp = MI->getOperand(0);
1974 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
1975 report("addr operand must be a pointer", &AddrOp, 0);
1976 break;
1977 }
1978 const MachineOperand &RWOp = MI->getOperand(1);
1979 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
1980 report("rw operand must be an immediate 0-1", &RWOp, 1);
1981 break;
1982 }
1983 const MachineOperand &LocalityOp = MI->getOperand(2);
1984 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
1985 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
1986 break;
1987 }
1988 const MachineOperand &CacheTypeOp = MI->getOperand(3);
1989 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
1990 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
1991 break;
1992 }
1993 break;
1994 }
1995 case TargetOpcode::G_ASSERT_ALIGN: {
1996 if (MI->getOperand(2).getImm() < 1)
1997 report("alignment immediate must be >= 1", MI);
1998 break;
1999 }
2000 case TargetOpcode::G_CONSTANT_POOL: {
2001 if (!MI->getOperand(1).isCPI())
2002 report("Src operand 1 must be a constant pool index", MI);
2003 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2004 report("Dst operand 0 must be a pointer", MI);
2005 break;
2006 }
2007 default:
2008 break;
2009 }
2010}
2011
2012void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2013 const MCInstrDesc &MCID = MI->getDesc();
2014 if (MI->getNumOperands() < MCID.getNumOperands()) {
2015 report("Too few operands", MI);
2016 errs() << MCID.getNumOperands() << " operands expected, but "
2017 << MI->getNumOperands() << " given.\n";
2018 }
2019
2020 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2021 report("NoConvergent flag expected only on convergent instructions.", MI);
2022
2023 if (MI->isPHI()) {
2024 if (MF->getProperties().hasProperty(
2026 report("Found PHI instruction with NoPHIs property set", MI);
2027
2028 if (FirstNonPHI)
2029 report("Found PHI instruction after non-PHI", MI);
2030 } else if (FirstNonPHI == nullptr)
2031 FirstNonPHI = MI;
2032
2033 // Check the tied operands.
2034 if (MI->isInlineAsm())
2035 verifyInlineAsm(MI);
2036
2037 // Check that unspillable terminators define a reg and have at most one use.
2038 if (TII->isUnspillableTerminator(MI)) {
2039 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2040 report("Unspillable Terminator does not define a reg", MI);
2041 Register Def = MI->getOperand(0).getReg();
2042 if (Def.isVirtual() &&
2043 !MF->getProperties().hasProperty(
2045 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2046 report("Unspillable Terminator expected to have at most one use!", MI);
2047 }
2048
2049 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2050 // DBG_VALUEs: these are convenient to use in tests, but should never get
2051 // generated.
2052 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2053 if (!MI->getDebugLoc())
2054 report("Missing DebugLoc for debug instruction", MI);
2055
2056 // Meta instructions should never be the subject of debug value tracking,
2057 // they don't create a value in the output program at all.
2058 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2059 report("Metadata instruction should not have a value tracking number", MI);
2060
2061 // Check the MachineMemOperands for basic consistency.
2062 for (MachineMemOperand *Op : MI->memoperands()) {
2063 if (Op->isLoad() && !MI->mayLoad())
2064 report("Missing mayLoad flag", MI);
2065 if (Op->isStore() && !MI->mayStore())
2066 report("Missing mayStore flag", MI);
2067 }
2068
2069 // Debug values must not have a slot index.
2070 // Other instructions must have one, unless they are inside a bundle.
2071 if (LiveInts) {
2072 bool mapped = !LiveInts->isNotInMIMap(*MI);
2073 if (MI->isDebugOrPseudoInstr()) {
2074 if (mapped)
2075 report("Debug instruction has a slot index", MI);
2076 } else if (MI->isInsideBundle()) {
2077 if (mapped)
2078 report("Instruction inside bundle has a slot index", MI);
2079 } else {
2080 if (!mapped)
2081 report("Missing slot index", MI);
2082 }
2083 }
2084
2085 unsigned Opc = MCID.getOpcode();
2087 verifyPreISelGenericInstruction(MI);
2088 return;
2089 }
2090
2092 if (!TII->verifyInstruction(*MI, ErrorInfo))
2093 report(ErrorInfo.data(), MI);
2094
2095 // Verify properties of various specific instruction types
2096 switch (MI->getOpcode()) {
2097 case TargetOpcode::COPY: {
2098 const MachineOperand &DstOp = MI->getOperand(0);
2099 const MachineOperand &SrcOp = MI->getOperand(1);
2100 const Register SrcReg = SrcOp.getReg();
2101 const Register DstReg = DstOp.getReg();
2102
2103 LLT DstTy = MRI->getType(DstReg);
2104 LLT SrcTy = MRI->getType(SrcReg);
2105 if (SrcTy.isValid() && DstTy.isValid()) {
2106 // If both types are valid, check that the types are the same.
2107 if (SrcTy != DstTy) {
2108 report("Copy Instruction is illegal with mismatching types", MI);
2109 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
2110 }
2111
2112 break;
2113 }
2114
2115 if (!SrcTy.isValid() && !DstTy.isValid())
2116 break;
2117
2118 // If we have only one valid type, this is likely a copy between a virtual
2119 // and physical register.
2120 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2121 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2122 if (SrcReg.isPhysical() && DstTy.isValid()) {
2123 const TargetRegisterClass *SrcRC =
2124 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2125 if (SrcRC)
2126 SrcSize = TRI->getRegSizeInBits(*SrcRC);
2127 }
2128
2129 if (DstReg.isPhysical() && SrcTy.isValid()) {
2130 const TargetRegisterClass *DstRC =
2131 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2132 if (DstRC)
2133 DstSize = TRI->getRegSizeInBits(*DstRC);
2134 }
2135
2136 // The next two checks allow COPY between physical and virtual registers,
2137 // when the virtual register has a scalable size and the physical register
2138 // has a fixed size. These checks allow COPY between *potentialy* mismatched
2139 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2140 // be able to resolve a fixed size for the scalable vector, and at that
2141 // point this function will know for sure whether the sizes are mismatched
2142 // and correctly report a size mismatch.
2143 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2144 !SrcSize.isScalable())
2145 break;
2146 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2147 !DstSize.isScalable())
2148 break;
2149
2150 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2151 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2152 report("Copy Instruction is illegal with mismatching sizes", MI);
2153 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
2154 << "\n";
2155 }
2156 }
2157 break;
2158 }
2159 case TargetOpcode::STATEPOINT: {
2160 StatepointOpers SO(MI);
2161 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2162 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2163 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2164 report("meta operands to STATEPOINT not constant!", MI);
2165 break;
2166 }
2167
2168 auto VerifyStackMapConstant = [&](unsigned Offset) {
2169 if (Offset >= MI->getNumOperands()) {
2170 report("stack map constant to STATEPOINT is out of range!", MI);
2171 return;
2172 }
2173 if (!MI->getOperand(Offset - 1).isImm() ||
2174 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2175 !MI->getOperand(Offset).isImm())
2176 report("stack map constant to STATEPOINT not well formed!", MI);
2177 };
2178 VerifyStackMapConstant(SO.getCCIdx());
2179 VerifyStackMapConstant(SO.getFlagsIdx());
2180 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2181 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2182 VerifyStackMapConstant(SO.getNumAllocaIdx());
2183 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2184
2185 // Verify that all explicit statepoint defs are tied to gc operands as
2186 // they are expected to be a relocation of gc operands.
2187 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2188 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2189 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2190 unsigned UseOpIdx;
2191 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2192 report("STATEPOINT defs expected to be tied", MI);
2193 break;
2194 }
2195 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2196 report("STATEPOINT def tied to non-gc operand", MI);
2197 break;
2198 }
2199 }
2200
2201 // TODO: verify we have properly encoded deopt arguments
2202 } break;
2203 case TargetOpcode::INSERT_SUBREG: {
2204 unsigned InsertedSize;
2205 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2206 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2207 else
2208 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2209 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2210 if (SubRegSize < InsertedSize) {
2211 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2212 "size than the subreg it was inserted into", MI);
2213 break;
2214 }
2215 } break;
2216 case TargetOpcode::REG_SEQUENCE: {
2217 unsigned NumOps = MI->getNumOperands();
2218 if (!(NumOps & 1)) {
2219 report("Invalid number of operands for REG_SEQUENCE", MI);
2220 break;
2221 }
2222
2223 for (unsigned I = 1; I != NumOps; I += 2) {
2224 const MachineOperand &RegOp = MI->getOperand(I);
2225 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2226
2227 if (!RegOp.isReg())
2228 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2229
2230 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2231 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2232 report("Invalid subregister index operand for REG_SEQUENCE",
2233 &SubRegOp, I + 1);
2234 }
2235 }
2236
2237 Register DstReg = MI->getOperand(0).getReg();
2238 if (DstReg.isPhysical())
2239 report("REG_SEQUENCE does not support physical register results", MI);
2240
2241 if (MI->getOperand(0).getSubReg())
2242 report("Invalid subreg result for REG_SEQUENCE", MI);
2243
2244 break;
2245 }
2246 }
2247}
2248
2249void
2250MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2251 const MachineInstr *MI = MO->getParent();
2252 const MCInstrDesc &MCID = MI->getDesc();
2253 unsigned NumDefs = MCID.getNumDefs();
2254 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2255 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2256
2257 // The first MCID.NumDefs operands must be explicit register defines
2258 if (MONum < NumDefs) {
2259 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2260 if (!MO->isReg())
2261 report("Explicit definition must be a register", MO, MONum);
2262 else if (!MO->isDef() && !MCOI.isOptionalDef())
2263 report("Explicit definition marked as use", MO, MONum);
2264 else if (MO->isImplicit())
2265 report("Explicit definition marked as implicit", MO, MONum);
2266 } else if (MONum < MCID.getNumOperands()) {
2267 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2268 // Don't check if it's the last operand in a variadic instruction. See,
2269 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2270 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2271 if (!IsOptional) {
2272 if (MO->isReg()) {
2273 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2274 report("Explicit operand marked as def", MO, MONum);
2275 if (MO->isImplicit())
2276 report("Explicit operand marked as implicit", MO, MONum);
2277 }
2278
2279 // Check that an instruction has register operands only as expected.
2280 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2281 !MO->isReg() && !MO->isFI())
2282 report("Expected a register operand.", MO, MONum);
2283 if (MO->isReg()) {
2286 !TII->isPCRelRegisterOperandLegal(*MO)))
2287 report("Expected a non-register operand.", MO, MONum);
2288 }
2289 }
2290
2291 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2292 if (TiedTo != -1) {
2293 if (!MO->isReg())
2294 report("Tied use must be a register", MO, MONum);
2295 else if (!MO->isTied())
2296 report("Operand should be tied", MO, MONum);
2297 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2298 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2299 else if (MO->getReg().isPhysical()) {
2300 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2301 if (!MOTied.isReg())
2302 report("Tied counterpart must be a register", &MOTied, TiedTo);
2303 else if (MOTied.getReg().isPhysical() &&
2304 MO->getReg() != MOTied.getReg())
2305 report("Tied physical registers must match.", &MOTied, TiedTo);
2306 }
2307 } else if (MO->isReg() && MO->isTied())
2308 report("Explicit operand should not be tied", MO, MONum);
2309 } else if (!MI->isVariadic()) {
2310 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2311 if (!MO->isValidExcessOperand())
2312 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2313 }
2314
2315 switch (MO->getType()) {
2317 // Verify debug flag on debug instructions. Check this first because reg0
2318 // indicates an undefined debug value.
2319 if (MI->isDebugInstr() && MO->isUse()) {
2320 if (!MO->isDebug())
2321 report("Register operand must be marked debug", MO, MONum);
2322 } else if (MO->isDebug()) {
2323 report("Register operand must not be marked debug", MO, MONum);
2324 }
2325
2326 const Register Reg = MO->getReg();
2327 if (!Reg)
2328 return;
2329 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2330 checkLiveness(MO, MONum);
2331
2332 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2333 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2334 report("Undef virtual register def operands require a subregister", MO, MONum);
2335
2336 // Verify the consistency of tied operands.
2337 if (MO->isTied()) {
2338 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2339 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2340 if (!OtherMO.isReg())
2341 report("Must be tied to a register", MO, MONum);
2342 if (!OtherMO.isTied())
2343 report("Missing tie flags on tied operand", MO, MONum);
2344 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2345 report("Inconsistent tie links", MO, MONum);
2346 if (MONum < MCID.getNumDefs()) {
2347 if (OtherIdx < MCID.getNumOperands()) {
2348 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2349 report("Explicit def tied to explicit use without tie constraint",
2350 MO, MONum);
2351 } else {
2352 if (!OtherMO.isImplicit())
2353 report("Explicit def should be tied to implicit use", MO, MONum);
2354 }
2355 }
2356 }
2357
2358 // Verify two-address constraints after the twoaddressinstruction pass.
2359 // Both twoaddressinstruction pass and phi-node-elimination pass call
2360 // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2361 // after twoaddressinstruction pass not after phi-node-elimination pass. So
2362 // we shouldn't use the IsSSA as the condition, we should based on
2363 // TiedOpsRewritten property to verify two-address constraints, this
2364 // property will be set in twoaddressinstruction pass.
2365 unsigned DefIdx;
2366 if (MF->getProperties().hasProperty(
2368 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2369 Reg != MI->getOperand(DefIdx).getReg())
2370 report("Two-address instruction operands must be identical", MO, MONum);
2371
2372 // Check register classes.
2373 unsigned SubIdx = MO->getSubReg();
2374
2375 if (Reg.isPhysical()) {
2376 if (SubIdx) {
2377 report("Illegal subregister index for physical register", MO, MONum);
2378 return;
2379 }
2380 if (MONum < MCID.getNumOperands()) {
2381 if (const TargetRegisterClass *DRC =
2382 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2383 if (!DRC->contains(Reg)) {
2384 report("Illegal physical register for instruction", MO, MONum);
2385 errs() << printReg(Reg, TRI) << " is not a "
2386 << TRI->getRegClassName(DRC) << " register.\n";
2387 }
2388 }
2389 }
2390 if (MO->isRenamable()) {
2391 if (MRI->isReserved(Reg)) {
2392 report("isRenamable set on reserved register", MO, MONum);
2393 return;
2394 }
2395 }
2396 } else {
2397 // Virtual register.
2398 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2399 if (!RC) {
2400 // This is a generic virtual register.
2401
2402 // Do not allow undef uses for generic virtual registers. This ensures
2403 // getVRegDef can never fail and return null on a generic register.
2404 //
2405 // FIXME: This restriction should probably be broadened to all SSA
2406 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2407 // run on the SSA function just before phi elimination.
2408 if (MO->isUndef())
2409 report("Generic virtual register use cannot be undef", MO, MONum);
2410
2411 // Debug value instruction is permitted to use undefined vregs.
2412 // This is a performance measure to skip the overhead of immediately
2413 // pruning unused debug operands. The final undef substitution occurs
2414 // when debug values are allocated in LDVImpl::handleDebugValue, so
2415 // these verifications always apply after this pass.
2416 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2417 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2418 // If we're post-Select, we can't have gvregs anymore.
2419 if (isFunctionSelected) {
2420 report("Generic virtual register invalid in a Selected function",
2421 MO, MONum);
2422 return;
2423 }
2424
2425 // The gvreg must have a type and it must not have a SubIdx.
2426 LLT Ty = MRI->getType(Reg);
2427 if (!Ty.isValid()) {
2428 report("Generic virtual register must have a valid type", MO,
2429 MONum);
2430 return;
2431 }
2432
2433 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2434 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2435
2436 // If we're post-RegBankSelect, the gvreg must have a bank.
2437 if (!RegBank && isFunctionRegBankSelected) {
2438 report("Generic virtual register must have a bank in a "
2439 "RegBankSelected function",
2440 MO, MONum);
2441 return;
2442 }
2443
2444 // Make sure the register fits into its register bank if any.
2445 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2446 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2447 report("Register bank is too small for virtual register", MO,
2448 MONum);
2449 errs() << "Register bank " << RegBank->getName() << " too small("
2450 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2451 << Ty.getSizeInBits() << "-bits\n";
2452 return;
2453 }
2454 }
2455
2456 if (SubIdx) {
2457 report("Generic virtual register does not allow subregister index", MO,
2458 MONum);
2459 return;
2460 }
2461
2462 // If this is a target specific instruction and this operand
2463 // has register class constraint, the virtual register must
2464 // comply to it.
2465 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2466 MONum < MCID.getNumOperands() &&
2467 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2468 report("Virtual register does not match instruction constraint", MO,
2469 MONum);
2470 errs() << "Expect register class "
2471 << TRI->getRegClassName(
2472 TII->getRegClass(MCID, MONum, TRI, *MF))
2473 << " but got nothing\n";
2474 return;
2475 }
2476
2477 break;
2478 }
2479 if (SubIdx) {
2480 const TargetRegisterClass *SRC =
2481 TRI->getSubClassWithSubReg(RC, SubIdx);
2482 if (!SRC) {
2483 report("Invalid subregister index for virtual register", MO, MONum);
2484 errs() << "Register class " << TRI->getRegClassName(RC)
2485 << " does not support subreg index " << SubIdx << "\n";
2486 return;
2487 }
2488 if (RC != SRC) {
2489 report("Invalid register class for subregister index", MO, MONum);
2490 errs() << "Register class " << TRI->getRegClassName(RC)
2491 << " does not fully support subreg index " << SubIdx << "\n";
2492 return;
2493 }
2494 }
2495 if (MONum < MCID.getNumOperands()) {
2496 if (const TargetRegisterClass *DRC =
2497 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2498 if (SubIdx) {
2499 const TargetRegisterClass *SuperRC =
2500 TRI->getLargestLegalSuperClass(RC, *MF);
2501 if (!SuperRC) {
2502 report("No largest legal super class exists.", MO, MONum);
2503 return;
2504 }
2505 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2506 if (!DRC) {
2507 report("No matching super-reg register class.", MO, MONum);
2508 return;
2509 }
2510 }
2511 if (!RC->hasSuperClassEq(DRC)) {
2512 report("Illegal virtual register for instruction", MO, MONum);
2513 errs() << "Expected a " << TRI->getRegClassName(DRC)
2514 << " register, but got a " << TRI->getRegClassName(RC)
2515 << " register\n";
2516 }
2517 }
2518 }
2519 }
2520 break;
2521 }
2522
2524 regMasks.push_back(MO->getRegMask());
2525 break;
2526
2528 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2529 report("PHI operand is not in the CFG", MO, MONum);
2530 break;
2531
2533 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2534 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2535 int FI = MO->getIndex();
2536 LiveInterval &LI = LiveStks->getInterval(FI);
2537 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2538
2539 bool stores = MI->mayStore();
2540 bool loads = MI->mayLoad();
2541 // For a memory-to-memory move, we need to check if the frame
2542 // index is used for storing or loading, by inspecting the
2543 // memory operands.
2544 if (stores && loads) {
2545 for (auto *MMO : MI->memoperands()) {
2546 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2547 if (PSV == nullptr) continue;
2549 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2550 if (Value == nullptr) continue;
2551 if (Value->getFrameIndex() != FI) continue;
2552
2553 if (MMO->isStore())
2554 loads = false;
2555 else
2556 stores = false;
2557 break;
2558 }
2559 if (loads == stores)
2560 report("Missing fixed stack memoperand.", MI);
2561 }
2562 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2563 report("Instruction loads from dead spill slot", MO, MONum);
2564 errs() << "Live stack: " << LI << '\n';
2565 }
2566 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2567 report("Instruction stores to dead spill slot", MO, MONum);
2568 errs() << "Live stack: " << LI << '\n';
2569 }
2570 }
2571 break;
2572
2574 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2575 report("CFI instruction has invalid index", MO, MONum);
2576 break;
2577
2578 default:
2579 break;
2580 }
2581}
2582
2583void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2584 unsigned MONum, SlotIndex UseIdx,
2585 const LiveRange &LR,
2586 Register VRegOrUnit,
2587 LaneBitmask LaneMask) {
2588 const MachineInstr *MI = MO->getParent();
2589 LiveQueryResult LRQ = LR.Query(UseIdx);
2590 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2591 // Check if we have a segment at the use, note however that we only need one
2592 // live subregister range, the others may be dead.
2593 if (!HasValue && LaneMask.none()) {
2594 report("No live segment at use", MO, MONum);
2595 report_context_liverange(LR);
2596 report_context_vreg_regunit(VRegOrUnit);
2597 report_context(UseIdx);
2598 }
2599 if (MO->isKill() && !LRQ.isKill()) {
2600 report("Live range continues after kill flag", MO, MONum);
2601 report_context_liverange(LR);
2602 report_context_vreg_regunit(VRegOrUnit);
2603 if (LaneMask.any())
2604 report_context_lanemask(LaneMask);
2605 report_context(UseIdx);
2606 }
2607}
2608
/// Check that the live range \p LR of \p VRegOrUnit is consistent with the
/// def \p MO at \p DefIdx: a value number must exist at the def slot, its
/// recorded def index must agree with the operand (modulo early-clobber
/// slots), and a dead flag on the operand must correspond to a dead def in
/// LiveIntervals. \p SubRangeCheck / \p LaneMask indicate that \p LR is a
/// single subregister range rather than the main range.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      // The value number's recorded def does not match this operand's def
      // slot (and the mismatch is not explained by an early-clobber slot on
      // the same instruction).
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // The range has no value number at the def slot at all.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2664
2665void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2666 const MachineInstr *MI = MO->getParent();
2667 const Register Reg = MO->getReg();
2668 const unsigned SubRegIdx = MO->getSubReg();
2669
2670 const LiveInterval *LI = nullptr;
2671 if (LiveInts && Reg.isVirtual()) {
2672 if (LiveInts->hasInterval(Reg)) {
2673 LI = &LiveInts->getInterval(Reg);
2674 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2675 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2676 report("Live interval for subreg operand has no subranges", MO, MONum);
2677 } else {
2678 report("Virtual register has no live interval", MO, MONum);
2679 }
2680 }
2681
2682 // Both use and def operands can read a register.
2683 if (MO->readsReg()) {
2684 if (MO->isKill())
2685 addRegWithSubRegs(regsKilled, Reg);
2686
2687 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2688 // which case we have already checked that LiveVars knows any kills on the
2689 // bundle header instead).
2690 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2691 !MI->isBundledWithPred()) {
2692 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2693 if (!is_contained(VI.Kills, MI))
2694 report("Kill missing from LiveVariables", MO, MONum);
2695 }
2696
2697 // Check LiveInts liveness and kill.
2698 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2699 SlotIndex UseIdx;
2700 if (MI->isPHI()) {
2701 // PHI use occurs on the edge, so check for live out here instead.
2702 UseIdx = LiveInts->getMBBEndIdx(
2703 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2704 } else {
2705 UseIdx = LiveInts->getInstructionIndex(*MI);
2706 }
2707 // Check the cached regunit intervals.
2708 if (Reg.isPhysical() && !isReserved(Reg)) {
2709 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2710 if (MRI->isReservedRegUnit(Unit))
2711 continue;
2712 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2713 checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2714 }
2715 }
2716
2717 if (Reg.isVirtual()) {
2718 // This is a virtual register interval.
2719 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2720
2721 if (LI->hasSubRanges() && !MO->isDef()) {
2722 LaneBitmask MOMask = SubRegIdx != 0
2723 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2724 : MRI->getMaxLaneMaskForVReg(Reg);
2725 LaneBitmask LiveInMask;
2726 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2727 if ((MOMask & SR.LaneMask).none())
2728 continue;
2729 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2730 LiveQueryResult LRQ = SR.Query(UseIdx);
2731 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2732 LiveInMask |= SR.LaneMask;
2733 }
2734 // At least parts of the register has to be live at the use.
2735 if ((LiveInMask & MOMask).none()) {
2736 report("No live subrange at use", MO, MONum);
2737 report_context(*LI);
2738 report_context(UseIdx);
2739 }
2740 // For PHIs all lanes should be live
2741 if (MI->isPHI() && LiveInMask != MOMask) {
2742 report("Not all lanes of PHI source live at use", MO, MONum);
2743 report_context(*LI);
2744 report_context(UseIdx);
2745 }
2746 }
2747 }
2748 }
2749
2750 // Use of a dead register.
2751 if (!regsLive.count(Reg)) {
2752 if (Reg.isPhysical()) {
2753 // Reserved registers may be used even when 'dead'.
2754 bool Bad = !isReserved(Reg);
2755 // We are fine if just any subregister has a defined value.
2756 if (Bad) {
2757
2758 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2759 if (regsLive.count(SubReg)) {
2760 Bad = false;
2761 break;
2762 }
2763 }
2764 }
2765 // If there is an additional implicit-use of a super register we stop
2766 // here. By definition we are fine if the super register is not
2767 // (completely) dead, if the complete super register is dead we will
2768 // get a report for its operand.
2769 if (Bad) {
2770 for (const MachineOperand &MOP : MI->uses()) {
2771 if (!MOP.isReg() || !MOP.isImplicit())
2772 continue;
2773
2774 if (!MOP.getReg().isPhysical())
2775 continue;
2776
2777 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2778 Bad = false;
2779 }
2780 }
2781 if (Bad)
2782 report("Using an undefined physical register", MO, MONum);
2783 } else if (MRI->def_empty(Reg)) {
2784 report("Reading virtual register without a def", MO, MONum);
2785 } else {
2786 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2787 // We don't know which virtual registers are live in, so only complain
2788 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2789 // must be live in. PHI instructions are handled separately.
2790 if (MInfo.regsKilled.count(Reg))
2791 report("Using a killed virtual register", MO, MONum);
2792 else if (!MI->isPHI())
2793 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2794 }
2795 }
2796 }
2797
2798 if (MO->isDef()) {
2799 // Register defined.
2800 // TODO: verify that earlyclobber ops are not used.
2801 if (MO->isDead())
2802 addRegWithSubRegs(regsDead, Reg);
2803 else
2804 addRegWithSubRegs(regsDefined, Reg);
2805
2806 // Verify SSA form.
2807 if (MRI->isSSA() && Reg.isVirtual() &&
2808 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2809 report("Multiple virtual register defs in SSA form", MO, MONum);
2810
2811 // Check LiveInts for a live segment, but only for virtual registers.
2812 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2813 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2814 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2815
2816 if (Reg.isVirtual()) {
2817 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2818
2819 if (LI->hasSubRanges()) {
2820 LaneBitmask MOMask = SubRegIdx != 0
2821 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2822 : MRI->getMaxLaneMaskForVReg(Reg);
2823 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2824 if ((SR.LaneMask & MOMask).none())
2825 continue;
2826 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2827 }
2828 }
2829 }
2830 }
2831 }
2832}
2833
2834// This function gets called after visiting all instructions in a bundle. The
2835// argument points to the bundle header.
2836// Normal stand-alone instructions are also considered 'bundles', and this
2837// function is called for all of them.
2838void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2839 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2840 set_union(MInfo.regsKilled, regsKilled);
2841 set_subtract(regsLive, regsKilled); regsKilled.clear();
2842 // Kill any masked registers.
2843 while (!regMasks.empty()) {
2844 const uint32_t *Mask = regMasks.pop_back_val();
2845 for (Register Reg : regsLive)
2846 if (Reg.isPhysical() &&
2847 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2848 regsDead.push_back(Reg);
2849 }
2850 set_subtract(regsLive, regsDead); regsDead.clear();
2851 set_union(regsLive, regsDefined); regsDefined.clear();
2852}
2853
2854void
2855MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2856 MBBInfoMap[MBB].regsLiveOut = regsLive;
2857 regsLive.clear();
2858
2859 if (Indexes) {
2860 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2861 if (!(stop > lastIndex)) {
2862 report("Block ends before last instruction index", MBB);
2863 errs() << "Block ends at " << stop
2864 << " last instruction was at " << lastIndex << '\n';
2865 }
2866 lastIndex = stop;
2867 }
2868}
2869
2870namespace {
2871// This implements a set of registers that serves as a filter: can filter other
2872// sets by passing through elements not in the filter and blocking those that
2873// are. Any filter implicitly includes the full set of physical registers upon
2874// creation, thus filtering them all out. The filter itself as a set only grows,
2875// and needs to be as efficient as possible.
2876struct VRegFilter {
2877 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2878 // no duplicates. Both virtual and physical registers are fine.
2879 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2880 SmallVector<Register, 0> VRegsBuffer;
2881 filterAndAdd(FromRegSet, VRegsBuffer);
2882 }
2883 // Filter \p FromRegSet through the filter and append passed elements into \p
2884 // ToVRegs. All elements appended are then added to the filter itself.
2885 // \returns true if anything changed.
2886 template <typename RegSetT>
2887 bool filterAndAdd(const RegSetT &FromRegSet,
2888 SmallVectorImpl<Register> &ToVRegs) {
2889 unsigned SparseUniverse = Sparse.size();
2890 unsigned NewSparseUniverse = SparseUniverse;
2891 unsigned NewDenseSize = Dense.size();
2892 size_t Begin = ToVRegs.size();
2893 for (Register Reg : FromRegSet) {
2894 if (!Reg.isVirtual())
2895 continue;
2896 unsigned Index = Register::virtReg2Index(Reg);
2897 if (Index < SparseUniverseMax) {
2898 if (Index < SparseUniverse && Sparse.test(Index))
2899 continue;
2900 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2901 } else {
2902 if (Dense.count(Reg))
2903 continue;
2904 ++NewDenseSize;
2905 }
2906 ToVRegs.push_back(Reg);
2907 }
2908 size_t End = ToVRegs.size();
2909 if (Begin == End)
2910 return false;
2911 // Reserving space in sets once performs better than doing so continuously
2912 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2913 // tuned all the way down) and double iteration (the second one is over a
2914 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2915 Sparse.resize(NewSparseUniverse);
2916 Dense.reserve(NewDenseSize);
2917 for (unsigned I = Begin; I < End; ++I) {
2918 Register Reg = ToVRegs[I];
2919 unsigned Index = Register::virtReg2Index(Reg);
2920 if (Index < SparseUniverseMax)
2921 Sparse.set(Index);
2922 else
2923 Dense.insert(Reg);
2924 }
2925 return true;
2926 }
2927
2928private:
2929 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2930 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2931 // are tracked by Dense. The only purpose of the threashold and the Dense set
2932 // is to have a reasonably growing memory usage in pathological cases (large
2933 // number of very sparse VRegFilter instances live at the same time). In
2934 // practice even in the worst-by-execution time cases having all elements
2935 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2936 // space efficient than if tracked by Dense. The threashold is set to keep the
2937 // worst-case memory usage within 2x of figures determined empirically for
2938 // "all Dense" scenario in such worst-by-execution-time cases.
2939 BitVector Sparse;
2941};
2942
2943// Implements both a transfer function and a (binary, in-place) join operator
2944// for a dataflow over register sets with set union join and filtering transfer
2945// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2946// Maintains out_b as its state, allowing for O(n) iteration over it at any
2947// time, where n is the size of the set (as opposed to O(U) where U is the
2948// universe). filter_b implicitly contains all physical registers at all times.
2949class FilteringVRegSet {
2950 VRegFilter Filter;
2952
2953public:
2954 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2955 // Both virtual and physical registers are fine.
2956 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2957 Filter.add(RS);
2958 }
2959 // Passes \p RS through the filter_b (transfer function) and adds what's left
2960 // to itself (out_b).
2961 template <typename RegSetT> bool add(const RegSetT &RS) {
2962 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2963 // a set union) just add everything being added here to the Filter as well.
2964 return Filter.filterAndAdd(RS, VRegs);
2965 }
2966 using const_iterator = decltype(VRegs)::const_iterator;
2967 const_iterator begin() const { return VRegs.begin(); }
2968 const_iterator end() const { return VRegs.end(); }
2969 size_t size() const { return VRegs.size(); }
2970};
2971} // namespace
2972
2973// Calculate the largest possible vregsPassed sets. These are the registers that
2974// can pass through an MBB live, but may not be live every time. It is assumed
2975// that all vregsPassed sets are empty before the call.
2976void MachineVerifier::calcRegsPassed() {
2977 if (MF->empty())
2978 // ReversePostOrderTraversal doesn't handle empty functions.
2979 return;
2980
2981 for (const MachineBasicBlock *MB :
2983 FilteringVRegSet VRegs;
2984 BBInfo &Info = MBBInfoMap[MB];
2985 assert(Info.reachable);
2986
2987 VRegs.addToFilter(Info.regsKilled);
2988 VRegs.addToFilter(Info.regsLiveOut);
2989 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2990 const BBInfo &PredInfo = MBBInfoMap[Pred];
2991 if (!PredInfo.reachable)
2992 continue;
2993
2994 VRegs.add(PredInfo.regsLiveOut);
2995 VRegs.add(PredInfo.vregsPassed);
2996 }
2997 Info.vregsPassed.reserve(VRegs.size());
2998 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2999 }
3000}
3001
3002// Calculate the set of virtual registers that must be passed through each basic
3003// block in order to satisfy the requirements of successor blocks. This is very
3004// similar to calcRegsPassed, only backwards.
3005void MachineVerifier::calcRegsRequired() {
3006 // First push live-in regs to predecessors' vregsRequired.
3008 for (const auto &MBB : *MF) {
3009 BBInfo &MInfo = MBBInfoMap[&MBB];
3010 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3011 BBInfo &PInfo = MBBInfoMap[Pred];
3012 if (PInfo.addRequired(MInfo.vregsLiveIn))
3013 todo.insert(Pred);
3014 }
3015
3016 // Handle the PHI node.
3017 for (const MachineInstr &MI : MBB.phis()) {
3018 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3019 // Skip those Operands which are undef regs or not regs.
3020 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3021 continue;
3022
3023 // Get register and predecessor for one PHI edge.
3024 Register Reg = MI.getOperand(i).getReg();
3025 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3026
3027 BBInfo &PInfo = MBBInfoMap[Pred];
3028 if (PInfo.addRequired(Reg))
3029 todo.insert(Pred);
3030 }
3031 }
3032 }
3033
3034 // Iteratively push vregsRequired to predecessors. This will converge to the
3035 // same final state regardless of DenseSet iteration order.
3036 while (!todo.empty()) {
3037 const MachineBasicBlock *MBB = *todo.begin();
3038 todo.erase(MBB);
3039 BBInfo &MInfo = MBBInfoMap[MBB];
3040 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3041 if (Pred == MBB)
3042 continue;
3043 BBInfo &SInfo = MBBInfoMap[Pred];
3044 if (SInfo.addRequired(MInfo.vregsRequired))
3045 todo.insert(Pred);
3046 }
3047 }
3048}
3049
3050// Check PHI instructions at the beginning of MBB. It is assumed that
3051// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3052void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3053 BBInfo &MInfo = MBBInfoMap[&MBB];
3054
3056 for (const MachineInstr &Phi : MBB) {
3057 if (!Phi.isPHI())
3058 break;
3059 seen.clear();
3060
3061 const MachineOperand &MODef = Phi.getOperand(0);
3062 if (!MODef.isReg() || !MODef.isDef()) {
3063 report("Expected first PHI operand to be a register def", &MODef, 0);
3064 continue;
3065 }
3066 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3067 MODef.isEarlyClobber() || MODef.isDebug())
3068 report("Unexpected flag on PHI operand", &MODef, 0);
3069 Register DefReg = MODef.getReg();
3070 if (!DefReg.isVirtual())
3071 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3072
3073 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3074 const MachineOperand &MO0 = Phi.getOperand(I);
3075 if (!MO0.isReg()) {
3076 report("Expected PHI operand to be a register", &MO0, I);
3077 continue;
3078 }
3079 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3080 MO0.isDebug() || MO0.isTied())
3081 report("Unexpected flag on PHI operand", &MO0, I);
3082
3083 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3084 if (!MO1.isMBB()) {
3085 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3086 continue;
3087 }
3088
3089 const MachineBasicBlock &Pre = *MO1.getMBB();
3090 if (!Pre.isSuccessor(&MBB)) {
3091 report("PHI input is not a predecessor block", &MO1, I + 1);
3092 continue;
3093 }
3094
3095 if (MInfo.reachable) {
3096 seen.insert(&Pre);
3097 BBInfo &PrInfo = MBBInfoMap[&Pre];
3098 if (!MO0.isUndef() && PrInfo.reachable &&
3099 !PrInfo.isLiveOut(MO0.getReg()))
3100 report("PHI operand is not live-out from predecessor", &MO0, I);
3101 }
3102 }
3103
3104 // Did we see all predecessors?
3105 if (MInfo.reachable) {
3106 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3107 if (!seen.count(Pred)) {
3108 report("Missing PHI operand", &Phi);
3109 errs() << printMBBReference(*Pred)
3110 << " is a predecessor according to the CFG.\n";
3111 }
3112 }
3113 }
3114 }
3115}
3116
3117static void
3119 std::function<void(const Twine &Message)> FailureCB) {
3121 CV.initialize(&errs(), FailureCB, MF);
3122
3123 for (const auto &MBB : MF) {
3124 CV.visit(MBB);
3125 for (const auto &MI : MBB.instrs())
3126 CV.visit(MI);
3127 }
3128
3129 if (CV.sawTokens()) {
3130 DT.recalculate(const_cast<MachineFunction &>(MF));
3131 CV.verify(DT);
3132 }
3133}
3134
3135void MachineVerifier::visitMachineFunctionAfter() {
3136 auto FailureCB = [this](const Twine &Message) {
3137 report(Message.str().c_str(), MF);
3138 };
3139 verifyConvergenceControl(*MF, DT, FailureCB);
3140
3141 calcRegsPassed();
3142
3143 for (const MachineBasicBlock &MBB : *MF)
3144 checkPHIOps(MBB);
3145
3146 // Now check liveness info if available
3147 calcRegsRequired();
3148
3149 // Check for killed virtual registers that should be live out.
3150 for (const auto &MBB : *MF) {
3151 BBInfo &MInfo = MBBInfoMap[&MBB];
3152 for (Register VReg : MInfo.vregsRequired)
3153 if (MInfo.regsKilled.count(VReg)) {
3154 report("Virtual register killed in block, but needed live out.", &MBB);
3155 errs() << "Virtual register " << printReg(VReg)
3156 << " is used after the block.\n";
3157 }
3158 }
3159
3160 if (!MF->empty()) {
3161 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3162 for (Register VReg : MInfo.vregsRequired) {
3163 report("Virtual register defs don't dominate all uses.", MF);
3164 report_context_vreg(VReg);
3165 }
3166 }
3167
3168 if (LiveVars)
3169 verifyLiveVariables();
3170 if (LiveInts)
3171 verifyLiveIntervals();
3172
3173 // Check live-in list of each MBB. If a register is live into MBB, check
3174 // that the register is in regsLiveOut of each predecessor block. Since
3175 // this must come from a definition in the predecesssor or its live-in
3176 // list, this will catch a live-through case where the predecessor does not
3177 // have the register in its live-in list. This currently only checks
3178 // registers that have no aliases, are not allocatable and are not
3179 // reserved, which could mean a condition code register for instance.
3180 if (MRI->tracksLiveness())
3181 for (const auto &MBB : *MF)
3183 MCPhysReg LiveInReg = P.PhysReg;
3184 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3185 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3186 continue;
3187 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3188 BBInfo &PInfo = MBBInfoMap[Pred];
3189 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3190 report("Live in register not found to be live out from predecessor.",
3191 &MBB);
3192 errs() << TRI->getName(LiveInReg)
3193 << " not found to be live out from "
3194 << printMBBReference(*Pred) << "\n";
3195 }
3196 }
3197 }
3198
3199 for (auto CSInfo : MF->getCallSitesInfo())
3200 if (!CSInfo.first->isCall())
3201 report("Call site info referencing instruction that is not call", MF);
3202
3203 // If there's debug-info, check that we don't have any duplicate value
3204 // tracking numbers.
3205 if (MF->getFunction().getSubprogram()) {
3206 DenseSet<unsigned> SeenNumbers;
3207 for (const auto &MBB : *MF) {
3208 for (const auto &MI : MBB) {
3209 if (auto Num = MI.peekDebugInstrNum()) {
3210 auto Result = SeenNumbers.insert((unsigned)Num);
3211 if (!Result.second)
3212 report("Instruction has a duplicated value tracking number", &MI);
3213 }
3214 }
3215 }
3216 }
3217}
3218
3219void MachineVerifier::verifyLiveVariables() {
3220 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3221 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3223 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3224 for (const auto &MBB : *MF) {
3225 BBInfo &MInfo = MBBInfoMap[&MBB];
3226
3227 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3228 if (MInfo.vregsRequired.count(Reg)) {
3229 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3230 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3231 errs() << "Virtual register " << printReg(Reg)
3232 << " must be live through the block.\n";
3233 }
3234 } else {
3235 if (VI.AliveBlocks.test(MBB.getNumber())) {
3236 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3237 errs() << "Virtual register " << printReg(Reg)
3238 << " is not needed live through the block.\n";
3239 }
3240 }
3241 }
3242 }
3243}
3244
3245void MachineVerifier::verifyLiveIntervals() {
3246 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3247 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3249
3250 // Spilling and splitting may leave unused registers around. Skip them.
3251 if (MRI->reg_nodbg_empty(Reg))
3252 continue;
3253
3254 if (!LiveInts->hasInterval(Reg)) {
3255 report("Missing live interval for virtual register", MF);
3256 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
3257 continue;
3258 }
3259
3260 const LiveInterval &LI = LiveInts->getInterval(Reg);
3261 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3262 verifyLiveInterval(LI);
3263 }
3264
3265 // Verify all the cached regunit intervals.
3266 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3267 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3268 verifyLiveRange(*LR, i);
3269}
3270
3271void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3272 const VNInfo *VNI, Register Reg,
3273 LaneBitmask LaneMask) {
3274 if (VNI->isUnused())
3275 return;
3276
3277 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3278
3279 if (!DefVNI) {
3280 report("Value not live at VNInfo def and not marked unused", MF);
3281 report_context(LR, Reg, LaneMask);
3282 report_context(*VNI);
3283 return;
3284 }
3285
3286 if (DefVNI != VNI) {
3287 report("Live segment at def has different VNInfo", MF);
3288 report_context(LR, Reg, LaneMask);
3289 report_context(*VNI);
3290 return;
3291 }
3292
3293 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3294 if (!MBB) {
3295 report("Invalid VNInfo definition index", MF);
3296 report_context(LR, Reg, LaneMask);
3297 report_context(*VNI);
3298 return;
3299 }
3300
3301 if (VNI->isPHIDef()) {
3302 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3303 report("PHIDef VNInfo is not defined at MBB start", MBB);
3304 report_context(LR, Reg, LaneMask);
3305 report_context(*VNI);
3306 }
3307 return;
3308 }
3309
3310 // Non-PHI def.
3311 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3312 if (!MI) {
3313 report("No instruction at VNInfo def index", MBB);
3314 report_context(LR, Reg, LaneMask);
3315 report_context(*VNI);
3316 return;
3317 }
3318
3319 if (Reg != 0) {
3320 bool hasDef = false;
3321 bool isEarlyClobber = false;
3322 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3323 if (!MOI->isReg() || !MOI->isDef())
3324 continue;
3325 if (Reg.isVirtual()) {
3326 if (MOI->getReg() != Reg)
3327 continue;
3328 } else {
3329 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3330 continue;
3331 }
3332 if (LaneMask.any() &&
3333 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3334 continue;
3335 hasDef = true;
3336 if (MOI->isEarlyClobber())
3337 isEarlyClobber = true;
3338 }
3339
3340 if (!hasDef) {
3341 report("Defining instruction does not modify register", MI);
3342 report_context(LR, Reg, LaneMask);
3343 report_context(*VNI);
3344 }
3345
3346 // Early clobber defs begin at USE slots, but other defs must begin at
3347 // DEF slots.
3348 if (isEarlyClobber) {
3349 if (!VNI->def.isEarlyClobber()) {
3350 report("Early clobber def must be at an early-clobber slot", MBB);
3351 report_context(LR, Reg, LaneMask);
3352 report_context(*VNI);
3353 }
3354 } else if (!VNI->def.isRegister()) {
3355 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3356 report_context(LR, Reg, LaneMask);
3357 report_context(*VNI);
3358 }
3359 }
3360}
3361
3362void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3364 Register Reg,
3365 LaneBitmask LaneMask) {
3366 const LiveRange::Segment &S = *I;
3367 const VNInfo *VNI = S.valno;
3368 assert(VNI && "Live segment has no valno");
3369
3370 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3371 report("Foreign valno in live segment", MF);
3372 report_context(LR, Reg, LaneMask);
3373 report_context(S);
3374 report_context(*VNI);
3375 }
3376
3377 if (VNI->isUnused()) {
3378 report("Live segment valno is marked unused", MF);
3379 report_context(LR, Reg, LaneMask);
3380 report_context(S);
3381 }
3382
3383 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3384 if (!MBB) {
3385 report("Bad start of live segment, no basic block", MF);
3386 report_context(LR, Reg, LaneMask);
3387 report_context(S);
3388 return;
3389 }
3390 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3391 if (S.start != MBBStartIdx && S.start != VNI->def) {
3392 report("Live segment must begin at MBB entry or valno def", MBB);
3393 report_context(LR, Reg, LaneMask);
3394 report_context(S);
3395 }
3396
3397 const MachineBasicBlock *EndMBB =
3398 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3399 if (!EndMBB) {
3400 report("Bad end of live segment, no basic block", MF);
3401 report_context(LR, Reg, LaneMask);
3402 report_context(S);
3403 return;
3404 }
3405
3406 // Checks for non-live-out segments.
3407 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3408 // RegUnit intervals are allowed dead phis.
3409 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3410 S.end == VNI->def.getDeadSlot())
3411 return;
3412
3413 // The live segment is ending inside EndMBB
3414 const MachineInstr *MI =
3416 if (!MI) {
3417 report("Live segment doesn't end at a valid instruction", EndMBB);
3418 report_context(LR, Reg, LaneMask);
3419 report_context(S);
3420 return;
3421 }
3422
3423 // The block slot must refer to a basic block boundary.
3424 if (S.end.isBlock()) {
3425 report("Live segment ends at B slot of an instruction", EndMBB);
3426 report_context(LR, Reg, LaneMask);
3427 report_context(S);
3428 }
3429
3430 if (S.end.isDead()) {
3431 // Segment ends on the dead slot.
3432 // That means there must be a dead def.
3433 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3434 report("Live segment ending at dead slot spans instructions", EndMBB);
3435 report_context(LR, Reg, LaneMask);
3436 report_context(S);
3437 }
3438 }
3439
3440 // After tied operands are rewritten, a live segment can only end at an
3441 // early-clobber slot if it is being redefined by an early-clobber def.
3442 // TODO: Before tied operands are rewritten, a live segment can only end at
3443 // an early-clobber slot if the last use is tied to an early-clobber def.
3444 if (MF->getProperties().hasProperty(
3446 S.end.isEarlyClobber()) {
3447 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3448 report("Live segment ending at early clobber slot must be "
3449 "redefined by an EC def in the same instruction",
3450 EndMBB);
3451 report_context(LR, Reg, LaneMask);
3452 report_context(S);
3453 }
3454 }
3455
3456 // The following checks only apply to virtual registers. Physreg liveness
3457 // is too weird to check.
3458 if (Reg.isVirtual()) {
3459 // A live segment can end with either a redefinition, a kill flag on a
3460 // use, or a dead flag on a def.
3461 bool hasRead = false;
3462 bool hasSubRegDef = false;
3463 bool hasDeadDef = false;
3464 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3465 if (!MOI->isReg() || MOI->getReg() != Reg)
3466 continue;
3467 unsigned Sub = MOI->getSubReg();
3468 LaneBitmask SLM =
3469 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3470 if (MOI->isDef()) {
3471 if (Sub != 0) {
3472 hasSubRegDef = true;
3473 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3474 // mask for subregister defs. Read-undef defs will be handled by
3475 // readsReg below.
3476 SLM = ~SLM;
3477 }
3478 if (MOI->isDead())
3479 hasDeadDef = true;
3480 }
3481 if (LaneMask.any() && (LaneMask & SLM).none())
3482 continue;
3483 if (MOI->readsReg())
3484 hasRead = true;
3485 }
3486 if (S.end.isDead()) {
3487 // Make sure that the corresponding machine operand for a "dead" live
3488 // range has the dead flag. We cannot perform this check for subregister
3489 // liveranges as partially dead values are allowed.
3490 if (LaneMask.none() && !hasDeadDef) {
3491 report(
3492 "Instruction ending live segment on dead slot has no dead flag",
3493 MI);
3494 report_context(LR, Reg, LaneMask);
3495 report_context(S);
3496 }
3497 } else {
3498 if (!hasRead) {
3499 // When tracking subregister liveness, the main range must start new
3500 // values on partial register writes, even if there is no read.
3501 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3502 !hasSubRegDef) {
3503 report("Instruction ending live segment doesn't read the register",
3504 MI);
3505 report_context(LR, Reg, LaneMask);
3506 report_context(S);
3507 }
3508 }
3509 }
3510 }
3511 }
3512
3513 // Now check all the basic blocks in this live segment.
3515 // Is this live segment the beginning of a non-PHIDef VN?
3516 if (S.start == VNI->def && !VNI->isPHIDef()) {
3517 // Not live-in to any blocks.
3518 if (MBB == EndMBB)
3519 return;
3520 // Skip this block.
3521 ++MFI;
3522 }
3523
3525 if (LaneMask.any()) {
3526 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3527 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3528 }
3529
3530 while (true) {
3531 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3532 // We don't know how to track physregs into a landing pad.
3533 if (!Reg.isVirtual() && MFI->isEHPad()) {
3534 if (&*MFI == EndMBB)
3535 break;
3536 ++MFI;
3537 continue;
3538 }
3539
3540 // Is VNI a PHI-def in the current block?
3541 bool IsPHI = VNI->isPHIDef() &&
3542 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3543
3544 // Check that VNI is live-out of all predecessors.
3545 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3546 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3547 // Predecessor of landing pad live-out on last call.
3548 if (MFI->isEHPad()) {
3549 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3550 if (MI.isCall()) {
3551 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3552 break;
3553 }
3554 }
3555 }
3556 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3557
3558 // All predecessors must have a live-out value. However for a phi
3559 // instruction with subregister intervals
3560 // only one of the subregisters (not necessarily the current one) needs to
3561 // be defined.
3562 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3563 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3564 continue;
3565 report("Register not marked live out of predecessor", Pred);
3566 report_context(LR, Reg, LaneMask);
3567 report_context(*VNI);
3568 errs() << " live into " << printMBBReference(*MFI) << '@'
3569 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3570 << PEnd << '\n';
3571 continue;
3572 }
3573
3574 // Only PHI-defs can take different predecessor values.
3575 if (!IsPHI && PVNI != VNI) {
3576 report("Different value live out of predecessor", Pred);
3577 report_context(LR, Reg, LaneMask);
3578 errs() << "Valno #" << PVNI->id << " live out of "
3579 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3580 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3581 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3582 }
3583 }
3584 if (&*MFI == EndMBB)
3585 break;
3586 ++MFI;
3587 }
3588}
3589
3590void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3591 LaneBitmask LaneMask) {
3592 for (const VNInfo *VNI : LR.valnos)
3593 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3594
3595 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3596 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3597}
3598
3599void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3600 Register Reg = LI.reg();
3601 assert(Reg.isVirtual());
3602 verifyLiveRange(LI, Reg);
3603
3604 if (LI.hasSubRanges()) {
3606 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3607 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3608 if ((Mask & SR.LaneMask).any()) {
3609 report("Lane masks of sub ranges overlap in live interval", MF);
3610 report_context(LI);
3611 }
3612 if ((SR.LaneMask & ~MaxMask).any()) {
3613 report("Subrange lanemask is invalid", MF);
3614 report_context(LI);
3615 }
3616 if (SR.empty()) {
3617 report("Subrange must not be empty", MF);
3618 report_context(SR, LI.reg(), SR.LaneMask);
3619 }
3620 Mask |= SR.LaneMask;
3621 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3622 if (!LI.covers(SR)) {
3623 report("A Subrange is not covered by the main range", MF);
3624 report_context(LI);
3625 }
3626 }
3627 }
3628
3629 // Check the LI only has one connected component.
3630 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3631 unsigned NumComp = ConEQ.Classify(LI);
3632 if (NumComp > 1) {
3633 report("Multiple connected components in live interval", MF);
3634 report_context(LI);
3635 for (unsigned comp = 0; comp != NumComp; ++comp) {
3636 errs() << comp << ": valnos";
3637 for (const VNInfo *I : LI.valnos)
3638 if (comp == ConEQ.getEqClass(I))
3639 errs() << ' ' << I->id;
3640 errs() << '\n';
3641 }
3642 }
3643}
3644
3645namespace {
3646
// Call-frame state at the boundaries of a basic block. A single integer
// cannot distinguish FrameSetup from FrameDestroy when the adjustment is
// zero, so each value is paired with a flag saying whether a FrameSetup is
// currently open.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Stack adjustment on entry/exit; negative while a frame is being set up.
  int EntryValue = 0;
  int ExitValue = 0;
  // Whether a FrameSetup is pending at block entry/exit.
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};
3663
3664} // end anonymous namespace
3665
3666/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3667/// by a FrameDestroy <n>, stack adjustments are identical on all
3668/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3669void MachineVerifier::verifyStackFrame() {
3670 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3671 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3672 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3673 return;
3674
3676 SPState.resize(MF->getNumBlockIDs());
3678
3679 // Visit the MBBs in DFS order.
3680 for (df_ext_iterator<const MachineFunction *,
3682 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3683 DFI != DFE; ++DFI) {
3684 const MachineBasicBlock *MBB = *DFI;
3685
3686 StackStateOfBB BBState;
3687 // Check the exit state of the DFS stack predecessor.
3688 if (DFI.getPathLength() >= 2) {
3689 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3690 assert(Reachable.count(StackPred) &&
3691 "DFS stack predecessor is already visited.\n");
3692 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3693 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3694 BBState.ExitValue = BBState.EntryValue;
3695 BBState.ExitIsSetup = BBState.EntryIsSetup;
3696 }
3697
3698 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3699 report("Call frame size on entry does not match value computed from "
3700 "predecessor",
3701 MBB);
3702 errs() << "Call frame size on entry " << MBB->getCallFrameSize()
3703 << " does not match value computed from predecessor "
3704 << -BBState.EntryValue << '\n';
3705 }
3706
3707 // Update stack state by checking contents of MBB.
3708 for (const auto &I : *MBB) {
3709 if (I.getOpcode() == FrameSetupOpcode) {
3710 if (BBState.ExitIsSetup)
3711 report("FrameSetup is after another FrameSetup", &I);
3712 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3713 report("AdjustsStack not set in presence of a frame pseudo "
3714 "instruction.", &I);
3715 BBState.ExitValue -= TII->getFrameTotalSize(I);
3716 BBState.ExitIsSetup = true;
3717 }
3718
3719 if (I.getOpcode() == FrameDestroyOpcode) {
3720 int Size = TII->getFrameTotalSize(I);
3721 if (!BBState.ExitIsSetup)
3722 report("FrameDestroy is not after a FrameSetup", &I);
3723 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3724 BBState.ExitValue;
3725 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3726 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3727 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3728 << AbsSPAdj << ">.\n";
3729 }
3730 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3731 report("AdjustsStack not set in presence of a frame pseudo "
3732 "instruction.", &I);
3733 BBState.ExitValue += Size;
3734 BBState.ExitIsSetup = false;
3735 }
3736 }
3737 SPState[MBB->getNumber()] = BBState;
3738
3739 // Make sure the exit state of any predecessor is consistent with the entry
3740 // state.
3741 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3742 if (Reachable.count(Pred) &&
3743 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3744 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3745 report("The exit stack state of a predecessor is inconsistent.", MBB);
3746 errs() << "Predecessor " << printMBBReference(*Pred)
3747 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3748 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3749 << printMBBReference(*MBB) << " has entry state ("
3750 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3751 }
3752 }
3753
3754 // Make sure the entry state of any successor is consistent with the exit
3755 // state.
3756 for (const MachineBasicBlock *Succ : MBB->successors()) {
3757 if (Reachable.count(Succ) &&
3758 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3759 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3760 report("The entry stack state of a successor is inconsistent.", MBB);
3761 errs() << "Successor " << printMBBReference(*Succ)
3762 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3763 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3764 << printMBBReference(*MBB) << " has exit state ("
3765 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3766 }
3767 }
3768
3769 // Make sure a basic block with return ends with zero stack adjustment.
3770 if (!MBB->empty() && MBB->back().isReturn()) {
3771 if (BBState.ExitIsSetup)
3772 report("A return block ends with a FrameSetup.", MBB);
3773 if (BBState.ExitValue)
3774 report("A return block ends with a nonzero stack adjustment.", MBB);
3775 }
3776 }
3777}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDomTree &DT, std::function< void(const Twine &Message)> FailureCB)
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:639
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:220
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:148
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Core dominator tree base class.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:352
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:280
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:203
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:780
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:546
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:908
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:942
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:933
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:68
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:179
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:212
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:245
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:215
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:219
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:234
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:275
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:240
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:222
SlotIndexes pass.
Definition: SlotIndexes.h:300
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:462
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:497
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:502
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:473
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:371
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:452
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:366
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:356
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
iterator begin() const
Definition: SmallPtrSet.h:380
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:210
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:224
self_iterator getIterator()
Definition: ilist_node.h:109
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1689
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2082
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1745
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1858
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.