// MachineVerifier.cpp — LLVM 18.0.0git (source listing recovered from a
// doxygen-rendered page; page navigation chrome removed).
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
87
88using namespace llvm;
89
90namespace {
91
92 struct MachineVerifier {
93 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
94
95 MachineVerifier(const char *b, LiveVariables *LiveVars,
96 LiveIntervals *LiveInts, LiveStacks *LiveStks,
97 SlotIndexes *Indexes)
98 : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
99 Indexes(Indexes) {}
100
101 unsigned verify(const MachineFunction &MF);
102
103 Pass *const PASS = nullptr;
104 const char *Banner;
105 const MachineFunction *MF = nullptr;
106 const TargetMachine *TM = nullptr;
107 const TargetInstrInfo *TII = nullptr;
108 const TargetRegisterInfo *TRI = nullptr;
109 const MachineRegisterInfo *MRI = nullptr;
110 const RegisterBankInfo *RBI = nullptr;
111
112 unsigned foundErrors = 0;
113
114 // Avoid querying the MachineFunctionProperties for each operand.
115 bool isFunctionRegBankSelected = false;
116 bool isFunctionSelected = false;
117 bool isFunctionTracksDebugUserValues = false;
118
119 using RegVector = SmallVector<Register, 16>;
120 using RegMaskVector = SmallVector<const uint32_t *, 4>;
121 using RegSet = DenseSet<Register>;
124
125 const MachineInstr *FirstNonPHI = nullptr;
126 const MachineInstr *FirstTerminator = nullptr;
127 BlockSet FunctionBlocks;
128
129 BitVector regsReserved;
130 RegSet regsLive;
131 RegVector regsDefined, regsDead, regsKilled;
132 RegMaskVector regMasks;
133
134 SlotIndex lastIndex;
135
136 // Add Reg and any sub-registers to RV
137 void addRegWithSubRegs(RegVector &RV, Register Reg) {
138 RV.push_back(Reg);
139 if (Reg.isPhysical())
140 append_range(RV, TRI->subregs(Reg.asMCReg()));
141 }
142
143 struct BBInfo {
144 // Is this MBB reachable from the MF entry point?
145 bool reachable = false;
146
147 // Vregs that must be live in because they are used without being
148 // defined. Map value is the user. vregsLiveIn doesn't include regs
149 // that only are used by PHI nodes.
150 RegMap vregsLiveIn;
151
152 // Regs killed in MBB. They may be defined again, and will then be in both
153 // regsKilled and regsLiveOut.
154 RegSet regsKilled;
155
156 // Regs defined in MBB and live out. Note that vregs passing through may
157 // be live out without being mentioned here.
158 RegSet regsLiveOut;
159
160 // Vregs that pass through MBB untouched. This set is disjoint from
161 // regsKilled and regsLiveOut.
162 RegSet vregsPassed;
163
164 // Vregs that must pass through MBB because they are needed by a successor
165 // block. This set is disjoint from regsLiveOut.
166 RegSet vregsRequired;
167
168 // Set versions of block's predecessor and successor lists.
169 BlockSet Preds, Succs;
170
171 BBInfo() = default;
172
173 // Add register to vregsRequired if it belongs there. Return true if
174 // anything changed.
175 bool addRequired(Register Reg) {
176 if (!Reg.isVirtual())
177 return false;
178 if (regsLiveOut.count(Reg))
179 return false;
180 return vregsRequired.insert(Reg).second;
181 }
182
183 // Same for a full set.
184 bool addRequired(const RegSet &RS) {
185 bool Changed = false;
186 for (Register Reg : RS)
187 Changed |= addRequired(Reg);
188 return Changed;
189 }
190
191 // Same for a full map.
192 bool addRequired(const RegMap &RM) {
193 bool Changed = false;
194 for (const auto &I : RM)
195 Changed |= addRequired(I.first);
196 return Changed;
197 }
198
199 // Live-out registers are either in regsLiveOut or vregsPassed.
200 bool isLiveOut(Register Reg) const {
201 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
202 }
203 };
204
205 // Extra register info per MBB.
207
208 bool isReserved(Register Reg) {
209 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
210 }
211
212 bool isAllocatable(Register Reg) const {
213 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
214 !regsReserved.test(Reg.id());
215 }
216
217 // Analysis information if available
218 LiveVariables *LiveVars = nullptr;
219 LiveIntervals *LiveInts = nullptr;
220 LiveStacks *LiveStks = nullptr;
221 SlotIndexes *Indexes = nullptr;
222
223 void visitMachineFunctionBefore();
224 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
225 void visitMachineBundleBefore(const MachineInstr *MI);
226
227 /// Verify that all of \p MI's virtual register operands are scalars.
228 /// \returns True if all virtual register operands are scalar. False
229 /// otherwise.
230 bool verifyAllRegOpsScalar(const MachineInstr &MI,
231 const MachineRegisterInfo &MRI);
232 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
233
234 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
235 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
236 void verifyPreISelGenericInstruction(const MachineInstr *MI);
237
238 void visitMachineInstrBefore(const MachineInstr *MI);
239 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
240 void visitMachineBundleAfter(const MachineInstr *MI);
241 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
242 void visitMachineFunctionAfter();
243
244 void report(const char *msg, const MachineFunction *MF);
245 void report(const char *msg, const MachineBasicBlock *MBB);
246 void report(const char *msg, const MachineInstr *MI);
247 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
248 LLT MOVRegType = LLT{});
249 void report(const Twine &Msg, const MachineInstr *MI);
250
251 void report_context(const LiveInterval &LI) const;
252 void report_context(const LiveRange &LR, Register VRegUnit,
253 LaneBitmask LaneMask) const;
254 void report_context(const LiveRange::Segment &S) const;
255 void report_context(const VNInfo &VNI) const;
256 void report_context(SlotIndex Pos) const;
257 void report_context(MCPhysReg PhysReg) const;
258 void report_context_liverange(const LiveRange &LR) const;
259 void report_context_lanemask(LaneBitmask LaneMask) const;
260 void report_context_vreg(Register VReg) const;
261 void report_context_vreg_regunit(Register VRegOrUnit) const;
262
263 void verifyInlineAsm(const MachineInstr *MI);
264
265 void checkLiveness(const MachineOperand *MO, unsigned MONum);
266 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
267 SlotIndex UseIdx, const LiveRange &LR,
268 Register VRegOrUnit,
269 LaneBitmask LaneMask = LaneBitmask::getNone());
270 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
271 SlotIndex DefIdx, const LiveRange &LR,
272 Register VRegOrUnit, bool SubRangeCheck = false,
273 LaneBitmask LaneMask = LaneBitmask::getNone());
274
275 void markReachable(const MachineBasicBlock *MBB);
276 void calcRegsPassed();
277 void checkPHIOps(const MachineBasicBlock &MBB);
278
279 void calcRegsRequired();
280 void verifyLiveVariables();
281 void verifyLiveIntervals();
282 void verifyLiveInterval(const LiveInterval&);
283 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
285 void verifyLiveRangeSegment(const LiveRange &,
288 void verifyLiveRange(const LiveRange &, Register,
289 LaneBitmask LaneMask = LaneBitmask::getNone());
290
291 void verifyStackFrame();
292
293 void verifySlotIndexes() const;
294 void verifyProperties(const MachineFunction &MF);
295 };
296
297 struct MachineVerifierPass : public MachineFunctionPass {
298 static char ID; // Pass ID, replacement for typeid
299
300 const std::string Banner;
301
302 MachineVerifierPass(std::string banner = std::string())
303 : MachineFunctionPass(ID), Banner(std::move(banner)) {
305 }
306
307 void getAnalysisUsage(AnalysisUsage &AU) const override {
312 AU.setPreservesAll();
314 }
315
316 bool runOnMachineFunction(MachineFunction &MF) override {
317 // Skip functions that have known verification problems.
318 // FIXME: Remove this mechanism when all problematic passes have been
319 // fixed.
320 if (MF.getProperties().hasProperty(
321 MachineFunctionProperties::Property::FailsVerification))
322 return false;
323
324 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
325 if (FoundErrors)
326 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
327 return false;
328 }
329 };
330
331} // end anonymous namespace
332
333char MachineVerifierPass::ID = 0;
334
335INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
336 "Verify generated machine code", false, false)
337
339 return new MachineVerifierPass(Banner);
340}
341
343 const std::string &Banner,
344 const MachineFunction &MF) {
345 // TODO: Use MFAM after porting below analyses.
346 // LiveVariables *LiveVars;
347 // LiveIntervals *LiveInts;
348 // LiveStacks *LiveStks;
349 // SlotIndexes *Indexes;
350 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
351 if (FoundErrors)
352 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
353}
354
355bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
356 const {
357 MachineFunction &MF = const_cast<MachineFunction&>(*this);
358 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
359 if (AbortOnErrors && FoundErrors)
360 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
361 return FoundErrors == 0;
362}
363
365 const char *Banner, bool AbortOnErrors) const {
366 MachineFunction &MF = const_cast<MachineFunction &>(*this);
367 unsigned FoundErrors =
368 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
369 if (AbortOnErrors && FoundErrors)
370 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
371 return FoundErrors == 0;
372}
373
374void MachineVerifier::verifySlotIndexes() const {
375 if (Indexes == nullptr)
376 return;
377
378 // Ensure the IdxMBB list is sorted by slot indexes.
381 E = Indexes->MBBIndexEnd(); I != E; ++I) {
382 assert(!Last.isValid() || I->first > Last);
383 Last = I->first;
384 }
385}
386
387void MachineVerifier::verifyProperties(const MachineFunction &MF) {
388 // If a pass has introduced virtual registers without clearing the
389 // NoVRegs property (or set it without allocating the vregs)
390 // then report an error.
391 if (MF.getProperties().hasProperty(
393 MRI->getNumVirtRegs())
394 report("Function has NoVRegs property but there are VReg operands", &MF);
395}
396
397unsigned MachineVerifier::verify(const MachineFunction &MF) {
398 foundErrors = 0;
399
400 this->MF = &MF;
401 TM = &MF.getTarget();
404 RBI = MF.getSubtarget().getRegBankInfo();
405 MRI = &MF.getRegInfo();
406
407 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
409
410 // If we're mid-GlobalISel and we already triggered the fallback path then
411 // it's expected that the MIR is somewhat broken but that's ok since we'll
412 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
413 if (isFunctionFailedISel)
414 return foundErrors;
415
416 isFunctionRegBankSelected = MF.getProperties().hasProperty(
418 isFunctionSelected = MF.getProperties().hasProperty(
420 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
422
423 if (PASS) {
424 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
425 // We don't want to verify LiveVariables if LiveIntervals is available.
426 if (!LiveInts)
427 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
428 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
429 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
430 }
431
432 verifySlotIndexes();
433
434 verifyProperties(MF);
435
436 visitMachineFunctionBefore();
437 for (const MachineBasicBlock &MBB : MF) {
438 visitMachineBasicBlockBefore(&MBB);
439 // Keep track of the current bundle header.
440 const MachineInstr *CurBundle = nullptr;
441 // Do we expect the next instruction to be part of the same bundle?
442 bool InBundle = false;
443
444 for (const MachineInstr &MI : MBB.instrs()) {
445 if (MI.getParent() != &MBB) {
446 report("Bad instruction parent pointer", &MBB);
447 errs() << "Instruction: " << MI;
448 continue;
449 }
450
451 // Check for consistent bundle flags.
452 if (InBundle && !MI.isBundledWithPred())
453 report("Missing BundledPred flag, "
454 "BundledSucc was set on predecessor",
455 &MI);
456 if (!InBundle && MI.isBundledWithPred())
457 report("BundledPred flag is set, "
458 "but BundledSucc not set on predecessor",
459 &MI);
460
461 // Is this a bundle header?
462 if (!MI.isInsideBundle()) {
463 if (CurBundle)
464 visitMachineBundleAfter(CurBundle);
465 CurBundle = &MI;
466 visitMachineBundleBefore(CurBundle);
467 } else if (!CurBundle)
468 report("No bundle header", &MI);
469 visitMachineInstrBefore(&MI);
470 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
471 const MachineOperand &Op = MI.getOperand(I);
472 if (Op.getParent() != &MI) {
473 // Make sure to use correct addOperand / removeOperand / ChangeTo
474 // functions when replacing operands of a MachineInstr.
475 report("Instruction has operand with wrong parent set", &MI);
476 }
477
478 visitMachineOperand(&Op, I);
479 }
480
481 // Was this the last bundled instruction?
482 InBundle = MI.isBundledWithSucc();
483 }
484 if (CurBundle)
485 visitMachineBundleAfter(CurBundle);
486 if (InBundle)
487 report("BundledSucc flag set on last instruction in block", &MBB.back());
488 visitMachineBasicBlockAfter(&MBB);
489 }
490 visitMachineFunctionAfter();
491
492 // Clean up.
493 regsLive.clear();
494 regsDefined.clear();
495 regsDead.clear();
496 regsKilled.clear();
497 regMasks.clear();
498 MBBInfoMap.clear();
499
500 return foundErrors;
501}
502
503void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
504 assert(MF);
505 errs() << '\n';
506 if (!foundErrors++) {
507 if (Banner)
508 errs() << "# " << Banner << '\n';
509 if (LiveInts != nullptr)
510 LiveInts->print(errs());
511 else
512 MF->print(errs(), Indexes);
513 }
514 errs() << "*** Bad machine code: " << msg << " ***\n"
515 << "- function: " << MF->getName() << "\n";
516}
517
518void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
519 assert(MBB);
520 report(msg, MBB->getParent());
521 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
522 << MBB->getName() << " (" << (const void *)MBB << ')';
523 if (Indexes)
524 errs() << " [" << Indexes->getMBBStartIdx(MBB)
525 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
526 errs() << '\n';
527}
528
529void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
530 assert(MI);
531 report(msg, MI->getParent());
532 errs() << "- instruction: ";
533 if (Indexes && Indexes->hasIndex(*MI))
534 errs() << Indexes->getInstructionIndex(*MI) << '\t';
535 MI->print(errs(), /*IsStandalone=*/true);
536}
537
538void MachineVerifier::report(const char *msg, const MachineOperand *MO,
539 unsigned MONum, LLT MOVRegType) {
540 assert(MO);
541 report(msg, MO->getParent());
542 errs() << "- operand " << MONum << ": ";
543 MO->print(errs(), MOVRegType, TRI);
544 errs() << "\n";
545}
546
547void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
548 report(Msg.str().c_str(), MI);
549}
550
551void MachineVerifier::report_context(SlotIndex Pos) const {
552 errs() << "- at: " << Pos << '\n';
553}
554
555void MachineVerifier::report_context(const LiveInterval &LI) const {
556 errs() << "- interval: " << LI << '\n';
557}
558
559void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
560 LaneBitmask LaneMask) const {
561 report_context_liverange(LR);
562 report_context_vreg_regunit(VRegUnit);
563 if (LaneMask.any())
564 report_context_lanemask(LaneMask);
565}
566
567void MachineVerifier::report_context(const LiveRange::Segment &S) const {
568 errs() << "- segment: " << S << '\n';
569}
570
571void MachineVerifier::report_context(const VNInfo &VNI) const {
572 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
573}
574
575void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
576 errs() << "- liverange: " << LR << '\n';
577}
578
579void MachineVerifier::report_context(MCPhysReg PReg) const {
580 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
581}
582
583void MachineVerifier::report_context_vreg(Register VReg) const {
584 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
585}
586
587void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
588 if (VRegOrUnit.isVirtual()) {
589 report_context_vreg(VRegOrUnit);
590 } else {
591 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
592 }
593}
594
595void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
596 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
597}
598
599void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
600 BBInfo &MInfo = MBBInfoMap[MBB];
601 if (!MInfo.reachable) {
602 MInfo.reachable = true;
603 for (const MachineBasicBlock *Succ : MBB->successors())
604 markReachable(Succ);
605 }
606}
607
608void MachineVerifier::visitMachineFunctionBefore() {
609 lastIndex = SlotIndex();
610 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
611 : TRI->getReservedRegs(*MF);
612
613 if (!MF->empty())
614 markReachable(&MF->front());
615
616 // Build a set of the basic blocks in the function.
617 FunctionBlocks.clear();
618 for (const auto &MBB : *MF) {
619 FunctionBlocks.insert(&MBB);
620 BBInfo &MInfo = MBBInfoMap[&MBB];
621
622 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
623 if (MInfo.Preds.size() != MBB.pred_size())
624 report("MBB has duplicate entries in its predecessor list.", &MBB);
625
626 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
627 if (MInfo.Succs.size() != MBB.succ_size())
628 report("MBB has duplicate entries in its successor list.", &MBB);
629 }
630
631 // Check that the register use lists are sane.
632 MRI->verifyUseLists();
633
634 if (!MF->empty())
635 verifyStackFrame();
636}
637
638void
639MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
640 FirstTerminator = nullptr;
641 FirstNonPHI = nullptr;
642
643 if (!MF->getProperties().hasProperty(
644 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
645 // If this block has allocatable physical registers live-in, check that
646 // it is an entry block or landing pad.
647 for (const auto &LI : MBB->liveins()) {
648 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
649 MBB->getIterator() != MBB->getParent()->begin() &&
651 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
652 "inlineasm-br-indirect-target.",
653 MBB);
654 report_context(LI.PhysReg);
655 }
656 }
657 }
658
659 if (MBB->isIRBlockAddressTaken()) {
661 report("ir-block-address-taken is associated with basic block not used by "
662 "a blockaddress.",
663 MBB);
664 }
665
666 // Count the number of landing pad successors.
668 for (const auto *succ : MBB->successors()) {
669 if (succ->isEHPad())
670 LandingPadSuccs.insert(succ);
671 if (!FunctionBlocks.count(succ))
672 report("MBB has successor that isn't part of the function.", MBB);
673 if (!MBBInfoMap[succ].Preds.count(MBB)) {
674 report("Inconsistent CFG", MBB);
675 errs() << "MBB is not in the predecessor list of the successor "
676 << printMBBReference(*succ) << ".\n";
677 }
678 }
679
680 // Check the predecessor list.
681 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
682 if (!FunctionBlocks.count(Pred))
683 report("MBB has predecessor that isn't part of the function.", MBB);
684 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
685 report("Inconsistent CFG", MBB);
686 errs() << "MBB is not in the successor list of the predecessor "
687 << printMBBReference(*Pred) << ".\n";
688 }
689 }
690
691 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
692 const BasicBlock *BB = MBB->getBasicBlock();
693 const Function &F = MF->getFunction();
694 if (LandingPadSuccs.size() > 1 &&
695 !(AsmInfo &&
697 BB && isa<SwitchInst>(BB->getTerminator())) &&
698 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
699 report("MBB has more than one landing pad successor", MBB);
700
701 // Call analyzeBranch. If it succeeds, there several more conditions to check.
702 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
704 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
705 Cond)) {
706 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
707 // check whether its answers match up with reality.
708 if (!TBB && !FBB) {
709 // Block falls through to its successor.
710 if (!MBB->empty() && MBB->back().isBarrier() &&
711 !TII->isPredicated(MBB->back())) {
712 report("MBB exits via unconditional fall-through but ends with a "
713 "barrier instruction!", MBB);
714 }
715 if (!Cond.empty()) {
716 report("MBB exits via unconditional fall-through but has a condition!",
717 MBB);
718 }
719 } else if (TBB && !FBB && Cond.empty()) {
720 // Block unconditionally branches somewhere.
721 if (MBB->empty()) {
722 report("MBB exits via unconditional branch but doesn't contain "
723 "any instructions!", MBB);
724 } else if (!MBB->back().isBarrier()) {
725 report("MBB exits via unconditional branch but doesn't end with a "
726 "barrier instruction!", MBB);
727 } else if (!MBB->back().isTerminator()) {
728 report("MBB exits via unconditional branch but the branch isn't a "
729 "terminator instruction!", MBB);
730 }
731 } else if (TBB && !FBB && !Cond.empty()) {
732 // Block conditionally branches somewhere, otherwise falls through.
733 if (MBB->empty()) {
734 report("MBB exits via conditional branch/fall-through but doesn't "
735 "contain any instructions!", MBB);
736 } else if (MBB->back().isBarrier()) {
737 report("MBB exits via conditional branch/fall-through but ends with a "
738 "barrier instruction!", MBB);
739 } else if (!MBB->back().isTerminator()) {
740 report("MBB exits via conditional branch/fall-through but the branch "
741 "isn't a terminator instruction!", MBB);
742 }
743 } else if (TBB && FBB) {
744 // Block conditionally branches somewhere, otherwise branches
745 // somewhere else.
746 if (MBB->empty()) {
747 report("MBB exits via conditional branch/branch but doesn't "
748 "contain any instructions!", MBB);
749 } else if (!MBB->back().isBarrier()) {
750 report("MBB exits via conditional branch/branch but doesn't end with a "
751 "barrier instruction!", MBB);
752 } else if (!MBB->back().isTerminator()) {
753 report("MBB exits via conditional branch/branch but the branch "
754 "isn't a terminator instruction!", MBB);
755 }
756 if (Cond.empty()) {
757 report("MBB exits via conditional branch/branch but there's no "
758 "condition!", MBB);
759 }
760 } else {
761 report("analyzeBranch returned invalid data!", MBB);
762 }
763
764 // Now check that the successors match up with the answers reported by
765 // analyzeBranch.
766 if (TBB && !MBB->isSuccessor(TBB))
767 report("MBB exits via jump or conditional branch, but its target isn't a "
768 "CFG successor!",
769 MBB);
770 if (FBB && !MBB->isSuccessor(FBB))
771 report("MBB exits via conditional branch, but its target isn't a CFG "
772 "successor!",
773 MBB);
774
775 // There might be a fallthrough to the next block if there's either no
776 // unconditional true branch, or if there's a condition, and one of the
777 // branches is missing.
778 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
779
780 // A conditional fallthrough must be an actual CFG successor, not
781 // unreachable. (Conversely, an unconditional fallthrough might not really
782 // be a successor, because the block might end in unreachable.)
783 if (!Cond.empty() && !FBB) {
785 if (MBBI == MF->end()) {
786 report("MBB conditionally falls through out of function!", MBB);
787 } else if (!MBB->isSuccessor(&*MBBI))
788 report("MBB exits via conditional branch/fall-through but the CFG "
789 "successors don't match the actual successors!",
790 MBB);
791 }
792
793 // Verify that there aren't any extra un-accounted-for successors.
794 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
795 // If this successor is one of the branch targets, it's okay.
796 if (SuccMBB == TBB || SuccMBB == FBB)
797 continue;
798 // If we might have a fallthrough, and the successor is the fallthrough
799 // block, that's also ok.
800 if (Fallthrough && SuccMBB == MBB->getNextNode())
801 continue;
802 // Also accept successors which are for exception-handling or might be
803 // inlineasm_br targets.
804 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
805 continue;
806 report("MBB has unexpected successors which are not branch targets, "
807 "fallthrough, EHPads, or inlineasm_br targets.",
808 MBB);
809 }
810 }
811
812 regsLive.clear();
813 if (MRI->tracksLiveness()) {
814 for (const auto &LI : MBB->liveins()) {
815 if (!Register::isPhysicalRegister(LI.PhysReg)) {
816 report("MBB live-in list contains non-physical register", MBB);
817 continue;
818 }
819 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
820 regsLive.insert(SubReg);
821 }
822 }
823
824 const MachineFrameInfo &MFI = MF->getFrameInfo();
825 BitVector PR = MFI.getPristineRegs(*MF);
826 for (unsigned I : PR.set_bits()) {
827 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
828 regsLive.insert(SubReg);
829 }
830
831 regsKilled.clear();
832 regsDefined.clear();
833
834 if (Indexes)
835 lastIndex = Indexes->getMBBStartIdx(MBB);
836}
837
838// This function gets called for all bundle headers, including normal
839// stand-alone unbundled instructions.
840void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
841 if (Indexes && Indexes->hasIndex(*MI)) {
842 SlotIndex idx = Indexes->getInstructionIndex(*MI);
843 if (!(idx > lastIndex)) {
844 report("Instruction index out of order", MI);
845 errs() << "Last instruction was at " << lastIndex << '\n';
846 }
847 lastIndex = idx;
848 }
849
850 // Ensure non-terminators don't follow terminators.
851 if (MI->isTerminator()) {
852 if (!FirstTerminator)
853 FirstTerminator = MI;
854 } else if (FirstTerminator) {
855 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
856 // precede non-terminators.
857 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
858 report("Non-terminator instruction after the first terminator", MI);
859 errs() << "First terminator was:\t" << *FirstTerminator;
860 }
861 }
862}
863
864// The operands on an INLINEASM instruction must follow a template.
865// Verify that the flag operands make sense.
866void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
867 // The first two operands on INLINEASM are the asm string and global flags.
868 if (MI->getNumOperands() < 2) {
869 report("Too few operands on inline asm", MI);
870 return;
871 }
872 if (!MI->getOperand(0).isSymbol())
873 report("Asm string must be an external symbol", MI);
874 if (!MI->getOperand(1).isImm())
875 report("Asm flags must be an immediate", MI);
876 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
877 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
878 // and Extra_IsConvergent = 32.
879 if (!isUInt<6>(MI->getOperand(1).getImm()))
880 report("Unknown asm flags", &MI->getOperand(1), 1);
881
882 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
883
884 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
885 unsigned NumOps;
886 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
887 const MachineOperand &MO = MI->getOperand(OpNo);
888 // There may be implicit ops after the fixed operands.
889 if (!MO.isImm())
890 break;
891 const InlineAsm::Flag F(MO.getImm());
892 NumOps = 1 + F.getNumOperandRegisters();
893 }
894
895 if (OpNo > MI->getNumOperands())
896 report("Missing operands in last group", MI);
897
898 // An optional MDNode follows the groups.
899 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
900 ++OpNo;
901
902 // All trailing operands must be implicit registers.
903 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
904 const MachineOperand &MO = MI->getOperand(OpNo);
905 if (!MO.isReg() || !MO.isImplicit())
906 report("Expected implicit register after groups", &MO, OpNo);
907 }
908
909 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
910 const MachineBasicBlock *MBB = MI->getParent();
911
912 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
913 i != e; ++i) {
914 const MachineOperand &MO = MI->getOperand(i);
915
916 if (!MO.isMBB())
917 continue;
918
919 // Check the successor & predecessor lists look ok, assume they are
920 // not. Find the indirect target without going through the successors.
921 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
922 if (!IndirectTargetMBB) {
923 report("INLINEASM_BR indirect target does not exist", &MO, i);
924 break;
925 }
926
927 if (!MBB->isSuccessor(IndirectTargetMBB))
928 report("INLINEASM_BR indirect target missing from successor list", &MO,
929 i);
930
931 if (!IndirectTargetMBB->isPredecessor(MBB))
932 report("INLINEASM_BR indirect target predecessor list missing parent",
933 &MO, i);
934 }
935 }
936}
937
938bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
939 const MachineRegisterInfo &MRI) {
940 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
941 if (!Op.isReg())
942 return false;
943 const auto Reg = Op.getReg();
944 if (Reg.isPhysical())
945 return false;
946 return !MRI.getType(Reg).isScalar();
947 }))
948 return true;
949 report("All register operands must have scalar types", &MI);
950 return false;
951}
952
953/// Check that types are consistent when two operands need to have the same
954/// number of vector elements.
955/// \return true if the types are valid.
956bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
957 const MachineInstr *MI) {
958 if (Ty0.isVector() != Ty1.isVector()) {
959 report("operand types must be all-vector or all-scalar", MI);
960 // Generally we try to report as many issues as possible at once, but in
961 // this case it's not clear what should we be comparing the size of the
962 // scalar with: the size of the whole vector or its lane. Instead of
963 // making an arbitrary choice and emitting not so helpful message, let's
964 // avoid the extra noise and stop here.
965 return false;
966 }
967
968 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
969 report("operand types must preserve number of vector elements", MI);
970 return false;
971 }
972
973 return true;
974}
975
/// Check that the side-effect flavor of a G_INTRINSIC* opcode matches the
/// memory effects declared on the intrinsic itself: the plain /_CONVERGENT
/// opcodes assert no memory access, the *_W_SIDE_EFFECTS ones assert access.
/// \return true if no mismatch was found (or the ID was out of checkable
/// range).
bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // These two opcodes claim the intrinsic does not touch memory.
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): the first line of the statement initializing `Attrs`
    // is missing from this excerpt (the next line is a dangling argument
    // list) — presumably `AttributeList Attrs = Intrinsic::getAttributes(`;
    // confirm against the full file.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}
999
/// Check that the convergence flavor of a G_INTRINSIC* opcode matches the
/// `convergent` attribute on the intrinsic declaration: the non-_CONVERGENT
/// opcodes assert the intrinsic is not convergent, and vice versa.
/// \return true if no mismatch was found (or the ID was out of checkable
/// range).
bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // These two opcodes claim the intrinsic is not convergent.
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): the first line of the statement initializing `Attrs`
    // is missing from this excerpt (the next line is a dangling argument
    // list) — presumably `AttributeList Attrs = Intrinsic::getAttributes(`;
    // confirm against the full file.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
1024
/// Verify target-independent properties of a pre-instruction-selection
/// generic (G_*) instruction: operand/type-index consistency, the absence of
/// physical-register operands, target hooks, and a per-opcode switch of
/// structural constraints. All problems are reported via report(); the
/// function itself returns nothing.
void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  // Generic instructions must be gone once the function is fully selected.
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  // NOTE(review): the declaration of `Types` (used below; presumably a small
  // vector of LLT indexed by generic type index) is missing from this
  // excerpt — confirm against the full file.
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  // Give the target a chance to apply its own, opcode-specific checks.
  // NOTE(review): the declaration of `ErrorInfo` (presumably a StringRef)
  // is missing from this excerpt — confirm against the full file.
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      // NOTE(review): the first line of this condition (presumably comparing
      // the FP semantics' bit size of CF against the destination size) is
      // missing from this excerpt — confirm against the full file.
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        // NOTE(review): the second line of each of the two ordering
        // conditions below is missing from this excerpt — confirm against
        // the full file.
        if (Order == AtomicOrdering::Acquire ||
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    // Every register operand of a G_PHI must carry the same valid type as
    // the destination; MBB operands are skipped by the predicate.
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presense of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each elemement", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getNumElements() !=
        SrcTy.getNumElements() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_EXTRACT: {
    const MachineOperand &SrcOp = MI->getOperand(1);
    if (!SrcOp.isReg()) {
      report("extract source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(2);
    if (!OffsetOp.isImm()) {
      report("extract offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
    if (SrcSize == DstSize)
      report("extract source must be larger than result", MI);

    if (DstSize + OffsetOp.getImm() > SrcSize)
      report("extract reads past end of register", MI);
    break;
  }
  case TargetOpcode::G_INSERT: {
    const MachineOperand &SrcOp = MI->getOperand(2);
    if (!SrcOp.isReg()) {
      report("insert source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(3);
    if (!OffsetOp.isImm()) {
      report("insert offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();

    if (DstSize <= SrcSize)
      report("inserted size must be smaller than total register", MI);

    if (SrcSize + OffsetOp.getImm() > DstSize)
      report("insert writes past end of register", MI);

    break;
  }
  case TargetOpcode::G_JUMP_TABLE: {
    if (!MI->getOperand(1).isJTI())
      report("G_JUMP_TABLE source operand must be a jump table index", MI);
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isPointer())
      report("G_JUMP_TABLE dest operand must have a pointer type", MI);
    break;
  }
  case TargetOpcode::G_BRJT: {
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("G_BRJT src operand 0 must be a pointer type", MI);

    if (!MI->getOperand(1).isJTI())
      report("G_BRJT src operand 1 must be a jump table index", MI);

    const auto &IdxOp = MI->getOperand(2);
    if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
      report("G_BRJT src operand 2 must be a scalar reg type", MI);
    break;
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
    // TODO: Should verify number of def and use operands, but the current
    // interface requires passing in IR types for mangling.
    const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
    if (!IntrIDOp.isIntrinsicID()) {
      report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
      break;
    }

    // Cross-check the opcode's claimed side effects / convergence against
    // the intrinsic's declared attributes.
    if (!verifyGIntrinsicSideEffects(MI))
      break;
    if (!verifyGIntrinsicConvergence(MI))
      break;

    break;
  }
  case TargetOpcode::G_SEXT_INREG: {
    if (!MI->getOperand(2).isImm()) {
      report("G_SEXT_INREG expects an immediate operand #2", MI);
      break;
    }

    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0)
      report("G_SEXT_INREG size must be >= 1", MI);
    if (Imm >= SrcTy.getScalarSizeInBits())
      report("G_SEXT_INREG size must be less than source bit width", MI);
    break;
  }
  case TargetOpcode::G_BSWAP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.getScalarSizeInBits() % 16 != 0)
      report("G_BSWAP size must be a multiple of 16 bits", MI);
    break;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    const MachineOperand &MaskOp = MI->getOperand(3);
    if (!MaskOp.isShuffleMask()) {
      report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
      break;
    }

    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());

    if (Src0Ty != Src1Ty)
      report("Source operands must be the same type", MI);

    if (Src0Ty.getScalarType() != DstTy.getScalarType())
      report("G_SHUFFLE_VECTOR cannot change element type", MI);

    // Don't check that all operands are vector because scalars are used in
    // place of 1 element vectors.
    int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
    int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;

    ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();

    if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
      report("Wrong result type for shufflemask", MI);

    // Negative indices denote undef lanes and are always acceptable.
    for (int Idx : MaskIdxes) {
      if (Idx < 0)
        continue;

      if (Idx >= 2 * SrcNumElts)
        report("Out of bounds shuffle index", MI);
    }

    break;
  }
  case TargetOpcode::G_DYN_STACKALLOC: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &AllocOp = MI->getOperand(1);
    const MachineOperand &AlignOp = MI->getOperand(2);

    if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
      report("dst operand 0 must be a pointer type", MI);
      break;
    }

    if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
      report("src operand 1 must be a scalar reg type", MI);
      break;
    }

    if (!AlignOp.isImm()) {
      report("src operand 2 must be an immediate type", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    // Operand 0 must be the store (destination), operand 1 the load (source).
    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    // G_MEMCPY_INLINE has no 'tail' immediate operand.
    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMSET: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
    if (MMOs.size() != 1) {
      report(Twine(Name, " must have 1 memory operand"), MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
      report(Twine(Name, " memory operand must be a store"), MI);
      break;
    }

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstPtrTy.isPointer()) {
      report(Twine(Name, " operand must be a pointer"), MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent " + Twine(Name, " address space"), MI);

    if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
        (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
      report("'tail' flag (last operand) must be an immediate 0 or 1", MI);

    break;
  }
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    if (!Src1Ty.isScalar())
      report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
    if (!Src2Ty.isVector())
      report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    break;
  }

  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector()) {
      report("Bitfield extraction is not supported on vectors", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_ROTR:
  case TargetOpcode::G_ROTL: {
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (Src1Ty.isVector() != Src2Ty.isVector()) {
      report("Shifts and rotates require operands to be either all scalars or "
             "all vectors",
             MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_LLROUND:
  case TargetOpcode::G_LROUND: {
    verifyAllRegOpsScalar(*MI, *MRI);
    break;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
    LLT DestEltTy = DestTy.getScalarType();
    if (!DestEltTy.isScalar()) {
      report("Destination must be a scalar or vector of scalars", MI);
      break;
    }
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT SrcEltTy = SrcTy.getScalarType();
    if (!SrcEltTy.isScalar()) {
      report("Source must be a scalar or vector of scalars", MI);
      break;
    }
    if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
      break;
    const MachineOperand &TestMO = MI->getOperand(2);
    if (!TestMO.isImm()) {
      report("floating-point class set (operand 2) must be an immediate", MI);
      break;
    }
    int64_t Test = TestMO.getImm();
    if (Test < 0 || Test > fcAllFlags) {
      report("Incorrect floating-point class set (operand 2)", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    if (MI->getOperand(2).getImm() < 1)
      report("alignment immediate must be >= 1", MI);
    break;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    if (!MI->getOperand(1).isCPI())
      report("Src operand 1 must be a constant pool index", MI);
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("Dst operand 0 must be a pointer", MI);
    break;
  }
  default:
    break;
  }
}
1831
1832void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1833 const MCInstrDesc &MCID = MI->getDesc();
1834 if (MI->getNumOperands() < MCID.getNumOperands()) {
1835 report("Too few operands", MI);
1836 errs() << MCID.getNumOperands() << " operands expected, but "
1837 << MI->getNumOperands() << " given.\n";
1838 }
1839
1840 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
1841 report("NoConvergent flag expected only on convergent instructions.", MI);
1842
1843 if (MI->isPHI()) {
1844 if (MF->getProperties().hasProperty(
1846 report("Found PHI instruction with NoPHIs property set", MI);
1847
1848 if (FirstNonPHI)
1849 report("Found PHI instruction after non-PHI", MI);
1850 } else if (FirstNonPHI == nullptr)
1851 FirstNonPHI = MI;
1852
1853 // Check the tied operands.
1854 if (MI->isInlineAsm())
1855 verifyInlineAsm(MI);
1856
1857 // Check that unspillable terminators define a reg and have at most one use.
1858 if (TII->isUnspillableTerminator(MI)) {
1859 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1860 report("Unspillable Terminator does not define a reg", MI);
1861 Register Def = MI->getOperand(0).getReg();
1862 if (Def.isVirtual() &&
1863 !MF->getProperties().hasProperty(
1865 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1866 report("Unspillable Terminator expected to have at most one use!", MI);
1867 }
1868
1869 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1870 // DBG_VALUEs: these are convenient to use in tests, but should never get
1871 // generated.
1872 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1873 if (!MI->getDebugLoc())
1874 report("Missing DebugLoc for debug instruction", MI);
1875
1876 // Meta instructions should never be the subject of debug value tracking,
1877 // they don't create a value in the output program at all.
1878 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1879 report("Metadata instruction should not have a value tracking number", MI);
1880
1881 // Check the MachineMemOperands for basic consistency.
1882 for (MachineMemOperand *Op : MI->memoperands()) {
1883 if (Op->isLoad() && !MI->mayLoad())
1884 report("Missing mayLoad flag", MI);
1885 if (Op->isStore() && !MI->mayStore())
1886 report("Missing mayStore flag", MI);
1887 }
1888
1889 // Debug values must not have a slot index.
1890 // Other instructions must have one, unless they are inside a bundle.
1891 if (LiveInts) {
1892 bool mapped = !LiveInts->isNotInMIMap(*MI);
1893 if (MI->isDebugOrPseudoInstr()) {
1894 if (mapped)
1895 report("Debug instruction has a slot index", MI);
1896 } else if (MI->isInsideBundle()) {
1897 if (mapped)
1898 report("Instruction inside bundle has a slot index", MI);
1899 } else {
1900 if (!mapped)
1901 report("Missing slot index", MI);
1902 }
1903 }
1904
1905 unsigned Opc = MCID.getOpcode();
1907 verifyPreISelGenericInstruction(MI);
1908 return;
1909 }
1910
1912 if (!TII->verifyInstruction(*MI, ErrorInfo))
1913 report(ErrorInfo.data(), MI);
1914
1915 // Verify properties of various specific instruction types
1916 switch (MI->getOpcode()) {
1917 case TargetOpcode::COPY: {
1918 const MachineOperand &DstOp = MI->getOperand(0);
1919 const MachineOperand &SrcOp = MI->getOperand(1);
1920 const Register SrcReg = SrcOp.getReg();
1921 const Register DstReg = DstOp.getReg();
1922
1923 LLT DstTy = MRI->getType(DstReg);
1924 LLT SrcTy = MRI->getType(SrcReg);
1925 if (SrcTy.isValid() && DstTy.isValid()) {
1926 // If both types are valid, check that the types are the same.
1927 if (SrcTy != DstTy) {
1928 report("Copy Instruction is illegal with mismatching types", MI);
1929 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1930 }
1931
1932 break;
1933 }
1934
1935 if (!SrcTy.isValid() && !DstTy.isValid())
1936 break;
1937
1938 // If we have only one valid type, this is likely a copy between a virtual
1939 // and physical register.
1940 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1941 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1942 if (SrcReg.isPhysical() && DstTy.isValid()) {
1943 const TargetRegisterClass *SrcRC =
1944 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1945 if (SrcRC)
1946 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1947 }
1948
1949 if (DstReg.isPhysical() && SrcTy.isValid()) {
1950 const TargetRegisterClass *DstRC =
1951 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1952 if (DstRC)
1953 DstSize = TRI->getRegSizeInBits(*DstRC);
1954 }
1955
1956 // The next two checks allow COPY between physical and virtual registers,
1957 // when the virtual register has a scalable size and the physical register
1958 // has a fixed size. These checks allow COPY between *potentialy* mismatched
1959 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
1960 // be able to resolve a fixed size for the scalable vector, and at that
1961 // point this function will know for sure whether the sizes are mismatched
1962 // and correctly report a size mismatch.
1963 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
1964 !SrcSize.isScalable())
1965 break;
1966 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
1967 !DstSize.isScalable())
1968 break;
1969
1970 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
1971 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1972 report("Copy Instruction is illegal with mismatching sizes", MI);
1973 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1974 << "\n";
1975 }
1976 }
1977 break;
1978 }
1979 case TargetOpcode::STATEPOINT: {
1980 StatepointOpers SO(MI);
1981 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1982 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1983 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1984 report("meta operands to STATEPOINT not constant!", MI);
1985 break;
1986 }
1987
1988 auto VerifyStackMapConstant = [&](unsigned Offset) {
1989 if (Offset >= MI->getNumOperands()) {
1990 report("stack map constant to STATEPOINT is out of range!", MI);
1991 return;
1992 }
1993 if (!MI->getOperand(Offset - 1).isImm() ||
1994 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1995 !MI->getOperand(Offset).isImm())
1996 report("stack map constant to STATEPOINT not well formed!", MI);
1997 };
1998 VerifyStackMapConstant(SO.getCCIdx());
1999 VerifyStackMapConstant(SO.getFlagsIdx());
2000 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2001 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2002 VerifyStackMapConstant(SO.getNumAllocaIdx());
2003 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2004
2005 // Verify that all explicit statepoint defs are tied to gc operands as
2006 // they are expected to be a relocation of gc operands.
2007 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2008 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2009 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2010 unsigned UseOpIdx;
2011 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2012 report("STATEPOINT defs expected to be tied", MI);
2013 break;
2014 }
2015 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2016 report("STATEPOINT def tied to non-gc operand", MI);
2017 break;
2018 }
2019 }
2020
2021 // TODO: verify we have properly encoded deopt arguments
2022 } break;
2023 case TargetOpcode::INSERT_SUBREG: {
2024 unsigned InsertedSize;
2025 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2026 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2027 else
2028 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2029 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2030 if (SubRegSize < InsertedSize) {
2031 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2032 "size than the subreg it was inserted into", MI);
2033 break;
2034 }
2035 } break;
2036 case TargetOpcode::REG_SEQUENCE: {
2037 unsigned NumOps = MI->getNumOperands();
2038 if (!(NumOps & 1)) {
2039 report("Invalid number of operands for REG_SEQUENCE", MI);
2040 break;
2041 }
2042
2043 for (unsigned I = 1; I != NumOps; I += 2) {
2044 const MachineOperand &RegOp = MI->getOperand(I);
2045 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2046
2047 if (!RegOp.isReg())
2048 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2049
2050 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2051 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2052 report("Invalid subregister index operand for REG_SEQUENCE",
2053 &SubRegOp, I + 1);
2054 }
2055 }
2056
2057 Register DstReg = MI->getOperand(0).getReg();
2058 if (DstReg.isPhysical())
2059 report("REG_SEQUENCE does not support physical register results", MI);
2060
2061 if (MI->getOperand(0).getSubReg())
2062 report("Invalid subreg result for REG_SEQUENCE", MI);
2063
2064 break;
2065 }
2066 }
2067}
2068
2069void
2070MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2071 const MachineInstr *MI = MO->getParent();
2072 const MCInstrDesc &MCID = MI->getDesc();
2073 unsigned NumDefs = MCID.getNumDefs();
2074 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2075 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2076
2077 // The first MCID.NumDefs operands must be explicit register defines
2078 if (MONum < NumDefs) {
2079 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2080 if (!MO->isReg())
2081 report("Explicit definition must be a register", MO, MONum);
2082 else if (!MO->isDef() && !MCOI.isOptionalDef())
2083 report("Explicit definition marked as use", MO, MONum);
2084 else if (MO->isImplicit())
2085 report("Explicit definition marked as implicit", MO, MONum);
2086 } else if (MONum < MCID.getNumOperands()) {
2087 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2088 // Don't check if it's the last operand in a variadic instruction. See,
2089 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2090 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2091 if (!IsOptional) {
2092 if (MO->isReg()) {
2093 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2094 report("Explicit operand marked as def", MO, MONum);
2095 if (MO->isImplicit())
2096 report("Explicit operand marked as implicit", MO, MONum);
2097 }
2098
2099 // Check that an instruction has register operands only as expected.
2100 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2101 !MO->isReg() && !MO->isFI())
2102 report("Expected a register operand.", MO, MONum);
2103 if (MO->isReg()) {
2106 !TII->isPCRelRegisterOperandLegal(*MO)))
2107 report("Expected a non-register operand.", MO, MONum);
2108 }
2109 }
2110
2111 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2112 if (TiedTo != -1) {
2113 if (!MO->isReg())
2114 report("Tied use must be a register", MO, MONum);
2115 else if (!MO->isTied())
2116 report("Operand should be tied", MO, MONum);
2117 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2118 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2119 else if (MO->getReg().isPhysical()) {
2120 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2121 if (!MOTied.isReg())
2122 report("Tied counterpart must be a register", &MOTied, TiedTo);
2123 else if (MOTied.getReg().isPhysical() &&
2124 MO->getReg() != MOTied.getReg())
2125 report("Tied physical registers must match.", &MOTied, TiedTo);
2126 }
2127 } else if (MO->isReg() && MO->isTied())
2128 report("Explicit operand should not be tied", MO, MONum);
2129 } else if (!MI->isVariadic()) {
2130 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2131 if (!MO->isValidExcessOperand())
2132 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2133 }
2134
2135 switch (MO->getType()) {
2137 // Verify debug flag on debug instructions. Check this first because reg0
2138 // indicates an undefined debug value.
2139 if (MI->isDebugInstr() && MO->isUse()) {
2140 if (!MO->isDebug())
2141 report("Register operand must be marked debug", MO, MONum);
2142 } else if (MO->isDebug()) {
2143 report("Register operand must not be marked debug", MO, MONum);
2144 }
2145
2146 const Register Reg = MO->getReg();
2147 if (!Reg)
2148 return;
2149 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2150 checkLiveness(MO, MONum);
2151
2152 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2153 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2154 report("Undef virtual register def operands require a subregister", MO, MONum);
2155
2156 // Verify the consistency of tied operands.
2157 if (MO->isTied()) {
2158 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2159 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2160 if (!OtherMO.isReg())
2161 report("Must be tied to a register", MO, MONum);
2162 if (!OtherMO.isTied())
2163 report("Missing tie flags on tied operand", MO, MONum);
2164 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2165 report("Inconsistent tie links", MO, MONum);
2166 if (MONum < MCID.getNumDefs()) {
2167 if (OtherIdx < MCID.getNumOperands()) {
2168 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2169 report("Explicit def tied to explicit use without tie constraint",
2170 MO, MONum);
2171 } else {
2172 if (!OtherMO.isImplicit())
2173 report("Explicit def should be tied to implicit use", MO, MONum);
2174 }
2175 }
2176 }
2177
2178 // Verify two-address constraints after the twoaddressinstruction pass.
2179 // Both twoaddressinstruction pass and phi-node-elimination pass call
2180 // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2181 // after twoaddressinstruction pass not after phi-node-elimination pass. So
2182 // we shouldn't use the IsSSA as the condition, we should based on
2183 // TiedOpsRewritten property to verify two-address constraints, this
2184 // property will be set in twoaddressinstruction pass.
2185 unsigned DefIdx;
2186 if (MF->getProperties().hasProperty(
2188 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2189 Reg != MI->getOperand(DefIdx).getReg())
2190 report("Two-address instruction operands must be identical", MO, MONum);
2191
2192 // Check register classes.
2193 unsigned SubIdx = MO->getSubReg();
2194
2195 if (Reg.isPhysical()) {
2196 if (SubIdx) {
2197 report("Illegal subregister index for physical register", MO, MONum);
2198 return;
2199 }
2200 if (MONum < MCID.getNumOperands()) {
2201 if (const TargetRegisterClass *DRC =
2202 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2203 if (!DRC->contains(Reg)) {
2204 report("Illegal physical register for instruction", MO, MONum);
2205 errs() << printReg(Reg, TRI) << " is not a "
2206 << TRI->getRegClassName(DRC) << " register.\n";
2207 }
2208 }
2209 }
2210 if (MO->isRenamable()) {
2211 if (MRI->isReserved(Reg)) {
2212 report("isRenamable set on reserved register", MO, MONum);
2213 return;
2214 }
2215 }
2216 } else {
2217 // Virtual register.
2218 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2219 if (!RC) {
2220 // This is a generic virtual register.
2221
2222 // Do not allow undef uses for generic virtual registers. This ensures
2223 // getVRegDef can never fail and return null on a generic register.
2224 //
2225 // FIXME: This restriction should probably be broadened to all SSA
2226 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2227 // run on the SSA function just before phi elimination.
2228 if (MO->isUndef())
2229 report("Generic virtual register use cannot be undef", MO, MONum);
2230
2231 // Debug value instruction is permitted to use undefined vregs.
2232 // This is a performance measure to skip the overhead of immediately
2233 // pruning unused debug operands. The final undef substitution occurs
2234 // when debug values are allocated in LDVImpl::handleDebugValue, so
2235 // these verifications always apply after this pass.
2236 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2237 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2238 // If we're post-Select, we can't have gvregs anymore.
2239 if (isFunctionSelected) {
2240 report("Generic virtual register invalid in a Selected function",
2241 MO, MONum);
2242 return;
2243 }
2244
2245 // The gvreg must have a type and it must not have a SubIdx.
2246 LLT Ty = MRI->getType(Reg);
2247 if (!Ty.isValid()) {
2248 report("Generic virtual register must have a valid type", MO,
2249 MONum);
2250 return;
2251 }
2252
2253 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2254 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2255
2256 // If we're post-RegBankSelect, the gvreg must have a bank.
2257 if (!RegBank && isFunctionRegBankSelected) {
2258 report("Generic virtual register must have a bank in a "
2259 "RegBankSelected function",
2260 MO, MONum);
2261 return;
2262 }
2263
2264 // Make sure the register fits into its register bank if any.
2265 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2266 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2267 report("Register bank is too small for virtual register", MO,
2268 MONum);
2269 errs() << "Register bank " << RegBank->getName() << " too small("
2270 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2271 << Ty.getSizeInBits() << "-bits\n";
2272 return;
2273 }
2274 }
2275
2276 if (SubIdx) {
2277 report("Generic virtual register does not allow subregister index", MO,
2278 MONum);
2279 return;
2280 }
2281
2282 // If this is a target specific instruction and this operand
2283 // has register class constraint, the virtual register must
2284 // comply to it.
2285 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2286 MONum < MCID.getNumOperands() &&
2287 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2288 report("Virtual register does not match instruction constraint", MO,
2289 MONum);
2290 errs() << "Expect register class "
2291 << TRI->getRegClassName(
2292 TII->getRegClass(MCID, MONum, TRI, *MF))
2293 << " but got nothing\n";
2294 return;
2295 }
2296
2297 break;
2298 }
2299 if (SubIdx) {
2300 const TargetRegisterClass *SRC =
2301 TRI->getSubClassWithSubReg(RC, SubIdx);
2302 if (!SRC) {
2303 report("Invalid subregister index for virtual register", MO, MONum);
2304 errs() << "Register class " << TRI->getRegClassName(RC)
2305 << " does not support subreg index " << SubIdx << "\n";
2306 return;
2307 }
2308 if (RC != SRC) {
2309 report("Invalid register class for subregister index", MO, MONum);
2310 errs() << "Register class " << TRI->getRegClassName(RC)
2311 << " does not fully support subreg index " << SubIdx << "\n";
2312 return;
2313 }
2314 }
2315 if (MONum < MCID.getNumOperands()) {
2316 if (const TargetRegisterClass *DRC =
2317 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2318 if (SubIdx) {
2319 const TargetRegisterClass *SuperRC =
2320 TRI->getLargestLegalSuperClass(RC, *MF);
2321 if (!SuperRC) {
2322 report("No largest legal super class exists.", MO, MONum);
2323 return;
2324 }
2325 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2326 if (!DRC) {
2327 report("No matching super-reg register class.", MO, MONum);
2328 return;
2329 }
2330 }
2331 if (!RC->hasSuperClassEq(DRC)) {
2332 report("Illegal virtual register for instruction", MO, MONum);
2333 errs() << "Expected a " << TRI->getRegClassName(DRC)
2334 << " register, but got a " << TRI->getRegClassName(RC)
2335 << " register\n";
2336 }
2337 }
2338 }
2339 }
2340 break;
2341 }
2342
2344 regMasks.push_back(MO->getRegMask());
2345 break;
2346
2348 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2349 report("PHI operand is not in the CFG", MO, MONum);
2350 break;
2351
2353 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2354 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2355 int FI = MO->getIndex();
2356 LiveInterval &LI = LiveStks->getInterval(FI);
2357 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2358
2359 bool stores = MI->mayStore();
2360 bool loads = MI->mayLoad();
2361 // For a memory-to-memory move, we need to check if the frame
2362 // index is used for storing or loading, by inspecting the
2363 // memory operands.
2364 if (stores && loads) {
2365 for (auto *MMO : MI->memoperands()) {
2366 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2367 if (PSV == nullptr) continue;
2369 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2370 if (Value == nullptr) continue;
2371 if (Value->getFrameIndex() != FI) continue;
2372
2373 if (MMO->isStore())
2374 loads = false;
2375 else
2376 stores = false;
2377 break;
2378 }
2379 if (loads == stores)
2380 report("Missing fixed stack memoperand.", MI);
2381 }
2382 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2383 report("Instruction loads from dead spill slot", MO, MONum);
2384 errs() << "Live stack: " << LI << '\n';
2385 }
2386 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2387 report("Instruction stores to dead spill slot", MO, MONum);
2388 errs() << "Live stack: " << LI << '\n';
2389 }
2390 }
2391 break;
2392
2394 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2395 report("CFI instruction has invalid index", MO, MONum);
2396 break;
2397
2398 default:
2399 break;
2400 }
2401}
2402
2403void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2404 unsigned MONum, SlotIndex UseIdx,
2405 const LiveRange &LR,
2406 Register VRegOrUnit,
2407 LaneBitmask LaneMask) {
2408 const MachineInstr *MI = MO->getParent();
2409 LiveQueryResult LRQ = LR.Query(UseIdx);
2410 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2411 // Check if we have a segment at the use, note however that we only need one
2412 // live subregister range, the others may be dead.
2413 if (!HasValue && LaneMask.none()) {
2414 report("No live segment at use", MO, MONum);
2415 report_context_liverange(LR);
2416 report_context_vreg_regunit(VRegOrUnit);
2417 report_context(UseIdx);
2418 }
2419 if (MO->isKill() && !LRQ.isKill()) {
2420 report("Live range continues after kill flag", MO, MONum);
2421 report_context_liverange(LR);
2422 report_context_vreg_regunit(VRegOrUnit);
2423 if (LaneMask.any())
2424 report_context_lanemask(LaneMask);
2425 report_context(UseIdx);
2426 }
2427}
2428
/// Check that live range \p LR has a value number defined at \p DefIdx that is
/// consistent with the defining operand \p MO (operand \p MONum), and that a
/// dead flag, if present, agrees with where \p LR ends. \p VRegOrUnit names
/// the vreg or regunit owning \p LR; \p SubRangeCheck with \p LaneMask is set
/// when \p LR is a subregister range.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value number at all at the def slot: the def is missing from LR.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2484
/// Check liveness-related invariants for register operand \p MO (operand
/// number \p MONum): agreement with LiveVariables / LiveIntervals for reads
/// and defs, kill and dead flags, SSA single-def, and maintenance of the
/// local regsKilled / regsDead / regsDefined sets used by the block-level
/// dataflow verification.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Fetch the vreg's live interval up front; also sanity-check that subreg
  // operands have subranges when subreg liveness tracking is enabled.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
          MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        // NOTE(review): LI may be null here if the vreg had no interval (the
        // error was already reported above) — the *LI deref relies on that
        // case not occurring in practice; confirm upstream guarantees.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs occur at the e-slot, normal defs at the r-slot.
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
            ? TRI->getSubRegIndexLaneMask(SubRegIdx)
            : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
2653
2654// This function gets called after visiting all instructions in a bundle. The
2655// argument points to the bundle header.
2656// Normal stand-alone instructions are also considered 'bundles', and this
2657// function is called for all of them.
2658void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2659 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2660 set_union(MInfo.regsKilled, regsKilled);
2661 set_subtract(regsLive, regsKilled); regsKilled.clear();
2662 // Kill any masked registers.
2663 while (!regMasks.empty()) {
2664 const uint32_t *Mask = regMasks.pop_back_val();
2665 for (Register Reg : regsLive)
2666 if (Reg.isPhysical() &&
2667 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2668 regsDead.push_back(Reg);
2669 }
2670 set_subtract(regsLive, regsDead); regsDead.clear();
2671 set_union(regsLive, regsDefined); regsDefined.clear();
2672}
2673
2674void
2675MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2676 MBBInfoMap[MBB].regsLiveOut = regsLive;
2677 regsLive.clear();
2678
2679 if (Indexes) {
2680 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2681 if (!(stop > lastIndex)) {
2682 report("Block ends before last instruction index", MBB);
2683 errs() << "Block ends at " << stop
2684 << " last instruction was at " << lastIndex << '\n';
2685 }
2686 lastIndex = stop;
2687 }
2688}
2689
2690namespace {
2691// This implements a set of registers that serves as a filter: can filter other
2692// sets by passing through elements not in the filter and blocking those that
2693// are. Any filter implicitly includes the full set of physical registers upon
2694// creation, thus filtering them all out. The filter itself as a set only grows,
2695// and needs to be as efficient as possible.
2696struct VRegFilter {
2697 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2698 // no duplicates. Both virtual and physical registers are fine.
2699 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2700 SmallVector<Register, 0> VRegsBuffer;
2701 filterAndAdd(FromRegSet, VRegsBuffer);
2702 }
2703 // Filter \p FromRegSet through the filter and append passed elements into \p
2704 // ToVRegs. All elements appended are then added to the filter itself.
2705 // \returns true if anything changed.
  // Filter \p FromRegSet through the filter and append passed elements into
  // \p ToVRegs. All elements appended are then added to the filter itself.
  // Physical registers never pass (the filter implicitly contains them all).
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    // Pass 1: collect the not-yet-filtered vregs into ToVRegs while computing
    // how much each backing set needs to grow.
    for (Register Reg : FromRegSet) {
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        // Large indices are tracked by the Dense set instead.
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with SparseUniverseMax
    // tuned all the way down) and double iteration (the second one is over a
    // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    // Pass 2: commit the newly passed registers into the filter.
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }
2747
2748private:
2749 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2750 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2751 // are tracked by Dense. The only purpose of the threashold and the Dense set
2752 // is to have a reasonably growing memory usage in pathological cases (large
2753 // number of very sparse VRegFilter instances live at the same time). In
2754 // practice even in the worst-by-execution time cases having all elements
2755 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2756 // space efficient than if tracked by Dense. The threashold is set to keep the
2757 // worst-case memory usage within 2x of figures determined empirically for
2758 // "all Dense" scenario in such worst-by-execution-time cases.
2759 BitVector Sparse;
2761};
2762
2763// Implements both a transfer function and a (binary, in-place) join operator
2764// for a dataflow over register sets with set union join and filtering transfer
2765// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2766// Maintains out_b as its state, allowing for O(n) iteration over it at any
2767// time, where n is the size of the set (as opposed to O(U) where U is the
2768// universe). filter_b implicitly contains all physical registers at all times.
2769class FilteringVRegSet {
2770 VRegFilter Filter;
2772
2773public:
2774 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2775 // Both virtual and physical registers are fine.
2776 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2777 Filter.add(RS);
2778 }
2779 // Passes \p RS through the filter_b (transfer function) and adds what's left
2780 // to itself (out_b).
2781 template <typename RegSetT> bool add(const RegSetT &RS) {
2782 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2783 // a set union) just add everything being added here to the Filter as well.
2784 return Filter.filterAndAdd(RS, VRegs);
2785 }
2786 using const_iterator = decltype(VRegs)::const_iterator;
2787 const_iterator begin() const { return VRegs.begin(); }
2788 const_iterator end() const { return VRegs.end(); }
2789 size_t size() const { return VRegs.size(); }
2790};
2791} // namespace
2792
2793// Calculate the largest possible vregsPassed sets. These are the registers that
2794// can pass through an MBB live, but may not be live every time. It is assumed
2795// that all vregsPassed sets are empty before the call.
2796void MachineVerifier::calcRegsPassed() {
2797 if (MF->empty())
2798 // ReversePostOrderTraversal doesn't handle empty functions.
2799 return;
2800
2801 for (const MachineBasicBlock *MB :
2803 FilteringVRegSet VRegs;
2804 BBInfo &Info = MBBInfoMap[MB];
2805 assert(Info.reachable);
2806
2807 VRegs.addToFilter(Info.regsKilled);
2808 VRegs.addToFilter(Info.regsLiveOut);
2809 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2810 const BBInfo &PredInfo = MBBInfoMap[Pred];
2811 if (!PredInfo.reachable)
2812 continue;
2813
2814 VRegs.add(PredInfo.regsLiveOut);
2815 VRegs.add(PredInfo.vregsPassed);
2816 }
2817 Info.vregsPassed.reserve(VRegs.size());
2818 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2819 }
2820}
2821
2822// Calculate the set of virtual registers that must be passed through each basic
2823// block in order to satisfy the requirements of successor blocks. This is very
2824// similar to calcRegsPassed, only backwards.
2825void MachineVerifier::calcRegsRequired() {
2826 // First push live-in regs to predecessors' vregsRequired.
2828 for (const auto &MBB : *MF) {
2829 BBInfo &MInfo = MBBInfoMap[&MBB];
2830 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2831 BBInfo &PInfo = MBBInfoMap[Pred];
2832 if (PInfo.addRequired(MInfo.vregsLiveIn))
2833 todo.insert(Pred);
2834 }
2835
2836 // Handle the PHI node.
2837 for (const MachineInstr &MI : MBB.phis()) {
2838 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2839 // Skip those Operands which are undef regs or not regs.
2840 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2841 continue;
2842
2843 // Get register and predecessor for one PHI edge.
2844 Register Reg = MI.getOperand(i).getReg();
2845 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2846
2847 BBInfo &PInfo = MBBInfoMap[Pred];
2848 if (PInfo.addRequired(Reg))
2849 todo.insert(Pred);
2850 }
2851 }
2852 }
2853
2854 // Iteratively push vregsRequired to predecessors. This will converge to the
2855 // same final state regardless of DenseSet iteration order.
2856 while (!todo.empty()) {
2857 const MachineBasicBlock *MBB = *todo.begin();
2858 todo.erase(MBB);
2859 BBInfo &MInfo = MBBInfoMap[MBB];
2860 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2861 if (Pred == MBB)
2862 continue;
2863 BBInfo &SInfo = MBBInfoMap[Pred];
2864 if (SInfo.addRequired(MInfo.vregsRequired))
2865 todo.insert(Pred);
2866 }
2867 }
2868}
2869
2870// Check PHI instructions at the beginning of MBB. It is assumed that
2871// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2872void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2873 BBInfo &MInfo = MBBInfoMap[&MBB];
2874
2876 for (const MachineInstr &Phi : MBB) {
2877 if (!Phi.isPHI())
2878 break;
2879 seen.clear();
2880
2881 const MachineOperand &MODef = Phi.getOperand(0);
2882 if (!MODef.isReg() || !MODef.isDef()) {
2883 report("Expected first PHI operand to be a register def", &MODef, 0);
2884 continue;
2885 }
2886 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2887 MODef.isEarlyClobber() || MODef.isDebug())
2888 report("Unexpected flag on PHI operand", &MODef, 0);
2889 Register DefReg = MODef.getReg();
2890 if (!DefReg.isVirtual())
2891 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2892
2893 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2894 const MachineOperand &MO0 = Phi.getOperand(I);
2895 if (!MO0.isReg()) {
2896 report("Expected PHI operand to be a register", &MO0, I);
2897 continue;
2898 }
2899 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2900 MO0.isDebug() || MO0.isTied())
2901 report("Unexpected flag on PHI operand", &MO0, I);
2902
2903 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2904 if (!MO1.isMBB()) {
2905 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2906 continue;
2907 }
2908
2909 const MachineBasicBlock &Pre = *MO1.getMBB();
2910 if (!Pre.isSuccessor(&MBB)) {
2911 report("PHI input is not a predecessor block", &MO1, I + 1);
2912 continue;
2913 }
2914
2915 if (MInfo.reachable) {
2916 seen.insert(&Pre);
2917 BBInfo &PrInfo = MBBInfoMap[&Pre];
2918 if (!MO0.isUndef() && PrInfo.reachable &&
2919 !PrInfo.isLiveOut(MO0.getReg()))
2920 report("PHI operand is not live-out from predecessor", &MO0, I);
2921 }
2922 }
2923
2924 // Did we see all predecessors?
2925 if (MInfo.reachable) {
2926 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2927 if (!seen.count(Pred)) {
2928 report("Missing PHI operand", &Phi);
2929 errs() << printMBBReference(*Pred)
2930 << " is a predecessor according to the CFG.\n";
2931 }
2932 }
2933 }
2934 }
2935}
2936
2937void MachineVerifier::visitMachineFunctionAfter() {
2938 calcRegsPassed();
2939
2940 for (const MachineBasicBlock &MBB : *MF)
2941 checkPHIOps(MBB);
2942
2943 // Now check liveness info if available
2944 calcRegsRequired();
2945
2946 // Check for killed virtual registers that should be live out.
2947 for (const auto &MBB : *MF) {
2948 BBInfo &MInfo = MBBInfoMap[&MBB];
2949 for (Register VReg : MInfo.vregsRequired)
2950 if (MInfo.regsKilled.count(VReg)) {
2951 report("Virtual register killed in block, but needed live out.", &MBB);
2952 errs() << "Virtual register " << printReg(VReg)
2953 << " is used after the block.\n";
2954 }
2955 }
2956
2957 if (!MF->empty()) {
2958 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2959 for (Register VReg : MInfo.vregsRequired) {
2960 report("Virtual register defs don't dominate all uses.", MF);
2961 report_context_vreg(VReg);
2962 }
2963 }
2964
2965 if (LiveVars)
2966 verifyLiveVariables();
2967 if (LiveInts)
2968 verifyLiveIntervals();
2969
2970 // Check live-in list of each MBB. If a register is live into MBB, check
2971 // that the register is in regsLiveOut of each predecessor block. Since
2972 // this must come from a definition in the predecesssor or its live-in
2973 // list, this will catch a live-through case where the predecessor does not
2974 // have the register in its live-in list. This currently only checks
2975 // registers that have no aliases, are not allocatable and are not
2976 // reserved, which could mean a condition code register for instance.
2977 if (MRI->tracksLiveness())
2978 for (const auto &MBB : *MF)
2980 MCPhysReg LiveInReg = P.PhysReg;
2981 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2982 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2983 continue;
2984 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2985 BBInfo &PInfo = MBBInfoMap[Pred];
2986 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2987 report("Live in register not found to be live out from predecessor.",
2988 &MBB);
2989 errs() << TRI->getName(LiveInReg)
2990 << " not found to be live out from "
2991 << printMBBReference(*Pred) << "\n";
2992 }
2993 }
2994 }
2995
2996 for (auto CSInfo : MF->getCallSitesInfo())
2997 if (!CSInfo.first->isCall())
2998 report("Call site info referencing instruction that is not call", MF);
2999
3000 // If there's debug-info, check that we don't have any duplicate value
3001 // tracking numbers.
3002 if (MF->getFunction().getSubprogram()) {
3003 DenseSet<unsigned> SeenNumbers;
3004 for (const auto &MBB : *MF) {
3005 for (const auto &MI : MBB) {
3006 if (auto Num = MI.peekDebugInstrNum()) {
3007 auto Result = SeenNumbers.insert((unsigned)Num);
3008 if (!Result.second)
3009 report("Instruction has a duplicated value tracking number", &MI);
3010 }
3011 }
3012 }
3013 }
3014}
3015
3016void MachineVerifier::verifyLiveVariables() {
3017 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3018 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3020 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3021 for (const auto &MBB : *MF) {
3022 BBInfo &MInfo = MBBInfoMap[&MBB];
3023
3024 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3025 if (MInfo.vregsRequired.count(Reg)) {
3026 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3027 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3028 errs() << "Virtual register " << printReg(Reg)
3029 << " must be live through the block.\n";
3030 }
3031 } else {
3032 if (VI.AliveBlocks.test(MBB.getNumber())) {
3033 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3034 errs() << "Virtual register " << printReg(Reg)
3035 << " is not needed live through the block.\n";
3036 }
3037 }
3038 }
3039 }
3040}
3041
3042void MachineVerifier::verifyLiveIntervals() {
3043 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3044 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3046
3047 // Spilling and splitting may leave unused registers around. Skip them.
3048 if (MRI->reg_nodbg_empty(Reg))
3049 continue;
3050
3051 if (!LiveInts->hasInterval(Reg)) {
3052 report("Missing live interval for virtual register", MF);
3053 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
3054 continue;
3055 }
3056
3057 const LiveInterval &LI = LiveInts->getInterval(Reg);
3058 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3059 verifyLiveInterval(LI);
3060 }
3061
3062 // Verify all the cached regunit intervals.
3063 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3064 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3065 verifyLiveRange(*LR, i);
3066}
3067
3068void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3069 const VNInfo *VNI, Register Reg,
3070 LaneBitmask LaneMask) {
3071 if (VNI->isUnused())
3072 return;
3073
3074 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3075
3076 if (!DefVNI) {
3077 report("Value not live at VNInfo def and not marked unused", MF);
3078 report_context(LR, Reg, LaneMask);
3079 report_context(*VNI);
3080 return;
3081 }
3082
3083 if (DefVNI != VNI) {
3084 report("Live segment at def has different VNInfo", MF);
3085 report_context(LR, Reg, LaneMask);
3086 report_context(*VNI);
3087 return;
3088 }
3089
3090 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3091 if (!MBB) {
3092 report("Invalid VNInfo definition index", MF);
3093 report_context(LR, Reg, LaneMask);
3094 report_context(*VNI);
3095 return;
3096 }
3097
3098 if (VNI->isPHIDef()) {
3099 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3100 report("PHIDef VNInfo is not defined at MBB start", MBB);
3101 report_context(LR, Reg, LaneMask);
3102 report_context(*VNI);
3103 }
3104 return;
3105 }
3106
3107 // Non-PHI def.
3108 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3109 if (!MI) {
3110 report("No instruction at VNInfo def index", MBB);
3111 report_context(LR, Reg, LaneMask);
3112 report_context(*VNI);
3113 return;
3114 }
3115
3116 if (Reg != 0) {
3117 bool hasDef = false;
3118 bool isEarlyClobber = false;
3119 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3120 if (!MOI->isReg() || !MOI->isDef())
3121 continue;
3122 if (Reg.isVirtual()) {
3123 if (MOI->getReg() != Reg)
3124 continue;
3125 } else {
3126 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3127 continue;
3128 }
3129 if (LaneMask.any() &&
3130 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3131 continue;
3132 hasDef = true;
3133 if (MOI->isEarlyClobber())
3134 isEarlyClobber = true;
3135 }
3136
3137 if (!hasDef) {
3138 report("Defining instruction does not modify register", MI);
3139 report_context(LR, Reg, LaneMask);
3140 report_context(*VNI);
3141 }
3142
3143 // Early clobber defs begin at USE slots, but other defs must begin at
3144 // DEF slots.
3145 if (isEarlyClobber) {
3146 if (!VNI->def.isEarlyClobber()) {
3147 report("Early clobber def must be at an early-clobber slot", MBB);
3148 report_context(LR, Reg, LaneMask);
3149 report_context(*VNI);
3150 }
3151 } else if (!VNI->def.isRegister()) {
3152 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3153 report_context(LR, Reg, LaneMask);
3154 report_context(*VNI);
3155 }
3156 }
3157}
3158
3159void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3161 Register Reg,
3162 LaneBitmask LaneMask) {
3163 const LiveRange::Segment &S = *I;
3164 const VNInfo *VNI = S.valno;
3165 assert(VNI && "Live segment has no valno");
3166
3167 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3168 report("Foreign valno in live segment", MF);
3169 report_context(LR, Reg, LaneMask);
3170 report_context(S);
3171 report_context(*VNI);
3172 }
3173
3174 if (VNI->isUnused()) {
3175 report("Live segment valno is marked unused", MF);
3176 report_context(LR, Reg, LaneMask);
3177 report_context(S);
3178 }
3179
3180 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3181 if (!MBB) {
3182 report("Bad start of live segment, no basic block", MF);
3183 report_context(LR, Reg, LaneMask);
3184 report_context(S);
3185 return;
3186 }
3187 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3188 if (S.start != MBBStartIdx && S.start != VNI->def) {
3189 report("Live segment must begin at MBB entry or valno def", MBB);
3190 report_context(LR, Reg, LaneMask);
3191 report_context(S);
3192 }
3193
3194 const MachineBasicBlock *EndMBB =
3195 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3196 if (!EndMBB) {
3197 report("Bad end of live segment, no basic block", MF);
3198 report_context(LR, Reg, LaneMask);
3199 report_context(S);
3200 return;
3201 }
3202
3203 // Checks for non-live-out segments.
3204 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3205 // RegUnit intervals are allowed dead phis.
3206 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3207 S.end == VNI->def.getDeadSlot())
3208 return;
3209
3210 // The live segment is ending inside EndMBB
3211 const MachineInstr *MI =
3213 if (!MI) {
3214 report("Live segment doesn't end at a valid instruction", EndMBB);
3215 report_context(LR, Reg, LaneMask);
3216 report_context(S);
3217 return;
3218 }
3219
3220 // The block slot must refer to a basic block boundary.
3221 if (S.end.isBlock()) {
3222 report("Live segment ends at B slot of an instruction", EndMBB);
3223 report_context(LR, Reg, LaneMask);
3224 report_context(S);
3225 }
3226
3227 if (S.end.isDead()) {
3228 // Segment ends on the dead slot.
3229 // That means there must be a dead def.
3230 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3231 report("Live segment ending at dead slot spans instructions", EndMBB);
3232 report_context(LR, Reg, LaneMask);
3233 report_context(S);
3234 }
3235 }
3236
3237 // After tied operands are rewritten, a live segment can only end at an
3238 // early-clobber slot if it is being redefined by an early-clobber def.
3239 // TODO: Before tied operands are rewritten, a live segment can only end at
3240 // an early-clobber slot if the last use is tied to an early-clobber def.
3241 if (MF->getProperties().hasProperty(
3243 S.end.isEarlyClobber()) {
3244 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3245 report("Live segment ending at early clobber slot must be "
3246 "redefined by an EC def in the same instruction",
3247 EndMBB);
3248 report_context(LR, Reg, LaneMask);
3249 report_context(S);
3250 }
3251 }
3252
3253 // The following checks only apply to virtual registers. Physreg liveness
3254 // is too weird to check.
3255 if (Reg.isVirtual()) {
3256 // A live segment can end with either a redefinition, a kill flag on a
3257 // use, or a dead flag on a def.
3258 bool hasRead = false;
3259 bool hasSubRegDef = false;
3260 bool hasDeadDef = false;
3261 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3262 if (!MOI->isReg() || MOI->getReg() != Reg)
3263 continue;
3264 unsigned Sub = MOI->getSubReg();
3265 LaneBitmask SLM =
3266 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3267 if (MOI->isDef()) {
3268 if (Sub != 0) {
3269 hasSubRegDef = true;
3270 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3271 // mask for subregister defs. Read-undef defs will be handled by
3272 // readsReg below.
3273 SLM = ~SLM;
3274 }
3275 if (MOI->isDead())
3276 hasDeadDef = true;
3277 }
3278 if (LaneMask.any() && (LaneMask & SLM).none())
3279 continue;
3280 if (MOI->readsReg())
3281 hasRead = true;
3282 }
3283 if (S.end.isDead()) {
3284 // Make sure that the corresponding machine operand for a "dead" live
3285 // range has the dead flag. We cannot perform this check for subregister
3286 // liveranges as partially dead values are allowed.
3287 if (LaneMask.none() && !hasDeadDef) {
3288 report(
3289 "Instruction ending live segment on dead slot has no dead flag",
3290 MI);
3291 report_context(LR, Reg, LaneMask);
3292 report_context(S);
3293 }
3294 } else {
3295 if (!hasRead) {
3296 // When tracking subregister liveness, the main range must start new
3297 // values on partial register writes, even if there is no read.
3298 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3299 !hasSubRegDef) {
3300 report("Instruction ending live segment doesn't read the register",
3301 MI);
3302 report_context(LR, Reg, LaneMask);
3303 report_context(S);
3304 }
3305 }
3306 }
3307 }
3308 }
3309
3310 // Now check all the basic blocks in this live segment.
3312 // Is this live segment the beginning of a non-PHIDef VN?
3313 if (S.start == VNI->def && !VNI->isPHIDef()) {
3314 // Not live-in to any blocks.
3315 if (MBB == EndMBB)
3316 return;
3317 // Skip this block.
3318 ++MFI;
3319 }
3320
3322 if (LaneMask.any()) {
3323 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3324 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3325 }
3326
3327 while (true) {
3328 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3329 // We don't know how to track physregs into a landing pad.
3330 if (!Reg.isVirtual() && MFI->isEHPad()) {
3331 if (&*MFI == EndMBB)
3332 break;
3333 ++MFI;
3334 continue;
3335 }
3336
3337 // Is VNI a PHI-def in the current block?
3338 bool IsPHI = VNI->isPHIDef() &&
3339 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3340
3341 // Check that VNI is live-out of all predecessors.
3342 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3343 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3344 // Predecessor of landing pad live-out on last call.
3345 if (MFI->isEHPad()) {
3346 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3347 if (MI.isCall()) {
3348 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3349 break;
3350 }
3351 }
3352 }
3353 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3354
3355 // All predecessors must have a live-out value. However for a phi
3356 // instruction with subregister intervals
3357 // only one of the subregisters (not necessarily the current one) needs to
3358 // be defined.
3359 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3360 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3361 continue;
3362 report("Register not marked live out of predecessor", Pred);
3363 report_context(LR, Reg, LaneMask);
3364 report_context(*VNI);
3365 errs() << " live into " << printMBBReference(*MFI) << '@'
3366 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3367 << PEnd << '\n';
3368 continue;
3369 }
3370
3371 // Only PHI-defs can take different predecessor values.
3372 if (!IsPHI && PVNI != VNI) {
3373 report("Different value live out of predecessor", Pred);
3374 report_context(LR, Reg, LaneMask);
3375 errs() << "Valno #" << PVNI->id << " live out of "
3376 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3377 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3378 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3379 }
3380 }
3381 if (&*MFI == EndMBB)
3382 break;
3383 ++MFI;
3384 }
3385}
3386
3387void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3388 LaneBitmask LaneMask) {
3389 for (const VNInfo *VNI : LR.valnos)
3390 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3391
3392 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3393 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3394}
3395
3396void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3397 Register Reg = LI.reg();
3398 assert(Reg.isVirtual());
3399 verifyLiveRange(LI, Reg);
3400
3401 if (LI.hasSubRanges()) {
3403 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3404 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3405 if ((Mask & SR.LaneMask).any()) {
3406 report("Lane masks of sub ranges overlap in live interval", MF);
3407 report_context(LI);
3408 }
3409 if ((SR.LaneMask & ~MaxMask).any()) {
3410 report("Subrange lanemask is invalid", MF);
3411 report_context(LI);
3412 }
3413 if (SR.empty()) {
3414 report("Subrange must not be empty", MF);
3415 report_context(SR, LI.reg(), SR.LaneMask);
3416 }
3417 Mask |= SR.LaneMask;
3418 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3419 if (!LI.covers(SR)) {
3420 report("A Subrange is not covered by the main range", MF);
3421 report_context(LI);
3422 }
3423 }
3424 }
3425
3426 // Check the LI only has one connected component.
3427 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3428 unsigned NumComp = ConEQ.Classify(LI);
3429 if (NumComp > 1) {
3430 report("Multiple connected components in live interval", MF);
3431 report_context(LI);
3432 for (unsigned comp = 0; comp != NumComp; ++comp) {
3433 errs() << comp << ": valnos";
3434 for (const VNInfo *I : LI.valnos)
3435 if (comp == ConEQ.getEqClass(I))
3436 errs() << ' ' << I->id;
3437 errs() << '\n';
3438 }
3439 }
3440}
3441
namespace {

  // FrameSetup and FrameDestroy can have zero adjustment, so a single integer
  // cannot distinguish "inside a frame setup" from "no frame pending" when the
  // adjustment is zero. Pair the adjustment with a bool to capture the full
  // stack state at block entry and exit.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
        : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
          ExitIsSetup(ExitSetup) {}

    // Stack adjustment on entry/exit of the block. Can be negative, which
    // means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    // Whether a FrameSetup is pending (not yet matched by a FrameDestroy) at
    // block entry/exit.
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
3462
3463/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3464/// by a FrameDestroy <n>, stack adjustments are identical on all
3465/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3466void MachineVerifier::verifyStackFrame() {
3467 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3468 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3469 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3470 return;
3471
3473 SPState.resize(MF->getNumBlockIDs());
3475
3476 // Visit the MBBs in DFS order.
3477 for (df_ext_iterator<const MachineFunction *,
3479 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3480 DFI != DFE; ++DFI) {
3481 const MachineBasicBlock *MBB = *DFI;
3482
3483 StackStateOfBB BBState;
3484 // Check the exit state of the DFS stack predecessor.
3485 if (DFI.getPathLength() >= 2) {
3486 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3487 assert(Reachable.count(StackPred) &&
3488 "DFS stack predecessor is already visited.\n");
3489 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3490 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3491 BBState.ExitValue = BBState.EntryValue;
3492 BBState.ExitIsSetup = BBState.EntryIsSetup;
3493 }
3494
3495 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3496 report("Call frame size on entry does not match value computed from "
3497 "predecessor",
3498 MBB);
3499 errs() << "Call frame size on entry " << MBB->getCallFrameSize()
3500 << " does not match value computed from predecessor "
3501 << -BBState.EntryValue << '\n';
3502 }
3503
3504 // Update stack state by checking contents of MBB.
3505 for (const auto &I : *MBB) {
3506 if (I.getOpcode() == FrameSetupOpcode) {
3507 if (BBState.ExitIsSetup)
3508 report("FrameSetup is after another FrameSetup", &I);
3509 BBState.ExitValue -= TII->getFrameTotalSize(I);
3510 BBState.ExitIsSetup = true;
3511 }
3512
3513 if (I.getOpcode() == FrameDestroyOpcode) {
3514 int Size = TII->getFrameTotalSize(I);
3515 if (!BBState.ExitIsSetup)
3516 report("FrameDestroy is not after a FrameSetup", &I);
3517 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3518 BBState.ExitValue;
3519 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3520 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3521 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3522 << AbsSPAdj << ">.\n";
3523 }
3524 BBState.ExitValue += Size;
3525 BBState.ExitIsSetup = false;
3526 }
3527 }
3528 SPState[MBB->getNumber()] = BBState;
3529
3530 // Make sure the exit state of any predecessor is consistent with the entry
3531 // state.
3532 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3533 if (Reachable.count(Pred) &&
3534 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3535 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3536 report("The exit stack state of a predecessor is inconsistent.", MBB);
3537 errs() << "Predecessor " << printMBBReference(*Pred)
3538 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3539 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3540 << printMBBReference(*MBB) << " has entry state ("
3541 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3542 }
3543 }
3544
3545 // Make sure the entry state of any successor is consistent with the exit
3546 // state.
3547 for (const MachineBasicBlock *Succ : MBB->successors()) {
3548 if (Reachable.count(Succ) &&
3549 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3550 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3551 report("The entry stack state of a successor is inconsistent.", MBB);
3552 errs() << "Successor " << printMBBReference(*Succ)
3553 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3554 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3555 << printMBBReference(*MBB) << " has exit state ("
3556 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3557 }
3558 }
3559
3560 // Make sure a basic block with return ends with zero stack adjustment.
3561 if (!MBB->empty() && MBB->back().isReturn()) {
3562 if (BBState.ExitIsSetup)
3563 report("A return block ends with a FrameSetup.", MBB);
3564 if (BBState.ExitValue)
3565 report("A return block ends with a nonzero stack adjustment.", MBB);
3566 }
3567 }
3568}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:478
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
unsigned Reg
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static unsigned getSize(unsigned Kind)
static constexpr uint32_t Opcode
Definition: aarch32.h:200
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:647
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:228
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
Definition: LiveInterval.h:999
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:261
const APFloat & getValueAPF() const
Definition: Constants.h:297
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:140
This class represents an Operation in the Expression.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Register getReg() const
Base class for user error types.
Definition: Error.h:352
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:172
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:257
constexpr bool isScalar() const
Definition: LowLevelType.h:139
constexpr bool isValid() const
Definition: LowLevelType.h:137
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:149
constexpr bool isVector() const
Definition: LowLevelType.h:145
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:183
constexpr bool isPointer() const
Definition: LowLevelType.h:141
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:280
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:174
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:270
constexpr LLT getScalarType() const
Definition: LowLevelType.h:198
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:193
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:804
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:776
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:787
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
An AnalysisManager<MachineFunction> that also exposes IR analysis results.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:543
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:905
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:939
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:930
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
uint64_t getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
uint64_t getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:68
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:179
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:212
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:245
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:215
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:219
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:234
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:275
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:240
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:222
SlotIndexes pass.
Definition: SlotIndexes.h:300
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:462
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:497
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:502
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:473
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:371
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:452
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:366
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
iterator begin() const
Definition: SmallPtrSet.h:404
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
constexpr bool isNonZero() const
Definition: TypeSize.h:159
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
self_iterator getIterator()
Definition: ilist_node.h:109
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:228
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:237
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1726
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1684
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2042
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1740
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
void verifyMachineFunction(MachineFunctionAnalysisManager *, const std::string &Banner, const MachineFunction &MF)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1853
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1883
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies cenerated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.