//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//
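// Example usage (typical invocations; exact flags depend on the tool):
//   llc -verify-machineinstrs foo.ll
//   llc -run-pass=machineverifier foo.mir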

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

/// Used by the ReportedErrors class to guarantee that only one error is
/// reported at a time.
static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;

struct MachineVerifier {
  MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                  raw_ostream *OS, bool AbortOnError = true)
      : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
                  bool AbortOnError = true)
      : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes, raw_ostream *OS,
                  bool AbortOnError = true)
      : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
        LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
        ReportedErrs(AbortOnError) {}

  /// \returns true if no problems were found.
  bool verify(const MachineFunction &MF);

  MachineFunctionAnalysisManager *MFAM = nullptr;
  Pass *const PASS = nullptr;
  raw_ostream &OS;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that are only used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };
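
  // For example (a sketch of how these sets relate): a vreg defined in block
  // B1, passing through B2 untouched, and used in B3 would appear in
  // B1.regsLiveOut, in B2.vregsPassed, and in B3.vregsLiveIn (mapped to its
  // first user).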

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  /// A class to track the number of reported errors and to guarantee that
  /// only one error is reported at a time.
  class ReportedErrors {
    unsigned NumReported = 0;
    bool AbortOnError;

  public:
    /// \param AbortOnError -- If set, abort after printing the first error.
    ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}

    ~ReportedErrors() {
      if (!hasError())
        return;
      if (AbortOnError)
        report_fatal_error("Found " + Twine(NumReported) +
                           " machine code errors.");
      // Since we haven't aborted, release the lock to allow other threads to
      // report errors.
      ReportedErrorsLock->unlock();
    }

    /// Increment the number of reported errors.
    /// \returns true if this is the first reported error.
    bool increment() {
      // If this is the first error this thread has encountered, grab the lock
      // to prevent other threads from reporting errors at the same time.
      // Otherwise we assume we already have the lock.
      if (!hasError())
        ReportedErrorsLock->lock();
      ++NumReported;
      return NumReported == 1;
    }

    /// \returns true if an error was reported.
    bool hasError() { return NumReported; }
  };
  ReportedErrors ReportedErrs;

  // This is calculated only when trying to verify convergence control tokens.
  // Similar to the LLVM IR verifier, we calculate this locally instead of
  // relying on the pass manager.
  MachineDominatorTree DT;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, VirtRegOrUnit VRegOrUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(VirtRegOrUnit VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, VirtRegOrUnit,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, VirtRegOrUnit,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, VirtRegOrUnit,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();
  /// Check that the stack protector is the top-most object in the stack.
  void verifyStackProtector();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierLegacyPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierLegacyPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
    AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
    AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
    AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasFailsVerification())
      return false;

    MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
    return false;
  }
};

} // end anonymous namespace

PreservedAnalyses
MachineVerifierPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasFailsVerification())
    return PreservedAnalyses::all();
  MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  return PreservedAnalyses::all();
}

char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierLegacyPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
}

bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(MachineFunctionAnalysisManager &MFAM,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(MFAM, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
                         /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
      .verify(*this);
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
                                     E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasNoVRegs() && MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

bool MachineVerifier::verify(const MachineFunction &MF) {
  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const MachineFunctionProperties &Props = MF.getProperties();
  const bool isFunctionFailedISel = Props.hasFailedISel();

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return true;

  isFunctionRegBankSelected = Props.hasRegBankSelected();
  isFunctionSelected = Props.hasSelected();
  isFunctionTracksDebugUserValues = Props.hasTracksDebugUserValues();

  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
    LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return !ReportedErrs.hasError();
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  OS << '\n';
  if (ReportedErrs.increment()) {
    if (Banner)
      OS << "# " << Banner << '\n';

    if (LiveInts != nullptr)
      LiveInts->print(OS);
    else
      MF->print(OS, Indexes);
  }

  OS << "*** Bad machine code: " << msg << " ***\n"
     << "- function: " << MF->getName() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
     << " (" << (const void *)MBB << ')';
  if (Indexes)
    OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
       << Indexes->getMBBEndIdx(MBB) << ')';
  OS << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  OS << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    OS << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(OS, /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  OS << "- operand " << MONum << ": ";
  MO->print(OS, MOVRegType, TRI);
  OS << '\n';
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  OS << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  OS << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR,
                                     VirtRegOrUnit VRegOrUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegOrUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  OS << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  OS << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(
    VirtRegOrUnit VRegOrUnit) const {
  if (VRegOrUnit.isVirtualReg()) {
    report_context_vreg(VRegOrUnit.asVirtualReg());
  } else {
    OS << "- regunit: " << printRegUnit(VRegOrUnit.asMCRegUnit(), TRI)
       << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert_range(MBB.predecessors());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert_range(MBB.successors());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty()) {
    verifyStackFrame();
    verifyStackProtector();
  }
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasNoPHIs() && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock *, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!",
               MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!",
               MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!",
               MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!",
               MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!",
               MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!",
               MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!",
               MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!",
               MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!LI.PhysReg.isPhysical()) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      regsLive.insert_range(TRI->subregs_inclusive(LI.PhysReg));
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits())
    regsLive.insert_range(TRI->subregs_inclusive(I));

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
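// In MIR, a bundle is printed as a BUNDLE header followed by the bundled
// instructions, e.g. (an illustrative sketch, not output of this file):
//   BUNDLE implicit-def $r0 {
//     $r0 = SOME_TARGET_OP ...
//   }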
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      OS << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      OS << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
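// As a rough sketch of the expected layout (inferred from the checks below):
//   operand 0: the asm string (an external symbol)
//   operand 1: the extra-flags immediate
//   then operand groups, each headed by an InlineAsm::Flag immediate,
//   an optional metadata node, and finally any implicit register operands.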
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16,
  // Extra_IsConvergent = 32, and Extra_MayUnwind = 64.
  if (!isUInt<7>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not so helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
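
// For example (a sketch): s32 vs. <4 x s32> fails the all-vector-or-all-scalar
// check, and <2 x s32> vs. <4 x s32> fails the element-count check, while
// <4 x s32> vs. <4 x s16> passes this particular helper.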

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasAttribute(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect.
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);

        if (MMO.getRanges()) {
          ConstantInt *i =
              mdconst::extract<ConstantInt>(MMO.getRanges()->getOperand(0));
          const LLT RangeTy = LLT::scalar(i->getIntegerType()->getBitWidth());
          const LLT MemTy = MMO.getMemoryType();
          if (MemTy.getScalarType() != RangeTy ||
              ValTy.isScalar() != MemTy.isScalar() ||
              (ValTy.isVector() &&
               ValTy.getNumElements() != MemTy.getNumElements())) {
            report("range is incompatible with the result type", MI);
          }
        }
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_TRUNC_SSAT_S:
  case TargetOpcode::G_TRUNC_SSAT_U:
  case TargetOpcode::G_TRUNC_USAT_U:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = SrcTy.getScalarSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_TRUNC_SSAT_S:
    case TargetOpcode::G_TRUNC_SSAT_U:
    case TargetOpcode::G_TRUNC_USAT_U:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger
    // scalar, e.g. s2N = MERGE sN, sN.
    // Merging multiple scalars into a vector is not allowed; G_BUILD_VECTOR
    // should be used for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() ||
          (SrcTy.getScalarType() != DstTy.getScalarType() &&
           !SrcTy.isPointerVector()) ||
          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
          SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getElementCount() !=
        SrcTy.getElementCount() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_SCMP:
  case TargetOpcode::G_UCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());

    if (SrcTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as operands", MI);
      break;
    }

    if (DstTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as a result", MI);
      break;
    }

    if (DstTy.getScalarSizeInBits() < 2) {
      report("Result type must be at least 2 bits wide", MI);
      break;
    }

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount())) {
      report("Generic vector scmp/ucmp must preserve number of lanes", MI);
      break;
    }

    break;
  }
1637 case TargetOpcode::G_EXTRACT: {
1638 const MachineOperand &SrcOp = MI->getOperand(1);
1639 if (!SrcOp.isReg()) {
1640 report("extract source must be a register", MI);
1641 break;
1642 }
1643
1644 const MachineOperand &OffsetOp = MI->getOperand(2);
1645 if (!OffsetOp.isImm()) {
1646 report("extract offset must be a constant", MI);
1647 break;
1648 }
1649
1650 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1651 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1652 if (SrcSize == DstSize)
1653 report("extract source must be larger than result", MI);
1654
1655 if (DstSize + OffsetOp.getImm() > SrcSize)
1656 report("extract reads past end of register", MI);
1657 break;
1658 }
1659 case TargetOpcode::G_INSERT: {
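// G_INSERT writes the value in operand 2 into a copy of operand 1 at the
// immediate bit offset; the inserted value must be narrower than the
// destination and must not write past its end.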
1660 const MachineOperand &SrcOp = MI->getOperand(2);
1661 if (!SrcOp.isReg()) {
1662 report("insert source must be a register", MI);
1663 break;
1664 }
1665
1666 const MachineOperand &OffsetOp = MI->getOperand(3);
1667 if (!OffsetOp.isImm()) {
1668 report("insert offset must be a constant", MI);
1669 break;
1670 }
1671
1672 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1673 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1674
1675 if (DstSize <= SrcSize)
1676 report("inserted size must be smaller than total register", MI);
1677
1678 if (SrcSize + OffsetOp.getImm() > DstSize)
1679 report("insert writes past end of register", MI);
1680
1681 break;
1682 }
1683 case TargetOpcode::G_JUMP_TABLE: {
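// Materializes the address of a jump table: the source must be a jump
// table index and the destination a pointer.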
1684 if (!MI->getOperand(1).isJTI())
1685 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1686 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1687 if (!DstTy.isPointer())
1688 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1689 break;
1690 }
1691 case TargetOpcode::G_BRJT: {
1692 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1693 report("G_BRJT src operand 0 must be a pointer type", MI);
1694
1695 if (!MI->getOperand(1).isJTI())
1696 report("G_BRJT src operand 1 must be a jump table index", MI);
1697
1698 const auto &IdxOp = MI->getOperand(2);
1699 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1700 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1701 break;
1702 }
1703 case TargetOpcode::G_INTRINSIC:
1704 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1705 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1706 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1707 // TODO: Should verify number of def and use operands, but the current
1708 // interface requires passing in IR types for mangling.
1709 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1710 if (!IntrIDOp.isIntrinsicID()) {
1711 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1712 break;
1713 }
1714
1715 if (!verifyGIntrinsicSideEffects(MI))
1716 break;
1717 if (!verifyGIntrinsicConvergence(MI))
1718 break;
1719
1720 break;
1721 }
1722 case TargetOpcode::G_SEXT_INREG: {
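// Operand 2 gives the width of the field being sign-extended; it must be
// at least 1 and strictly less than the source scalar width.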
1723 if (!MI->getOperand(2).isImm()) {
1724 report("G_SEXT_INREG expects an immediate operand #2", MI);
1725 break;
1726 }
1727
1728 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1729 int64_t Imm = MI->getOperand(2).getImm();
1730 if (Imm <= 0)
1731 report("G_SEXT_INREG size must be >= 1", MI);
1732 if (Imm >= SrcTy.getScalarSizeInBits())
1733 report("G_SEXT_INREG size must be less than source bit width", MI);
1734 break;
1735 }
1736 case TargetOpcode::G_BSWAP: {
1737 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1738 if (DstTy.getScalarSizeInBits() % 16 != 0)
1739 report("G_BSWAP size must be a multiple of 16 bits", MI);
1740 break;
1741 }
1742 case TargetOpcode::G_VSCALE: {
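// The vscale multiplier is carried as a ConstantInt and must be nonzero.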
1743 if (!MI->getOperand(1).isCImm()) {
1744 report("G_VSCALE operand must be cimm", MI);
1745 break;
1746 }
1747 if (MI->getOperand(1).getCImm()->isZero()) {
1748 report("G_VSCALE immediate cannot be zero", MI);
1749 break;
1750 }
1751 break;
1752 }
1753 case TargetOpcode::G_STEP_VECTOR: {
1754 if (!MI->getOperand(1).isCImm()) {
1755 report("operand must be cimm", MI);
1756 break;
1757 }
1758
1759 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1760 report("step must be > 0", MI);
1761 break;
1762 }
1763
1764 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1765 if (!DstTy.isScalableVector()) {
1766 report("Destination type must be a scalable vector", MI);
1767 break;
1768 }
1769
1770 // Reject non-scalar element types such as <vscale x 2 x p0>.
1771 if (!DstTy.getElementType().isScalar()) {
1772 report("Destination element type must be scalar", MI);
1773 break;
1774 }
1775
1776 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1777 DstTy.getElementType().getSizeInBits()) {
1778 report("step bitwidth differs from result type element bitwidth", MI);
1779 break;
1780 }
1781 break;
1782 }
1783 case TargetOpcode::G_INSERT_SUBVECTOR: {
1784 const MachineOperand &Src0Op = MI->getOperand(1);
1785 if (!Src0Op.isReg()) {
1786 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1787 break;
1788 }
1789
1790 const MachineOperand &Src1Op = MI->getOperand(2);
1791 if (!Src1Op.isReg()) {
1792 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1793 break;
1794 }
1795
1796 const MachineOperand &IndexOp = MI->getOperand(3);
1797 if (!IndexOp.isImm()) {
1798 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1799 break;
1800 }
1801
1802 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1803 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1804
1805 if (!DstTy.isVector()) {
1806 report("Destination type must be a vector", MI);
1807 break;
1808 }
1809
1810 if (!Src1Ty.isVector()) {
1811 report("Second source must be a vector", MI);
1812 break;
1813 }
1814
1815 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1816 report("Element type of vectors must be the same", MI);
1817 break;
1818 }
1819
1820 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1821 report("Vector types must both be fixed or both be scalable", MI);
1822 break;
1823 }
1824
1825 if (ElementCount::isKnownGT(Src1Ty.getElementCount(),
1826 DstTy.getElementCount())) {
1827 report("Second source must be smaller than destination vector", MI);
1828 break;
1829 }
1830
1831 uint64_t Idx = IndexOp.getImm();
1832 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1833 if (IndexOp.getImm() % Src1MinLen != 0) {
1834 report("Index must be a multiple of the second source vector's "
1835 "minimum vector length",
1836 MI);
1837 break;
1838 }
1839
1840 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1841 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1842 report("Subvector type and index must not cause insert to overrun the "
1843 "vector being inserted into",
1844 MI);
1845 break;
1846 }
1847
1848 break;
1849 }
1850 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1851 const MachineOperand &SrcOp = MI->getOperand(1);
1852 if (!SrcOp.isReg()) {
1853 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1854 break;
1855 }
1856
1857 const MachineOperand &IndexOp = MI->getOperand(2);
1858 if (!IndexOp.isImm()) {
1859 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1860 break;
1861 }
1862
1863 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1864 LLT SrcTy = MRI->getType(SrcOp.getReg());
1865
1866 if (!DstTy.isVector()) {
1867 report("Destination type must be a vector", MI);
1868 break;
1869 }
1870
1871 if (!SrcTy.isVector()) {
1872 report("Source must be a vector", MI);
1873 break;
1874 }
1875
1876 if (DstTy.getElementType() != SrcTy.getElementType()) {
1877 report("Element type of vectors must be the same", MI);
1878 break;
1879 }
1880
1881 if (SrcTy.isScalable() != DstTy.isScalable()) {
1882 report("Vector types must both be fixed or both be scalable", MI);
1883 break;
1884 }
1885
1886 if (ElementCount::isKnownGT(DstTy.getElementCount(),
1887 SrcTy.getElementCount())) {
1888 report("Destination vector must be smaller than source vector", MI);
1889 break;
1890 }
1891
1892 uint64_t Idx = IndexOp.getImm();
1893 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1894 if (Idx % DstMinLen != 0) {
1895 report("Index must be a multiple of the destination vector's minimum "
1896 "vector length",
1897 MI);
1898 break;
1899 }
1900
1901 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1902 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1903 report("Destination type and index must not cause extract to overrun the "
1904 "source vector",
1905 MI);
1906 break;
1907 }
1908
1909 break;
1910 }
1911 case TargetOpcode::G_SHUFFLE_VECTOR: {
1912 const MachineOperand &MaskOp = MI->getOperand(3);
1913 if (!MaskOp.isShuffleMask()) {
1914 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1915 break;
1916 }
1917
1918 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1919 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1920 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1921
1922 if (Src0Ty != Src1Ty)
1923 report("Source operands must be the same type", MI);
1924
1925 if (Src0Ty.getScalarType() != DstTy.getScalarType()) {
1926 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1927 break;
1928 }
1929 if (!Src0Ty.isVector()) {
1930 report("G_SHUFFLE_VECTOR must have vector src", MI);
1931 break;
1932 }
1933 if (!DstTy.isVector()) {
1934 report("G_SHUFFLE_VECTOR must have vector dst", MI);
1935 break;
1936 }
1937
1938 // Don't check that all operands are vectors because scalars are used in
1939 // place of 1-element vectors.
1940 int SrcNumElts = Src0Ty.getNumElements();
1941 int DstNumElts = DstTy.getNumElements();
1942
1943 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1944
1945 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1946 report("Wrong result type for shufflemask", MI);
1947
1948 for (int Idx : MaskIdxes) {
1949 if (Idx < 0)
1950 continue;
1951
1952 if (Idx >= 2 * SrcNumElts)
1953 report("Out of bounds shuffle index", MI);
1954 }
1955
1956 break;
1957 }
1958
1959 case TargetOpcode::G_SPLAT_VECTOR: {
1960 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1961 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1962
1963 if (!DstTy.isScalableVector()) {
1964 report("Destination type must be a scalable vector", MI);
1965 break;
1966 }
1967
1968 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1969 report("Source type must be a scalar or pointer", MI);
1970 break;
1971 }
1972
1973 if (TypeSize::isKnownGT(DstTy.getElementType().getSizeInBits(),
1974 SrcTy.getSizeInBits())) {
1975 report("Element type of the destination must be the same size or smaller "
1976 "than the source type",
1977 MI);
1978 break;
1979 }
1980
1981 break;
1982 }
1983 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1984 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1985 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1986 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1987
1988 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1989 report("Destination type must be a scalar or pointer", MI);
1990 break;
1991 }
1992
1993 if (!SrcTy.isVector()) {
1994 report("First source must be a vector", MI);
1995 break;
1996 }
1997
1998 auto TLI = MF->getSubtarget().getTargetLowering();
1999 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
2000 report("Index type must match VectorIdxTy", MI);
2001 break;
2002 }
2003
2004 break;
2005 }
2006 case TargetOpcode::G_INSERT_VECTOR_ELT: {
2007 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2008 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
2009 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
2010 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
2011
2012 if (!DstTy.isVector()) {
2013 report("Destination type must be a vector", MI);
2014 break;
2015 }
2016
2017 if (VecTy != DstTy) {
2018 report("Destination type and vector type must match", MI);
2019 break;
2020 }
2021
2022 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
2023 report("Inserted element must be a scalar or pointer", MI);
2024 break;
2025 }
2026
2027 auto TLI = MF->getSubtarget().getTargetLowering();
2028 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
2029 report("Index type must match VectorIdxTy", MI);
2030 break;
2031 }
2032
2033 break;
2034 }
2035 case TargetOpcode::G_DYN_STACKALLOC: {
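// Dynamic stack allocation: a pointer result, a scalar allocation size,
// and an immediate alignment.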
2036 const MachineOperand &DstOp = MI->getOperand(0);
2037 const MachineOperand &AllocOp = MI->getOperand(1);
2038 const MachineOperand &AlignOp = MI->getOperand(2);
2039
2040 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2041 report("dst operand 0 must be a pointer type", MI);
2042 break;
2043 }
2044
2045 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2046 report("src operand 1 must be a scalar reg type", MI);
2047 break;
2048 }
2049
2050 if (!AlignOp.isImm()) {
2051 report("src operand 2 must be an immediate type", MI);
2052 break;
2053 }
2054 break;
2055 }
2056 case TargetOpcode::G_MEMCPY_INLINE:
2057 case TargetOpcode::G_MEMCPY:
2058 case TargetOpcode::G_MEMMOVE: {
2059 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2060 if (MMOs.size() != 2) {
2061 report("memcpy/memmove must have 2 memory operands", MI);
2062 break;
2063 }
2064
2065 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2066 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2067 report("wrong memory operand types", MI);
2068 break;
2069 }
2070
2071 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2072 report("inconsistent memory operand sizes", MI);
2073
2074 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2075 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2076
2077 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2078 report("memory instruction operand must be a pointer", MI);
2079 break;
2080 }
2081
2082 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2083 report("inconsistent store address space", MI);
2084 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2085 report("inconsistent load address space", MI);
2086
2087 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2088 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2089 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2090
2091 break;
2092 }
2093 case TargetOpcode::G_BZERO:
2094 case TargetOpcode::G_MEMSET: {
2095 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2096 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2097 if (MMOs.size() != 1) {
2098 report(Twine(Name, " must have 1 memory operand"), MI);
2099 break;
2100 }
2101
2102 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2103 report(Twine(Name, " memory operand must be a store"), MI);
2104 break;
2105 }
2106
2107 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2108 if (!DstPtrTy.isPointer()) {
2109 report(Twine(Name, " operand must be a pointer"), MI);
2110 break;
2111 }
2112
2113 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2114 report("inconsistent " + Twine(Name, " address space"), MI);
2115
2116 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2117 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2118 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2119
2120 break;
2121 }
2122 case TargetOpcode::G_UBSANTRAP: {
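// The crash kind is an immediate that must fit in 8 bits.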
2123 const MachineOperand &KindOp = MI->getOperand(0);
2124 if (!MI->getOperand(0).isImm()) {
2125 report("Crash kind must be an immediate", &KindOp, 0);
2126 break;
2127 }
2128 int64_t Kind = MI->getOperand(0).getImm();
2129 if (!isInt<8>(Kind))
2130 report("Crash kind must be 8 bit wide", &KindOp, 0);
2131 break;
2132 }
2133 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2134 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2135 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2136 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2137 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2138 if (!DstTy.isScalar())
2139 report("Vector reduction requires a scalar destination type", MI);
2140 if (!Src1Ty.isScalar())
2141 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2142 if (!Src2Ty.isVector())
2143 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2144 break;
2145 }
2146 case TargetOpcode::G_VECREDUCE_FADD:
2147 case TargetOpcode::G_VECREDUCE_FMUL:
2148 case TargetOpcode::G_VECREDUCE_FMAX:
2149 case TargetOpcode::G_VECREDUCE_FMIN:
2150 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2151 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2152 case TargetOpcode::G_VECREDUCE_ADD:
2153 case TargetOpcode::G_VECREDUCE_MUL:
2154 case TargetOpcode::G_VECREDUCE_AND:
2155 case TargetOpcode::G_VECREDUCE_OR:
2156 case TargetOpcode::G_VECREDUCE_XOR:
2157 case TargetOpcode::G_VECREDUCE_SMAX:
2158 case TargetOpcode::G_VECREDUCE_SMIN:
2159 case TargetOpcode::G_VECREDUCE_UMAX:
2160 case TargetOpcode::G_VECREDUCE_UMIN: {
2161 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2162 if (!DstTy.isScalar())
2163 report("Vector reduction requires a scalar destination type", MI);
2164 break;
2165 }
2166
2167 case TargetOpcode::G_SBFX:
2168 case TargetOpcode::G_UBFX: {
2169 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2170 if (DstTy.isVector()) {
2171 report("Bitfield extraction is not supported on vectors", MI);
2172 break;
2173 }
2174 break;
2175 }
2176 case TargetOpcode::G_SHL:
2177 case TargetOpcode::G_LSHR:
2178 case TargetOpcode::G_ASHR:
2179 case TargetOpcode::G_ROTR:
2180 case TargetOpcode::G_ROTL: {
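// The shift/rotate amount may differ in type from the shifted value, but
// the two operands must be both scalars or both vectors.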
2181 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2182 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2183 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2184 report("Shifts and rotates require operands to be either all scalars or "
2185 "all vectors",
2186 MI);
2187 break;
2188 }
2189 break;
2190 }
2191 case TargetOpcode::G_LLROUND:
2192 case TargetOpcode::G_LROUND: {
2193 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2194 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2195 if (!DstTy.isValid() || !SrcTy.isValid())
2196 break;
2197 if (SrcTy.isPointer() || DstTy.isPointer()) {
2198 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2199 report(Twine(Op, " operand must not be a pointer type"), MI);
2200 } else if (SrcTy.isScalar()) {
2201 verifyAllRegOpsScalar(*MI, *MRI);
2202 break;
2203 } else if (SrcTy.isVector()) {
2204 verifyVectorElementMatch(SrcTy, DstTy, MI);
2205 break;
2206 }
2207 break;
2208 }
2209 case TargetOpcode::G_IS_FPCLASS: {
2210 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2211 LLT DestEltTy = DestTy.getScalarType();
2212 if (!DestEltTy.isScalar()) {
2213 report("Destination must be a scalar or vector of scalars", MI);
2214 break;
2215 }
2216 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2217 LLT SrcEltTy = SrcTy.getScalarType();
2218 if (!SrcEltTy.isScalar()) {
2219 report("Source must be a scalar or vector of scalars", MI);
2220 break;
2221 }
2222 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2223 break;
2224 const MachineOperand &TestMO = MI->getOperand(2);
2225 if (!TestMO.isImm()) {
2226 report("floating-point class set (operand 2) must be an immediate", MI);
2227 break;
2228 }
2229 int64_t Test = TestMO.getImm();
2231 report("Incorrect floating-point class set (operand 2)", MI);
2232 break;
2233 }
2234 break;
2235 }
2236 case TargetOpcode::G_PREFETCH: {
2237 const MachineOperand &AddrOp = MI->getOperand(0);
2238 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2239 report("addr operand must be a pointer", &AddrOp, 0);
2240 break;
2241 }
2242 const MachineOperand &RWOp = MI->getOperand(1);
2243 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2244 report("rw operand must be an immediate 0-1", &RWOp, 1);
2245 break;
2246 }
2247 const MachineOperand &LocalityOp = MI->getOperand(2);
2248 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2249 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2250 break;
2251 }
2252 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2253 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2254 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2255 break;
2256 }
2257 break;
2258 }
2259 case TargetOpcode::G_ASSERT_ALIGN: {
2260 if (MI->getOperand(2).getImm() < 1)
2261 report("alignment immediate must be >= 1", MI);
2262 break;
2263 }
2264 case TargetOpcode::G_CONSTANT_POOL: {
2265 if (!MI->getOperand(1).isCPI())
2266 report("Src operand 1 must be a constant pool index", MI);
2267 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2268 report("Dst operand 0 must be a pointer", MI);
2269 break;
2270 }
2271 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2272 const MachineOperand &AddrOp = MI->getOperand(1);
2273 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2274 report("addr operand must be a pointer", &AddrOp, 1);
2275 break;
2276 }
2277 case TargetOpcode::G_SMIN:
2278 case TargetOpcode::G_SMAX:
2279 case TargetOpcode::G_UMIN:
2280 case TargetOpcode::G_UMAX: {
2281 const LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2282 if (DstTy.isPointerOrPointerVector())
2283 report("Generic smin/smax/umin/umax does not support pointer operands",
2284 MI);
2285 break;
2286 }
2287 default:
2288 break;
2289 }
2290}
2291
2292void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2293 const MCInstrDesc &MCID = MI->getDesc();
2294 if (MI->getNumOperands() < MCID.getNumOperands()) {
2295 report("Too few operands", MI);
2296 OS << MCID.getNumOperands() << " operands expected, but "
2297 << MI->getNumOperands() << " given.\n";
2298 }
2299
2300 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2301 report("NoConvergent flag expected only on convergent instructions.", MI);
2302
2303 if (MI->isPHI()) {
2304 if (MF->getProperties().hasNoPHIs())
2305 report("Found PHI instruction with NoPHIs property set", MI);
2306
2307 if (FirstNonPHI)
2308 report("Found PHI instruction after non-PHI", MI);
2309 } else if (FirstNonPHI == nullptr)
2310 FirstNonPHI = MI;
2311
2312 // Check the tied operands.
2313 if (MI->isInlineAsm())
2314 verifyInlineAsm(MI);
2315
2316 // Check that unspillable terminators define a reg and have at most one use.
2317 if (TII->isUnspillableTerminator(MI)) {
2318 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2319 report("Unspillable Terminator does not define a reg", MI);
2320 Register Def = MI->getOperand(0).getReg();
2321 if (Def.isVirtual() && !MF->getProperties().hasNoPHIs() &&
2322 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2323 report("Unspillable Terminator expected to have at most one use!", MI);
2324 }
2325
2326 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2327 // DBG_VALUEs: these are convenient to use in tests, but should never get
2328 // generated.
2329 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2330 if (!MI->getDebugLoc())
2331 report("Missing DebugLoc for debug instruction", MI);
2332
2333 // Meta instructions should never be the subject of debug value tracking,
2334 // they don't create a value in the output program at all.
2335 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2336 report("Metadata instruction should not have a value tracking number", MI);
2337
2338 // Check the MachineMemOperands for basic consistency.
2339 for (MachineMemOperand *Op : MI->memoperands()) {
2340 if (Op->isLoad() && !MI->mayLoad())
2341 report("Missing mayLoad flag", MI);
2342 if (Op->isStore() && !MI->mayStore())
2343 report("Missing mayStore flag", MI);
2344 }
2345
2346 // Debug values must not have a slot index.
2347 // Other instructions must have one, unless they are inside a bundle.
2348 if (LiveInts) {
2349 bool mapped = !LiveInts->isNotInMIMap(*MI);
2350 if (MI->isDebugOrPseudoInstr()) {
2351 if (mapped)
2352 report("Debug instruction has a slot index", MI);
2353 } else if (MI->isInsideBundle()) {
2354 if (mapped)
2355 report("Instruction inside bundle has a slot index", MI);
2356 } else {
2357 if (!mapped)
2358 report("Missing slot index", MI);
2359 }
2360 }
2361
2362 unsigned Opc = MCID.getOpcode();
2363 if (isPreISelGenericOpcode(Opc)) {
2364 verifyPreISelGenericInstruction(MI);
2365 return;
2366 }
2367
2368 StringRef ErrorInfo;
2369 if (!TII->verifyInstruction(*MI, ErrorInfo))
2370 report(ErrorInfo.data(), MI);
2371
2372 // Verify properties of various specific instruction types
2373 switch (MI->getOpcode()) {
2374 case TargetOpcode::COPY: {
2375 const MachineOperand &DstOp = MI->getOperand(0);
2376 const MachineOperand &SrcOp = MI->getOperand(1);
2377 const Register SrcReg = SrcOp.getReg();
2378 const Register DstReg = DstOp.getReg();
2379
2380 LLT DstTy = MRI->getType(DstReg);
2381 LLT SrcTy = MRI->getType(SrcReg);
2382 if (SrcTy.isValid() && DstTy.isValid()) {
2383 // If both types are valid, check that the types are the same.
2384 if (SrcTy != DstTy) {
2385 report("Copy Instruction is illegal with mismatching types", MI);
2386 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2387 }
2388
2389 break;
2390 }
2391
2392 if (!SrcTy.isValid() && !DstTy.isValid())
2393 break;
2394
2395 // If we have only one valid type, this is likely a copy between a virtual
2396 // and physical register.
2397 TypeSize SrcSize = TypeSize::getZero();
2398 TypeSize DstSize = TypeSize::getZero();
2399 if (SrcReg.isPhysical() && DstTy.isValid()) {
2400 const TargetRegisterClass *SrcRC =
2401 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2402 if (!SrcRC)
2403 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2404 } else {
2405 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2406 }
2407
2408 if (DstReg.isPhysical() && SrcTy.isValid()) {
2409 const TargetRegisterClass *DstRC =
2410 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2411 if (!DstRC)
2412 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2413 } else {
2414 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2415 }
2416
2417 // The next two checks allow COPY between physical and virtual registers,
2418 // when the virtual register has a scalable size and the physical register
2419 // has a fixed size. These checks allow COPY between *potentially*
2420 // mismatched sizes. However, once RegisterBankSelection occurs,
2421 // MachineVerifier should be able to resolve a fixed size for the scalable
2422 // vector, and at that point this function will know for sure whether the
2423 // sizes are mismatched and correctly report a size mismatch.
2424 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2425 !SrcSize.isScalable())
2426 break;
2427 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2428 !DstSize.isScalable())
2429 break;
2430
2431 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2432 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2433 report("Copy Instruction is illegal with mismatching sizes", MI);
2434 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2435 }
2436 }
2437 break;
2438 }
2439 case TargetOpcode::COPY_LANEMASK: {
2440 const MachineOperand &DstOp = MI->getOperand(0);
2441 const MachineOperand &SrcOp = MI->getOperand(1);
2442 const MachineOperand &LaneMaskOp = MI->getOperand(2);
2443 const Register SrcReg = SrcOp.getReg();
2444 const LaneBitmask LaneMask = LaneMaskOp.getLaneMask();
2445 LaneBitmask SrcMaxLaneMask = LaneBitmask::getAll();
2446
2447 if (DstOp.getSubReg())
2448 report("COPY_LANEMASK must not use a subregister index", &DstOp, 0);
2449
2450 if (SrcOp.getSubReg())
2451 report("COPY_LANEMASK must not use a subregister index", &SrcOp, 1);
2452
2453 if (LaneMask.none())
2454 report("COPY_LANEMASK must read at least one lane", MI);
2455
2456 if (SrcReg.isPhysical()) {
2457 const TargetRegisterClass *SrcRC = TRI->getMinimalPhysRegClass(SrcReg);
2458 if (SrcRC)
2459 SrcMaxLaneMask = SrcRC->getLaneMask();
2460 } else {
2461 SrcMaxLaneMask = MRI->getMaxLaneMaskForVReg(SrcReg);
2462 }
2463
2464 // COPY_LANEMASK should be used only for partial copies; a full copy
2465 // must use the plain COPY instruction instead.
2466 if (SrcMaxLaneMask == LaneMask)
2467 report("COPY_LANEMASK cannot be used to do full copy", MI);
2468
2469 // If LaneMask is greater than SrcMaxLaneMask, COPY_LANEMASK is
2470 // attempting to read from lanes that don't exist in the source
2471 // register.
2472 if (SrcMaxLaneMask < LaneMask)
2473 report("COPY_LANEMASK attempts to read from the lanes that "
2474 "don't exist in the source register",
2475 MI);
2476
2477 break;
2478 }
2479 case TargetOpcode::STATEPOINT: {
2480 StatepointOpers SO(MI);
2481 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2482 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2483 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2484 report("meta operands to STATEPOINT not constant!", MI);
2485 break;
2486 }
2487
2488 auto VerifyStackMapConstant = [&](unsigned Offset) {
2489 if (Offset >= MI->getNumOperands()) {
2490 report("stack map constant to STATEPOINT is out of range!", MI);
2491 return;
2492 }
2493 if (!MI->getOperand(Offset - 1).isImm() ||
2494 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2495 !MI->getOperand(Offset).isImm())
2496 report("stack map constant to STATEPOINT not well formed!", MI);
2497 };
2498 VerifyStackMapConstant(SO.getCCIdx());
2499 VerifyStackMapConstant(SO.getFlagsIdx());
2500 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2501 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2502 VerifyStackMapConstant(SO.getNumAllocaIdx());
2503 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2504
2505 // Verify that all explicit statepoint defs are tied to gc operands as
2506 // they are expected to be a relocation of gc operands.
2507 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2508 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2509 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2510 unsigned UseOpIdx;
2511 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2512 report("STATEPOINT defs expected to be tied", MI);
2513 break;
2514 }
2515 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2516 report("STATEPOINT def tied to non-gc operand", MI);
2517 break;
2518 }
2519 }
2520
2521 // TODO: verify we have properly encoded deopt arguments
2522 } break;
2523 case TargetOpcode::INSERT_SUBREG: {
2524 unsigned InsertedSize;
2525 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2526 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2527 else
2528 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2529 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2530 if (SubRegSize < InsertedSize) {
2531 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2532 "size than the subreg it was inserted into", MI);
2533 break;
2534 }
2535 } break;
2536 case TargetOpcode::REG_SEQUENCE: {
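// After the def, operands come in (register, subregister index) pairs,
// so the total operand count must be odd.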
2537 unsigned NumOps = MI->getNumOperands();
2538 if (!(NumOps & 1)) {
2539 report("Invalid number of operands for REG_SEQUENCE", MI);
2540 break;
2541 }
2542
2543 for (unsigned I = 1; I != NumOps; I += 2) {
2544 const MachineOperand &RegOp = MI->getOperand(I);
2545 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2546
2547 if (!RegOp.isReg())
2548 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2549
2550 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2551 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2552 report("Invalid subregister index operand for REG_SEQUENCE",
2553 &SubRegOp, I + 1);
2554 }
2555 }
2556
2557 Register DstReg = MI->getOperand(0).getReg();
2558 if (DstReg.isPhysical())
2559 report("REG_SEQUENCE does not support physical register results", MI);
2560
2561 if (MI->getOperand(0).getSubReg())
2562 report("Invalid subreg result for REG_SEQUENCE", MI);
2563
2564 break;
2565 }
2566 }
2567}
2568
2569void
2570MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2571 const MachineInstr *MI = MO->getParent();
2572 const MCInstrDesc &MCID = MI->getDesc();
2573 unsigned NumDefs = MCID.getNumDefs();
2574 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2575 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2576
2577 // The first MCID.NumDefs operands must be explicit register defines
2578 if (MONum < NumDefs) {
2579 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2580 if (!MO->isReg())
2581 report("Explicit definition must be a register", MO, MONum);
2582 else if (!MO->isDef() && !MCOI.isOptionalDef())
2583 report("Explicit definition marked as use", MO, MONum);
2584 else if (MO->isImplicit())
2585 report("Explicit definition marked as implicit", MO, MONum);
2586 } else if (MONum < MCID.getNumOperands()) {
2587 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2588 // Don't check if it's the last operand in a variadic instruction. See,
2589 // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2590 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2591 if (!IsOptional) {
2592 if (MO->isReg()) {
2593 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2594 report("Explicit operand marked as def", MO, MONum);
2595 if (MO->isImplicit())
2596 report("Explicit operand marked as implicit", MO, MONum);
2597 }
2598
2599 // Check that an instruction has register operands only as expected.
2600 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2601 !MO->isReg() && !MO->isFI())
2602 report("Expected a register operand.", MO, MONum);
2603 if (MO->isReg()) {
2604 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2605 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2606 !TII->isPCRelRegisterOperandLegal(*MO)))
2607 report("Expected a non-register operand.", MO, MONum);
2608 }
2609 }
2610
2611 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2612 if (TiedTo != -1) {
2613 if (!MO->isReg())
2614 report("Tied use must be a register", MO, MONum);
2615 else if (!MO->isTied())
2616 report("Operand should be tied", MO, MONum);
2617 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2618 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2619 else if (MO->getReg().isPhysical()) {
2620 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2621 if (!MOTied.isReg())
2622 report("Tied counterpart must be a register", &MOTied, TiedTo);
2623 else if (MOTied.getReg().isPhysical() &&
2624 MO->getReg() != MOTied.getReg())
2625 report("Tied physical registers must match.", &MOTied, TiedTo);
2626 }
2627 } else if (MO->isReg() && MO->isTied())
2628 report("Explicit operand should not be tied", MO, MONum);
2629 } else if (!MI->isVariadic()) {
2630 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2631 if (!MO->isValidExcessOperand())
2632 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2633 }
2634
2635 // Verify earlyClobber def operand
2636 if (MCID.getOperandConstraint(MONum, MCOI::EARLY_CLOBBER) != -1) {
2637 if (!MO->isReg())
2638 report("Early clobber must be a register", MI);
2639 if (!MO->isEarlyClobber())
2640 report("Missing earlyClobber flag", MI);
2641 }
2642
2643 switch (MO->getType()) {
2644 case MachineOperand::MO_Register: {
2645 // Verify debug flag on debug instructions. Check this first because reg0
2646 // indicates an undefined debug value.
2647 if (MI->isDebugInstr() && MO->isUse()) {
2648 if (!MO->isDebug())
2649 report("Register operand must be marked debug", MO, MONum);
2650 } else if (MO->isDebug()) {
2651 report("Register operand must not be marked debug", MO, MONum);
2652 }
2653
2654 const Register Reg = MO->getReg();
2655 if (!Reg)
2656 return;
2657 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2658 checkLiveness(MO, MONum);
2659
2660 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2661 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2662 report("Undef virtual register def operands require a subregister", MO, MONum);
2663
2664 // Verify the consistency of tied operands.
2665 if (MO->isTied()) {
2666 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2667 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2668 if (!OtherMO.isReg())
2669 report("Must be tied to a register", MO, MONum);
2670 if (!OtherMO.isTied())
2671 report("Missing tie flags on tied operand", MO, MONum);
2672 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2673 report("Inconsistent tie links", MO, MONum);
2674 if (MONum < MCID.getNumDefs()) {
2675 if (OtherIdx < MCID.getNumOperands()) {
2676 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2677 report("Explicit def tied to explicit use without tie constraint",
2678 MO, MONum);
2679 } else {
2680 if (!OtherMO.isImplicit())
2681 report("Explicit def should be tied to implicit use", MO, MONum);
2682 }
2683 }
2684 }
2685
2686 // Verify two-address constraints after the twoaddressinstruction pass.
2687 // Both the twoaddressinstruction and phi-node-elimination passes call
2688 // MRI->leaveSSA() to clear IsSSA, but the verification must run after
2689 // twoaddressinstruction, not after phi-node-elimination. So we should not
2690 // use IsSSA as the condition; instead, base the two-address verification
2691 // on the TiedOpsRewritten property, which is set by the
2692 // twoaddressinstruction pass.
2693 unsigned DefIdx;
2694 if (MF->getProperties().hasTiedOpsRewritten() && MO->isUse() &&
2695 MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2696 Reg != MI->getOperand(DefIdx).getReg())
2697 report("Two-address instruction operands must be identical", MO, MONum);
2698
2699 // Check register classes.
2700 unsigned SubIdx = MO->getSubReg();
2701
2702 if (Reg.isPhysical()) {
2703 if (SubIdx) {
2704 report("Illegal subregister index for physical register", MO, MONum);
2705 return;
2706 }
2707 if (MONum < MCID.getNumOperands()) {
2708 if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
2709 if (!DRC->contains(Reg)) {
2710 report("Illegal physical register for instruction", MO, MONum);
2711 OS << printReg(Reg, TRI) << " is not a "
2712 << TRI->getRegClassName(DRC) << " register.\n";
2713 }
2714 }
2715 }
2716 if (MO->isRenamable()) {
2717 if (MRI->isReserved(Reg)) {
2718 report("isRenamable set on reserved register", MO, MONum);
2719 return;
2720 }
2721 }
2722 } else {
2723 // Virtual register.
2724 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2725 if (!RC) {
2726 // This is a generic virtual register.
2727
2728 // Do not allow undef uses for generic virtual registers. This ensures
2729 // getVRegDef can never fail and return null on a generic register.
2730 //
2731 // FIXME: This restriction should probably be broadened to all SSA
2732 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2733 // run on the SSA function just before phi elimination.
2734 if (MO->isUndef())
2735 report("Generic virtual register use cannot be undef", MO, MONum);
2736
2737 // Debug value instruction is permitted to use undefined vregs.
2738 // This is a performance measure to skip the overhead of immediately
2739 // pruning unused debug operands. The final undef substitution occurs
2740 // when debug values are allocated in LDVImpl::handleDebugValue, so
2741 // these verifications always apply after this pass.
2742 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2743 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2744 // If we're post-Select, we can't have gvregs anymore.
2745 if (isFunctionSelected) {
2746 report("Generic virtual register invalid in a Selected function",
2747 MO, MONum);
2748 return;
2749 }
2750
2751 // The gvreg must have a type and it must not have a SubIdx.
2752 LLT Ty = MRI->getType(Reg);
2753 if (!Ty.isValid()) {
2754 report("Generic virtual register must have a valid type", MO,
2755 MONum);
2756 return;
2757 }
2758
2759 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2760 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2761
2762 // If we're post-RegBankSelect, the gvreg must have a bank.
2763 if (!RegBank && isFunctionRegBankSelected) {
2764 report("Generic virtual register must have a bank in a "
2765 "RegBankSelected function",
2766 MO, MONum);
2767 return;
2768 }
2769
2770 // Make sure the register fits into its register bank if any.
2771 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2772 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2773 report("Register bank is too small for virtual register", MO,
2774 MONum);
2775 OS << "Register bank " << RegBank->getName() << " too small("
2776 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2777 << Ty.getSizeInBits() << "-bits\n";
2778 return;
2779 }
2780 }
2781
2782 if (SubIdx) {
2783 report("Generic virtual register does not allow subregister index", MO,
2784 MONum);
2785 return;
2786 }
2787
2788 // If this is a target specific instruction and this operand
2789 // has register class constraint, the virtual register must
2790 // comply to it.
2791 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2792 MONum < MCID.getNumOperands() && TII->getRegClass(MCID, MONum)) {
2793 report("Virtual register does not match instruction constraint", MO,
2794 MONum);
2795 OS << "Expect register class "
2796 << TRI->getRegClassName(TII->getRegClass(MCID, MONum))
2797 << " but got nothing\n";
2798 return;
2799 }
2800
2801 break;
2802 }
2803 if (SubIdx) {
2804 const TargetRegisterClass *SRC =
2805 TRI->getSubClassWithSubReg(RC, SubIdx);
2806 if (!SRC) {
2807 report("Invalid subregister index for virtual register", MO, MONum);
2808 OS << "Register class " << TRI->getRegClassName(RC)
2809 << " does not support subreg index "
2810 << TRI->getSubRegIndexName(SubIdx) << '\n';
2811 return;
2812 }
2813 if (RC != SRC) {
2814 report("Invalid register class for subregister index", MO, MONum);
2815 OS << "Register class " << TRI->getRegClassName(RC)
2816 << " does not fully support subreg index "
2817 << TRI->getSubRegIndexName(SubIdx) << '\n';
2818 return;
2819 }
2820 }
2821 if (MONum < MCID.getNumOperands()) {
2822 if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
2823 if (SubIdx) {
2824 const TargetRegisterClass *SuperRC =
2825 TRI->getLargestLegalSuperClass(RC, *MF);
2826 if (!SuperRC) {
2827 report("No largest legal super class exists.", MO, MONum);
2828 return;
2829 }
2830 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2831 if (!DRC) {
2832 report("No matching super-reg register class.", MO, MONum);
2833 return;
2834 }
2835 }
2836 if (!RC->hasSuperClassEq(DRC)) {
2837 report("Illegal virtual register for instruction", MO, MONum);
2838 OS << "Expected a " << TRI->getRegClassName(DRC)
2839 << " register, but got a " << TRI->getRegClassName(RC)
2840 << " register\n";
2841 }
2842 }
2843 }
2844 }
2845 break;
2846 }
2847
2848 case MachineOperand::MO_RegisterMask:
2849 regMasks.push_back(MO->getRegMask());
2850 break;
2851
2852 case MachineOperand::MO_MBB:
2853 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2854 report("PHI operand is not in the CFG", MO, MONum);
2855 break;
2856
2857 case MachineOperand::MO_FrameIndex:
2858 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2859 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2860 int FI = MO->getIndex();
2861 LiveInterval &LI = LiveStks->getInterval(FI);
2862 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2863
2864 bool stores = MI->mayStore();
2865 bool loads = MI->mayLoad();
2866 // For a memory-to-memory move, we need to check if the frame
2867 // index is used for storing or loading, by inspecting the
2868 // memory operands.
2869 if (stores && loads) {
2870 for (auto *MMO : MI->memoperands()) {
2871 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2872 if (PSV == nullptr) continue;
2873 const FixedStackPseudoSourceValue *Value =
2874 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2875 if (Value == nullptr) continue;
2876 if (Value->getFrameIndex() != FI) continue;
2877
2878 if (MMO->isStore())
2879 loads = false;
2880 else
2881 stores = false;
2882 break;
2883 }
2884 if (loads == stores)
2885 report("Missing fixed stack memoperand.", MI);
2886 }
2887 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2888 report("Instruction loads from dead spill slot", MO, MONum);
2889 OS << "Live stack: " << LI << '\n';
2890 }
2891 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2892 report("Instruction stores to dead spill slot", MO, MONum);
2893 OS << "Live stack: " << LI << '\n';
2894 }
2895 }
2896 break;
2897
2898 case MachineOperand::MO_CFIIndex:
2899 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2900 report("CFI instruction has invalid index", MO, MONum);
2901 break;
2902
2903 default:
2904 break;
2905 }
2906}
2907
2908void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2909 unsigned MONum, SlotIndex UseIdx,
2910 const LiveRange &LR,
2911 VirtRegOrUnit VRegOrUnit,
2912 LaneBitmask LaneMask) {
2913 const MachineInstr *MI = MO->getParent();
2914
2915 if (!LR.verify()) {
2916 report("invalid live range", MO, MONum);
2917 report_context_liverange(LR);
2918 report_context_vreg_regunit(VRegOrUnit);
2919 report_context(UseIdx);
2920 return;
2921 }
2922
2923 LiveQueryResult LRQ = LR.Query(UseIdx);
2924 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2925 // Check if we have a segment at the use; note, however, that we only need
2926 // one live subregister range, the others may be dead.
2927 if (!HasValue && LaneMask.none()) {
2928 report("No live segment at use", MO, MONum);
2929 report_context_liverange(LR);
2930 report_context_vreg_regunit(VRegOrUnit);
2931 report_context(UseIdx);
2932 }
2933 if (MO->isKill() && !LRQ.isKill()) {
2934 report("Live range continues after kill flag", MO, MONum);
2935 report_context_liverange(LR);
2936 report_context_vreg_regunit(VRegOrUnit);
2937 if (LaneMask.any())
2938 report_context_lanemask(LaneMask);
2939 report_context(UseIdx);
2940 }
2941}
2942
2943void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2944 unsigned MONum, SlotIndex DefIdx,
2945 const LiveRange &LR,
2946 VirtRegOrUnit VRegOrUnit,
2947 bool SubRangeCheck,
2948 LaneBitmask LaneMask) {
2949 if (!LR.verify()) {
2950 report("invalid live range", MO, MONum);
2951 report_context_liverange(LR);
2952 report_context_vreg_regunit(VRegOrUnit);
2953 if (LaneMask.any())
2954 report_context_lanemask(LaneMask);
2955 report_context(DefIdx);
2956 }
2957
2958 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2959 // The LR can correspond to the whole reg, and its def slot is not obliged
2960 // to be the same as the MO's def slot. E.g. we may be checking a "normal"
2961 // subreg MO here while another EC subreg MO in the same instruction gives
2962 // the whole reg an EC def slot that differs from the currently checked
2963 // MO's def slot. For example:
2964 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2965 // The check that there is an early-clobber def of the same superregister
2966 // somewhere is performed in visitMachineFunctionAfter().
2967 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2968 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2969 (VNI->def != DefIdx &&
2970 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2971 report("Inconsistent valno->def", MO, MONum);
2972 report_context_liverange(LR);
2973 report_context_vreg_regunit(VRegOrUnit);
2974 if (LaneMask.any())
2975 report_context_lanemask(LaneMask);
2976 report_context(*VNI);
2977 report_context(DefIdx);
2978 }
2979 } else {
2980 report("No live segment at def", MO, MONum);
2981 report_context_liverange(LR);
2982 report_context_vreg_regunit(VRegOrUnit);
2983 if (LaneMask.any())
2984 report_context_lanemask(LaneMask);
2985 report_context(DefIdx);
2986 }
2987 // Check that, if the dead def flag is present, LiveInts agree.
2988 if (MO->isDead()) {
2989 LiveQueryResult LRQ = LR.Query(DefIdx);
2990 if (!LRQ.isDeadDef()) {
2991 assert(VRegOrUnit.isVirtualReg() && "Expecting a virtual register.");
2992 // A dead subreg def only tells us that the specific subreg is dead. There
2993 // could be other non-dead defs of other subregs, or we could have other
2994 // parts of the register being live through the instruction. So unless we
2995 // are checking liveness for a subrange it is ok for the live range to
2996 // continue, given that we have a dead def of a subregister.
2997 if (SubRangeCheck || MO->getSubReg() == 0) {
2998 report("Live range continues after dead def flag", MO, MONum);
2999 report_context_liverange(LR);
3000 report_context_vreg_regunit(VRegOrUnit);
3001 if (LaneMask.any())
3002 report_context_lanemask(LaneMask);
3003 }
3004 }
3005 }
3006}
3007
3008void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
3009 const MachineInstr *MI = MO->getParent();
3010 const Register Reg = MO->getReg();
3011 const unsigned SubRegIdx = MO->getSubReg();
3012
3013 const LiveInterval *LI = nullptr;
3014 if (LiveInts && Reg.isVirtual()) {
3015 if (LiveInts->hasInterval(Reg)) {
3016 LI = &LiveInts->getInterval(Reg);
3017 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
3018 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
3019 report("Live interval for subreg operand has no subranges", MO, MONum);
3020 } else {
3021 report("Virtual register has no live interval", MO, MONum);
3022 }
3023 }
3024
3025 // Both use and def operands can read a register.
3026 if (MO->readsReg()) {
3027 if (MO->isKill())
3028 addRegWithSubRegs(regsKilled, Reg);
3029
3030 // Check that LiveVars knows this kill (unless we are inside a bundle, in
3031 // which case we have already checked that LiveVars knows any kills on the
3032 // bundle header instead).
3033 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
3034 !MI->isBundledWithPred()) {
3034 LiveVariables::VarInfo &VI =
3035 LiveVars->getVarInfo(Reg);
3036 if (!is_contained(VI.Kills, MI))
3037 report("Kill missing from LiveVariables", MO, MONum);
3038 }
3039
3040 // Check LiveInts liveness and kill.
3041 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3042 SlotIndex UseIdx;
3043 if (MI->isPHI()) {
3044 // PHI use occurs on the edge, so check for live out here instead.
3045 UseIdx = LiveInts->getMBBEndIdx(
3046 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
3047 } else {
3048 UseIdx = LiveInts->getInstructionIndex(*MI);
3049 }
3050 // Check the cached regunit intervals.
3051 if (Reg.isPhysical() && !isReserved(Reg)) {
3052 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
3053 if (MRI->isReservedRegUnit(Unit))
3054 continue;
3055 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
3056 checkLivenessAtUse(MO, MONum, UseIdx, *LR, VirtRegOrUnit(Unit));
3057 }
3058 }
3059
3060 if (Reg.isVirtual()) {
3061 // This is a virtual register interval.
3062 checkLivenessAtUse(MO, MONum, UseIdx, *LI, VirtRegOrUnit(Reg));
3063
3064 if (LI->hasSubRanges() && !MO->isDef()) {
3065 LaneBitmask MOMask = SubRegIdx != 0
3066 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3067 : MRI->getMaxLaneMaskForVReg(Reg);
3068 LaneBitmask LiveInMask;
3069 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3070 if ((MOMask & SR.LaneMask).none())
3071 continue;
3072 checkLivenessAtUse(MO, MONum, UseIdx, SR, VirtRegOrUnit(Reg),
3073 SR.LaneMask);
3074 LiveQueryResult LRQ = SR.Query(UseIdx);
3075 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
3076 LiveInMask |= SR.LaneMask;
3077 }
3078 // At least part of the register has to be live at the use.
3079 if ((LiveInMask & MOMask).none()) {
3080 report("No live subrange at use", MO, MONum);
3081 report_context(*LI);
3082 report_context(UseIdx);
3083 }
3084 // For PHIs all lanes should be live
3085 if (MI->isPHI() && LiveInMask != MOMask) {
3086 report("Not all lanes of PHI source live at use", MO, MONum);
3087 report_context(*LI);
3088 report_context(UseIdx);
3089 }
3090 }
3091 }
3092 }
3093
3094 // Use of a dead register.
3095 if (!regsLive.count(Reg)) {
3096 if (Reg.isPhysical()) {
3097 // Reserved registers may be used even when 'dead'.
3098 bool Bad = !isReserved(Reg);
3099 // We are fine if any subregister has a defined value.
3100 if (Bad) {
3101
3102 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
3103 if (regsLive.count(SubReg)) {
3104 Bad = false;
3105 break;
3106 }
3107 }
3108 }
3109 // If there is an additional implicit-use of a super register we stop
3110 // here. By definition we are fine if the super register is not
3111 // (completely) dead; if the complete super register is dead we will
3112 // get a report for its operand.
3113 if (Bad) {
3114 for (const MachineOperand &MOP : MI->uses()) {
3115 if (!MOP.isReg() || !MOP.isImplicit())
3116 continue;
3117
3118 if (!MOP.getReg().isPhysical())
3119 continue;
3120
3121 if (MOP.getReg() != Reg &&
3122 all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
3123 return llvm::is_contained(TRI->regunits(MOP.getReg()),
3124 RegUnit);
3125 }))
3126 Bad = false;
3127 }
3128 }
3129 if (Bad)
3130 report("Using an undefined physical register", MO, MONum);
3131 } else if (MRI->def_empty(Reg)) {
3132 report("Reading virtual register without a def", MO, MONum);
3133 } else {
3134 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3135 // We don't know which virtual registers are live in, so only complain
3136 // if vreg was killed in this MBB. Otherwise keep track of vregs that
3137 // must be live in. PHI instructions are handled separately.
3138 if (MInfo.regsKilled.count(Reg))
3139 report("Using a killed virtual register", MO, MONum);
3140 else if (!MI->isPHI())
3141 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
3142 }
3143 }
3144 }
3145
3146 if (MO->isDef()) {
3147 // Register defined.
3148 // TODO: verify that earlyclobber ops are not used.
3149 if (MO->isDead())
3150 addRegWithSubRegs(regsDead, Reg);
3151 else
3152 addRegWithSubRegs(regsDefined, Reg);
3153
3154 // Verify SSA form.
3155 if (MRI->isSSA() && Reg.isVirtual() &&
3156 std::next(MRI->def_begin(Reg)) != MRI->def_end())
3157 report("Multiple virtual register defs in SSA form", MO, MONum);
3158
3159 // Check LiveInts for a live segment, but only for virtual registers.
3160 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3161 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3162 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3163
3164 if (Reg.isVirtual()) {
3165 checkLivenessAtDef(MO, MONum, DefIdx, *LI, VirtRegOrUnit(Reg));
3166
3167 if (LI->hasSubRanges()) {
3168 LaneBitmask MOMask = SubRegIdx != 0
3169 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3170 : MRI->getMaxLaneMaskForVReg(Reg);
3171 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3172 if ((SR.LaneMask & MOMask).none())
3173 continue;
3174 checkLivenessAtDef(MO, MONum, DefIdx, SR, VirtRegOrUnit(Reg), true,
3175 SR.LaneMask);
3176 }
3177 }
3178 }
3179 }
3180 }
3181}
3182
3183// This function gets called after visiting all instructions in a bundle. The
3184// argument points to the bundle header.
3185// Normal stand-alone instructions are also considered 'bundles', and this
3186// function is called for all of them.
3187void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3188 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3189 set_union(MInfo.regsKilled, regsKilled);
3190 set_subtract(regsLive, regsKilled); regsKilled.clear();
3191 // Kill any masked registers.
3192 while (!regMasks.empty()) {
3193 const uint32_t *Mask = regMasks.pop_back_val();
3194 for (Register Reg : regsLive)
3195 if (Reg.isPhysical() &&
3196 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3197 regsDead.push_back(Reg);
3198 }
3199 set_subtract(regsLive, regsDead); regsDead.clear();
3200 set_union(regsLive, regsDefined); regsDefined.clear();
3201}
3202
3203void
3204MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3205 MBBInfoMap[MBB].regsLiveOut = regsLive;
3206 regsLive.clear();
3207
3208 if (Indexes) {
3209 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3210 if (!(stop > lastIndex)) {
3211 report("Block ends before last instruction index", MBB);
3212 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3213 << '\n';
3214 }
3215 lastIndex = stop;
3216 }
3217}
3218
3219namespace {
3220 // This implements a set of registers that serves as a filter: it can filter
3221 // other sets by passing through elements not in the filter and blocking those
3222 // that are. Any filter implicitly includes the full set of physical registers
3223 // upon creation, thus filtering them all out. The filter set itself only
3224 // grows, and needs to be as efficient as possible.
3225struct VRegFilter {
3226 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3227 // no duplicates. Both virtual and physical registers are fine.
3228 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3229 SmallVector<Register, 0> VRegsBuffer;
3230 filterAndAdd(FromRegSet, VRegsBuffer);
3231 }
3232 // Filter \p FromRegSet through the filter and append passed elements into \p
3233 // ToVRegs. All elements appended are then added to the filter itself.
3234 // \returns true if anything changed.
3235 template <typename RegSetT>
3236 bool filterAndAdd(const RegSetT &FromRegSet,
3237 SmallVectorImpl<Register> &ToVRegs) {
3238 unsigned SparseUniverse = Sparse.size();
3239 unsigned NewSparseUniverse = SparseUniverse;
3240 unsigned NewDenseSize = Dense.size();
3241 size_t Begin = ToVRegs.size();
3242 for (Register Reg : FromRegSet) {
3243 if (!Reg.isVirtual())
3244 continue;
3245 unsigned Index = Reg.virtRegIndex();
3246 if (Index < SparseUniverseMax) {
3247 if (Index < SparseUniverse && Sparse.test(Index))
3248 continue;
3249 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3250 } else {
3251 if (Dense.count(Reg))
3252 continue;
3253 ++NewDenseSize;
3254 }
3255 ToVRegs.push_back(Reg);
3256 }
3257 size_t End = ToVRegs.size();
3258 if (Begin == End)
3259 return false;
3260 // Reserving space in sets once performs better than doing so continuously
3261 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3262 // tuned all the way down) and double iteration (the second one is over a
3263 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3264 Sparse.resize(NewSparseUniverse);
3265 Dense.reserve(NewDenseSize);
3266 for (unsigned I = Begin; I < End; ++I) {
3267 Register Reg = ToVRegs[I];
3268 unsigned Index = Reg.virtRegIndex();
3269 if (Index < SparseUniverseMax)
3270 Sparse.set(Index);
3271 else
3272 Dense.insert(Reg);
3273 }
3274 return true;
3275 }
3276
3277private:
3278 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3279 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
3280 // are tracked by Dense. The only purpose of the threshold and the Dense set
3281 // is to have a reasonably growing memory usage in pathological cases (large
3282 // number of very sparse VRegFilter instances live at the same time). In
3283 // practice even in the worst-by-execution time cases having all elements
3284 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3285 // space efficient than if tracked by Dense. The threshold is set to keep the
3286 // worst-case memory usage within 2x of figures determined empirically for
3287 // "all Dense" scenario in such worst-by-execution-time cases.
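  // Editorial arithmetic: SparseUniverseMax = 10 * 1024 * 8 = 81920 virtual
  // register indexes, so the Sparse BitVector tops out at 81920 bits = 10 KiB
  // per live VRegFilter before elements spill over into Dense.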
3288 BitVector Sparse;
3289 DenseSet<Register> Dense;
3290};
3291
3292// Implements both a transfer function and a (binary, in-place) join operator
3293// for a dataflow over register sets with set union join and filtering transfer
3294// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3295// Maintains out_b as its state, allowing for O(n) iteration over it at any
3296// time, where n is the size of the set (as opposed to O(U) where U is the
3297// universe). filter_b implicitly contains all physical registers at all times.
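// Editorial worked example: with filter_b = {a} and in_b = {a, b, c}, add()
// yields out_b = {b, c} and folds {b, c} back into the filter, so a later
// add({b, d}) contributes only {d}. Repeated adds therefore behave like a set
// union without ever storing duplicates in VRegs.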
3298class FilteringVRegSet {
3299 VRegFilter Filter;
3300   SmallVector<Register, 0> VRegs;
3301
3302public:
3303 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3304 // Both virtual and physical registers are fine.
3305 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3306 Filter.add(RS);
3307 }
3308 // Passes \p RS through the filter_b (transfer function) and adds what's left
3309 // to itself (out_b).
3310 template <typename RegSetT> bool add(const RegSetT &RS) {
3311     // Double-duty the Filter: to keep VRegs a set (and the join operation
3312     // a set union), just add everything being added here to the Filter as well.
3313 return Filter.filterAndAdd(RS, VRegs);
3314 }
3315 using const_iterator = decltype(VRegs)::const_iterator;
3316 const_iterator begin() const { return VRegs.begin(); }
3317 const_iterator end() const { return VRegs.end(); }
3318 size_t size() const { return VRegs.size(); }
3319};
3320} // namespace
3321
3322// Calculate the largest possible vregsPassed sets. These are the registers that
3323// can pass through an MBB live, but may not be live every time. It is assumed
3324// that all vregsPassed sets are empty before the call.
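// Editorial summary of the loop below: for each block B, visited in reverse
// post order, vregsPassed(B) is the union of regsLiveOut(P) and vregsPassed(P)
// over all reachable predecessors P, filtered against regsKilled(B) and
// regsLiveOut(B). RPO processes predecessors before B except across back
// edges, which is what keeps a single sweep sufficient here.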
3325void MachineVerifier::calcRegsPassed() {
3326 if (MF->empty())
3327 // ReversePostOrderTraversal doesn't handle empty functions.
3328 return;
3329
3330 for (const MachineBasicBlock *MB :
3331        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
3332     FilteringVRegSet VRegs;
3333 BBInfo &Info = MBBInfoMap[MB];
3334 assert(Info.reachable);
3335
3336 VRegs.addToFilter(Info.regsKilled);
3337 VRegs.addToFilter(Info.regsLiveOut);
3338 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3339 const BBInfo &PredInfo = MBBInfoMap[Pred];
3340 if (!PredInfo.reachable)
3341 continue;
3342
3343 VRegs.add(PredInfo.regsLiveOut);
3344 VRegs.add(PredInfo.vregsPassed);
3345 }
3346 Info.vregsPassed.reserve(VRegs.size());
3347 Info.vregsPassed.insert_range(VRegs);
3348 }
3349}
3350
3351// Calculate the set of virtual registers that must be passed through each basic
3352// block in order to satisfy the requirements of successor blocks. This is very
3353// similar to calcRegsPassed, only backwards.
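// Editorial sketch of the recurrence solved below: for every CFG edge P -> S,
// vregsRequired(P) must include vregsLiveIn(S) as well as vregsRequired(S),
// with PHI uses charged only to their specific incoming edge. The 'todo'
// worklist iterates this to a fixed point.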
3354void MachineVerifier::calcRegsRequired() {
3355 // First push live-in regs to predecessors' vregsRequired.
3356   SmallPtrSet<const MachineBasicBlock*, 8> todo;
3357   for (const auto &MBB : *MF) {
3358 BBInfo &MInfo = MBBInfoMap[&MBB];
3359 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3360 BBInfo &PInfo = MBBInfoMap[Pred];
3361 if (PInfo.addRequired(MInfo.vregsLiveIn))
3362 todo.insert(Pred);
3363 }
3364
3365 // Handle the PHI node.
3366 for (const MachineInstr &MI : MBB.phis()) {
3367 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3368 // Skip those Operands which are undef regs or not regs.
3369 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3370 continue;
3371
3372 // Get register and predecessor for one PHI edge.
3373 Register Reg = MI.getOperand(i).getReg();
3374 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3375
3376 BBInfo &PInfo = MBBInfoMap[Pred];
3377 if (PInfo.addRequired(Reg))
3378 todo.insert(Pred);
3379 }
3380 }
3381 }
3382
3383 // Iteratively push vregsRequired to predecessors. This will converge to the
3384 // same final state regardless of DenseSet iteration order.
3385 while (!todo.empty()) {
3386 const MachineBasicBlock *MBB = *todo.begin();
3387 todo.erase(MBB);
3388 BBInfo &MInfo = MBBInfoMap[MBB];
3389 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3390 if (Pred == MBB)
3391 continue;
3392 BBInfo &SInfo = MBBInfoMap[Pred];
3393 if (SInfo.addRequired(MInfo.vregsRequired))
3394 todo.insert(Pred);
3395 }
3396 }
3397}
3398
3399// Check PHI instructions at the beginning of MBB. It is assumed that
3400// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3401void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3402 BBInfo &MInfo = MBBInfoMap[&MBB];
3403
3404   SmallPtrSet<const MachineBasicBlock*, 8> seen;
3405   for (const MachineInstr &Phi : MBB) {
3406 if (!Phi.isPHI())
3407 break;
3408 seen.clear();
3409
3410 const MachineOperand &MODef = Phi.getOperand(0);
3411 if (!MODef.isReg() || !MODef.isDef()) {
3412 report("Expected first PHI operand to be a register def", &MODef, 0);
3413 continue;
3414 }
3415 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3416 MODef.isEarlyClobber() || MODef.isDebug())
3417 report("Unexpected flag on PHI operand", &MODef, 0);
3418 Register DefReg = MODef.getReg();
3419 if (!DefReg.isVirtual())
3420 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3421
3422 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3423 const MachineOperand &MO0 = Phi.getOperand(I);
3424 if (!MO0.isReg()) {
3425 report("Expected PHI operand to be a register", &MO0, I);
3426 continue;
3427 }
3428 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3429 MO0.isDebug() || MO0.isTied())
3430 report("Unexpected flag on PHI operand", &MO0, I);
3431
3432 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3433 if (!MO1.isMBB()) {
3434 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3435 continue;
3436 }
3437
3438 const MachineBasicBlock &Pre = *MO1.getMBB();
3439 if (!Pre.isSuccessor(&MBB)) {
3440 report("PHI input is not a predecessor block", &MO1, I + 1);
3441 continue;
3442 }
3443
3444 if (MInfo.reachable) {
3445 seen.insert(&Pre);
3446 BBInfo &PrInfo = MBBInfoMap[&Pre];
3447 if (!MO0.isUndef() && PrInfo.reachable &&
3448 !PrInfo.isLiveOut(MO0.getReg()))
3449 report("PHI operand is not live-out from predecessor", &MO0, I);
3450 }
3451 }
3452
3453 // Did we see all predecessors?
3454 if (MInfo.reachable) {
3455 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3456 if (!seen.count(Pred)) {
3457 report("Missing PHI operand", &Phi);
3458 OS << printMBBReference(*Pred)
3459 << " is a predecessor according to the CFG.\n";
3460 }
3461 }
3462 }
3463 }
3464}
3465
3466static void
3467 verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
3468                          std::function<void(const Twine &Message)> FailureCB,
3469 raw_ostream &OS) {
3470   MachineConvergenceVerifier CV;
3471   CV.initialize(&OS, FailureCB, MF);
3472
3473 for (const auto &MBB : MF) {
3474 CV.visit(MBB);
3475 for (const auto &MI : MBB.instrs())
3476 CV.visit(MI);
3477 }
3478
3479 if (CV.sawTokens()) {
3480 DT.recalculate(const_cast<MachineFunction &>(MF));
3481 CV.verify(DT);
3482 }
3483}
3484
3485void MachineVerifier::visitMachineFunctionAfter() {
3486 auto FailureCB = [this](const Twine &Message) {
3487 report(Message.str().c_str(), MF);
3488 };
3489 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3490
3491 calcRegsPassed();
3492
3493 for (const MachineBasicBlock &MBB : *MF)
3494 checkPHIOps(MBB);
3495
3496 // Now check liveness info if available
3497 calcRegsRequired();
3498
3499 // Check for killed virtual registers that should be live out.
3500 for (const auto &MBB : *MF) {
3501 BBInfo &MInfo = MBBInfoMap[&MBB];
3502 for (Register VReg : MInfo.vregsRequired)
3503 if (MInfo.regsKilled.count(VReg)) {
3504 report("Virtual register killed in block, but needed live out.", &MBB);
3505 OS << "Virtual register " << printReg(VReg)
3506 << " is used after the block.\n";
3507 }
3508 }
3509
3510 if (!MF->empty()) {
3511 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3512 for (Register VReg : MInfo.vregsRequired) {
3513 report("Virtual register defs don't dominate all uses.", MF);
3514 report_context_vreg(VReg);
3515 }
3516 }
3517
3518 if (LiveVars)
3519 verifyLiveVariables();
3520 if (LiveInts)
3521 verifyLiveIntervals();
3522
3523 // Check live-in list of each MBB. If a register is live into MBB, check
3524 // that the register is in regsLiveOut of each predecessor block. Since
3525 // this must come from a definition in the predecessor or its live-in
3526 // list, this will catch a live-through case where the predecessor does not
3527 // have the register in its live-in list. This currently only checks
3528 // registers that have no aliases, are not allocatable and are not
3529 // reserved, which could mean a condition code register for instance.
3530 if (MRI->tracksLiveness())
3531 for (const auto &MBB : *MF)
3532       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
3533         MCRegister LiveInReg = P.PhysReg;
3534 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3535 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3536 continue;
3537 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3538 BBInfo &PInfo = MBBInfoMap[Pred];
3539 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3540 report("Live in register not found to be live out from predecessor.",
3541 &MBB);
3542 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3543 << printMBBReference(*Pred) << '\n';
3544 }
3545 }
3546 }
3547
3548 for (auto CSInfo : MF->getCallSitesInfo())
3549 if (!CSInfo.first->isCall())
3550 report("Call site info referencing instruction that is not call", MF);
3551
3552 // If there's debug-info, check that we don't have any duplicate value
3553 // tracking numbers.
3554 if (MF->getFunction().getSubprogram()) {
3555 DenseSet<unsigned> SeenNumbers;
3556 for (const auto &MBB : *MF) {
3557 for (const auto &MI : MBB) {
3558 if (auto Num = MI.peekDebugInstrNum()) {
3559 auto Result = SeenNumbers.insert((unsigned)Num);
3560 if (!Result.second)
3561 report("Instruction has a duplicated value tracking number", &MI);
3562 }
3563 }
3564 }
3565 }
3566}
3567
3568void MachineVerifier::verifyLiveVariables() {
3569 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3570 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3571     Register Reg = Register::index2VirtReg(I);
3572     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3573     for (const auto &MBB : *MF) {
3574 BBInfo &MInfo = MBBInfoMap[&MBB];
3575
3576 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3577 if (MInfo.vregsRequired.count(Reg)) {
3578 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3579 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3580 OS << "Virtual register " << printReg(Reg)
3581 << " must be live through the block.\n";
3582 }
3583 } else {
3584 if (VI.AliveBlocks.test(MBB.getNumber())) {
3585 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3586 OS << "Virtual register " << printReg(Reg)
3587 << " is not needed live through the block.\n";
3588 }
3589 }
3590 }
3591 }
3592}
3593
3594void MachineVerifier::verifyLiveIntervals() {
3595 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3596 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3597     Register Reg = Register::index2VirtReg(I);
3598
3599 // Spilling and splitting may leave unused registers around. Skip them.
3600 if (MRI->reg_nodbg_empty(Reg))
3601 continue;
3602
3603 if (!LiveInts->hasInterval(Reg)) {
3604 report("Missing live interval for virtual register", MF);
3605 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3606 continue;
3607 }
3608
3609 const LiveInterval &LI = LiveInts->getInterval(Reg);
3610 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3611 verifyLiveInterval(LI);
3612 }
3613
3614 // Verify all the cached regunit intervals.
3615 for (MCRegUnit Unit : TRI->regunits())
3616 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
3617 verifyLiveRange(*LR, VirtRegOrUnit(Unit));
3618}
3619
3620void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3621 const VNInfo *VNI,
3622 VirtRegOrUnit VRegOrUnit,
3623 LaneBitmask LaneMask) {
3624 if (VNI->isUnused())
3625 return;
3626
3627 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3628
3629 if (!DefVNI) {
3630 report("Value not live at VNInfo def and not marked unused", MF);
3631 report_context(LR, VRegOrUnit, LaneMask);
3632 report_context(*VNI);
3633 return;
3634 }
3635
3636 if (DefVNI != VNI) {
3637 report("Live segment at def has different VNInfo", MF);
3638 report_context(LR, VRegOrUnit, LaneMask);
3639 report_context(*VNI);
3640 return;
3641 }
3642
3643 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3644 if (!MBB) {
3645 report("Invalid VNInfo definition index", MF);
3646 report_context(LR, VRegOrUnit, LaneMask);
3647 report_context(*VNI);
3648 return;
3649 }
3650
3651 if (VNI->isPHIDef()) {
3652 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3653 report("PHIDef VNInfo is not defined at MBB start", MBB);
3654 report_context(LR, VRegOrUnit, LaneMask);
3655 report_context(*VNI);
3656 }
3657 return;
3658 }
3659
3660 // Non-PHI def.
3661 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3662 if (!MI) {
3663 report("No instruction at VNInfo def index", MBB);
3664 report_context(LR, VRegOrUnit, LaneMask);
3665 report_context(*VNI);
3666 return;
3667 }
3668
3669 bool hasDef = false;
3670 bool isEarlyClobber = false;
3671 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3672 if (!MOI->isReg() || !MOI->isDef())
3673 continue;
3674 if (VRegOrUnit.isVirtualReg()) {
3675 if (MOI->getReg() != VRegOrUnit.asVirtualReg())
3676 continue;
3677 } else {
3678 if (!MOI->getReg().isPhysical() ||
3679 !TRI->hasRegUnit(MOI->getReg(), VRegOrUnit.asMCRegUnit()))
3680 continue;
3681 }
3682 if (LaneMask.any() &&
3683 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3684 continue;
3685 hasDef = true;
3686 if (MOI->isEarlyClobber())
3687 isEarlyClobber = true;
3688 }
3689
3690 if (!hasDef) {
3691 report("Defining instruction does not modify register", MI);
3692 report_context(LR, VRegOrUnit, LaneMask);
3693 report_context(*VNI);
3694 }
3695
3696 // Early clobber defs begin at USE slots, but other defs must begin at
3697 // DEF slots.
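  // (Editorial note: each instruction has four SlotIndex slots - Block,
  // EarlyClobber, Register and Dead - so the checks below distinguish defs at
  // the early-clobber slot from defs at the normal register slot.)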
3698 if (isEarlyClobber) {
3699 if (!VNI->def.isEarlyClobber()) {
3700 report("Early clobber def must be at an early-clobber slot", MBB);
3701 report_context(LR, VRegOrUnit, LaneMask);
3702 report_context(*VNI);
3703 }
3704 } else if (!VNI->def.isRegister()) {
3705 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3706 report_context(LR, VRegOrUnit, LaneMask);
3707 report_context(*VNI);
3708 }
3709}
3710
3711void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3712                                              const LiveRange::const_iterator I,
3713                                              VirtRegOrUnit VRegOrUnit,
3714 LaneBitmask LaneMask) {
3715 const LiveRange::Segment &S = *I;
3716 const VNInfo *VNI = S.valno;
3717 assert(VNI && "Live segment has no valno");
3718
3719 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3720 report("Foreign valno in live segment", MF);
3721 report_context(LR, VRegOrUnit, LaneMask);
3722 report_context(S);
3723 report_context(*VNI);
3724 }
3725
3726 if (VNI->isUnused()) {
3727 report("Live segment valno is marked unused", MF);
3728 report_context(LR, VRegOrUnit, LaneMask);
3729 report_context(S);
3730 }
3731
3732 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3733 if (!MBB) {
3734 report("Bad start of live segment, no basic block", MF);
3735 report_context(LR, VRegOrUnit, LaneMask);
3736 report_context(S);
3737 return;
3738 }
3739 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3740 if (S.start != MBBStartIdx && S.start != VNI->def) {
3741 report("Live segment must begin at MBB entry or valno def", MBB);
3742 report_context(LR, VRegOrUnit, LaneMask);
3743 report_context(S);
3744 }
3745
3746 const MachineBasicBlock *EndMBB =
3747 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3748 if (!EndMBB) {
3749 report("Bad end of live segment, no basic block", MF);
3750 report_context(LR, VRegOrUnit, LaneMask);
3751 report_context(S);
3752 return;
3753 }
3754
3755 // Checks for non-live-out segments.
3756 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3757 // RegUnit intervals are allowed dead phis.
3758 if (!VRegOrUnit.isVirtualReg() && VNI->isPHIDef() && S.start == VNI->def &&
3759 S.end == VNI->def.getDeadSlot())
3760 return;
3761
3762 // The live segment is ending inside EndMBB
3763 const MachineInstr *MI =
3764 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3765 if (!MI) {
3766 report("Live segment doesn't end at a valid instruction", EndMBB);
3767 report_context(LR, VRegOrUnit, LaneMask);
3768 report_context(S);
3769 return;
3770 }
3771
3772 // The block slot must refer to a basic block boundary.
3773 if (S.end.isBlock()) {
3774 report("Live segment ends at B slot of an instruction", EndMBB);
3775 report_context(LR, VRegOrUnit, LaneMask);
3776 report_context(S);
3777 }
3778
3779 if (S.end.isDead()) {
3780 // Segment ends on the dead slot.
3781 // That means there must be a dead def.
3782 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3783 report("Live segment ending at dead slot spans instructions", EndMBB);
3784 report_context(LR, VRegOrUnit, LaneMask);
3785 report_context(S);
3786 }
3787 }
3788
3789 // After tied operands are rewritten, a live segment can only end at an
3790 // early-clobber slot if it is being redefined by an early-clobber def.
3791 // TODO: Before tied operands are rewritten, a live segment can only end at
3792 // an early-clobber slot if the last use is tied to an early-clobber def.
3793 if (MF->getProperties().hasTiedOpsRewritten() && S.end.isEarlyClobber()) {
3794 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3795 report("Live segment ending at early clobber slot must be "
3796 "redefined by an EC def in the same instruction",
3797 EndMBB);
3798 report_context(LR, VRegOrUnit, LaneMask);
3799 report_context(S);
3800 }
3801 }
3802
3803 // The following checks only apply to virtual registers. Physreg liveness
3804 // is too weird to check.
3805 if (VRegOrUnit.isVirtualReg()) {
3806 // A live segment can end with either a redefinition, a kill flag on a
3807 // use, or a dead flag on a def.
3808 bool hasRead = false;
3809 bool hasSubRegDef = false;
3810 bool hasDeadDef = false;
3811 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3812 if (!MOI->isReg() || MOI->getReg() != VRegOrUnit.asVirtualReg())
3813 continue;
3814 unsigned Sub = MOI->getSubReg();
3815 LaneBitmask SLM =
3816 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3817 if (MOI->isDef()) {
3818 if (Sub != 0) {
3819 hasSubRegDef = true;
3820 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3821 // mask for subregister defs. Read-undef defs will be handled by
3822 // readsReg below.
3823 SLM = ~SLM;
3824 }
3825 if (MOI->isDead())
3826 hasDeadDef = true;
3827 }
3828 if (LaneMask.any() && (LaneMask & SLM).none())
3829 continue;
3830 if (MOI->readsReg())
3831 hasRead = true;
3832 }
3833 if (S.end.isDead()) {
3834 // Make sure that the corresponding machine operand for a "dead" live
3835 // range has the dead flag. We cannot perform this check for subregister
3836 // liveranges as partially dead values are allowed.
3837 if (LaneMask.none() && !hasDeadDef) {
3838 report(
3839 "Instruction ending live segment on dead slot has no dead flag",
3840 MI);
3841 report_context(LR, VRegOrUnit, LaneMask);
3842 report_context(S);
3843 }
3844 } else {
3845 if (!hasRead) {
3846 // When tracking subregister liveness, the main range must start new
3847 // values on partial register writes, even if there is no read.
3848 if (!MRI->shouldTrackSubRegLiveness(VRegOrUnit.asVirtualReg()) ||
3849 LaneMask.any() || !hasSubRegDef) {
3850 report("Instruction ending live segment doesn't read the register",
3851 MI);
3852 report_context(LR, VRegOrUnit, LaneMask);
3853 report_context(S);
3854 }
3855 }
3856 }
3857 }
3858 }
3859
3860 // Now check all the basic blocks in this live segment.
3861   MachineFunction::const_iterator MFI = MBB->getIterator();
3862   // Is this live segment the beginning of a non-PHIDef VN?
3863 if (S.start == VNI->def && !VNI->isPHIDef()) {
3864 // Not live-in to any blocks.
3865 if (MBB == EndMBB)
3866 return;
3867 // Skip this block.
3868 ++MFI;
3869 }
3870
3871   SmallVector<SlotIndex, 4> Undefs;
3872   if (LaneMask.any()) {
3873 LiveInterval &OwnerLI = LiveInts->getInterval(VRegOrUnit.asVirtualReg());
3874 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3875 }
3876
3877 while (true) {
3878 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3879 // We don't know how to track physregs into a landing pad.
3880 if (!VRegOrUnit.isVirtualReg() && MFI->isEHPad()) {
3881 if (&*MFI == EndMBB)
3882 break;
3883 ++MFI;
3884 continue;
3885 }
3886
3887 // Is VNI a PHI-def in the current block?
3888 bool IsPHI = VNI->isPHIDef() &&
3889 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3890
3891 // Check that VNI is live-out of all predecessors.
3892 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3893 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3894       // For a landing pad, take the predecessor's live-out at its last call.
3895 if (MFI->isEHPad()) {
3896 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3897 if (MI.isCall()) {
3898 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3899 break;
3900 }
3901 }
3902 }
3903 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3904
3905       // All predecessors must have a live-out value. However, for a phi
3906       // instruction with subregister intervals, only one of the
3907       // subregisters (not necessarily the current one) needs to be
3908       // defined.
3909 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3910 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3911 continue;
3912 report("Register not marked live out of predecessor", Pred);
3913 report_context(LR, VRegOrUnit, LaneMask);
3914 report_context(*VNI);
3915 OS << " live into " << printMBBReference(*MFI) << '@'
3916 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3917 << '\n';
3918 continue;
3919 }
3920
3921 // Only PHI-defs can take different predecessor values.
3922 if (!IsPHI && PVNI != VNI) {
3923 report("Different value live out of predecessor", Pred);
3924 report_context(LR, VRegOrUnit, LaneMask);
3925 OS << "Valno #" << PVNI->id << " live out of "
3926 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3927 << " live into " << printMBBReference(*MFI) << '@'
3928 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3929 }
3930 }
3931 if (&*MFI == EndMBB)
3932 break;
3933 ++MFI;
3934 }
3935}
3936
3937void MachineVerifier::verifyLiveRange(const LiveRange &LR,
3938 VirtRegOrUnit VRegOrUnit,
3939 LaneBitmask LaneMask) {
3940 for (const VNInfo *VNI : LR.valnos)
3941 verifyLiveRangeValue(LR, VNI, VRegOrUnit, LaneMask);
3942
3943 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3944 verifyLiveRangeSegment(LR, I, VRegOrUnit, LaneMask);
3945}
3946
3947void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3948 Register Reg = LI.reg();
3949 assert(Reg.isVirtual());
3950 verifyLiveRange(LI, VirtRegOrUnit(Reg));
3951
3952 if (LI.hasSubRanges()) {
3953     LaneBitmask Mask;
3954     LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3955 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3956 if ((Mask & SR.LaneMask).any()) {
3957 report("Lane masks of sub ranges overlap in live interval", MF);
3958 report_context(LI);
3959 }
3960 if ((SR.LaneMask & ~MaxMask).any()) {
3961 report("Subrange lanemask is invalid", MF);
3962 report_context(LI);
3963 }
3964 if (SR.empty()) {
3965 report("Subrange must not be empty", MF);
3966 report_context(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3967 }
3968 Mask |= SR.LaneMask;
3969 verifyLiveRange(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3970 if (!LI.covers(SR)) {
3971 report("A Subrange is not covered by the main range", MF);
3972 report_context(LI);
3973 }
3974 }
3975 }
3976
3977 // Check the LI only has one connected component.
3978 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3979 unsigned NumComp = ConEQ.Classify(LI);
3980 if (NumComp > 1) {
3981 report("Multiple connected components in live interval", MF);
3982 report_context(LI);
3983 for (unsigned comp = 0; comp != NumComp; ++comp) {
3984 OS << comp << ": valnos";
3985 for (const VNInfo *I : LI.valnos)
3986 if (comp == ConEQ.getEqClass(I))
3987 OS << ' ' << I->id;
3988 OS << '\n';
3989 }
3990 }
3991}
3992
3993namespace {
3994
3995 // FrameSetup and FrameDestroy can both have a zero adjustment, so a single
3996 // integer can't tell a FrameSetup from a FrameDestroy when the value is
3997 // zero.
3998 // We use a bool plus an integer to capture the stack state.
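// Editorial example (hypothetical instructions): after a 'SUB sp, sp, #16'
// flagged as FrameSetup the state is (-16, true); a matching 'ADD sp, sp, #16'
// flagged as FrameDestroy returns it to (0, false), which is the state a
// return block must end in.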
3999struct StackStateOfBB {
4000 StackStateOfBB() = default;
4001 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
4002 : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
4003 ExitIsSetup(ExitSetup) {}
4004
4005 // Can be negative, which means we are setting up a frame.
4006 int EntryValue = 0;
4007 int ExitValue = 0;
4008 bool EntryIsSetup = false;
4009 bool ExitIsSetup = false;
4010};
4011
4012} // end anonymous namespace
4013
4014 /// Make sure that on every path through the CFG a FrameSetup <n> is always
4015 /// followed by a FrameDestroy <n>, that stack adjustments are identical on all
4016 /// CFG edges to a merge point, and that the frame is destroyed at the end of a return block.
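/// Editorial example (hypothetical pseudo-instructions): if one arm of a
/// diamond CFG performs a 32-byte call-frame setup without the matching
/// destroy, the two arms reach the join block with exit states (-32, true)
/// and (0, false), and the predecessor/successor consistency checks below
/// report the mismatch.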
4017void MachineVerifier::verifyStackFrame() {
4018 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
4019 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
4020 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
4021 return;
4022
4023   SmallVector<StackStateOfBB, 8> SPState;
4024   SPState.resize(MF->getNumBlockIDs());
4025   df_iterator_default_set<const MachineBasicBlock*> Reachable;
4026
4027 // Visit the MBBs in DFS order.
4028 for (df_ext_iterator<const MachineFunction *,
4029                        df_iterator_default_set<const MachineBasicBlock *>>
4030        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
4031 DFI != DFE; ++DFI) {
4032 const MachineBasicBlock *MBB = *DFI;
4033
4034 StackStateOfBB BBState;
4035 // Check the exit state of the DFS stack predecessor.
4036 if (DFI.getPathLength() >= 2) {
4037 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
4038 assert(Reachable.count(StackPred) &&
4039 "DFS stack predecessor is already visited.\n");
4040 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
4041 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
4042 BBState.ExitValue = BBState.EntryValue;
4043 BBState.ExitIsSetup = BBState.EntryIsSetup;
4044 }
4045
4046 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
4047 report("Call frame size on entry does not match value computed from "
4048 "predecessor",
4049 MBB);
4050 OS << "Call frame size on entry " << MBB->getCallFrameSize()
4051 << " does not match value computed from predecessor "
4052 << -BBState.EntryValue << '\n';
4053 }
4054
4055 // Update stack state by checking contents of MBB.
4056 for (const auto &I : *MBB) {
4057 if (I.getOpcode() == FrameSetupOpcode) {
4058 if (BBState.ExitIsSetup)
4059 report("FrameSetup is after another FrameSetup", &I);
4060 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4061 report("AdjustsStack not set in presence of a frame pseudo "
4062 "instruction.", &I);
4063 BBState.ExitValue -= TII->getFrameTotalSize(I);
4064 BBState.ExitIsSetup = true;
4065 }
4066
4067 if (I.getOpcode() == FrameDestroyOpcode) {
4068 int Size = TII->getFrameTotalSize(I);
4069 if (!BBState.ExitIsSetup)
4070 report("FrameDestroy is not after a FrameSetup", &I);
4071 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
4072 BBState.ExitValue;
4073 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
4074 report("FrameDestroy <n> is after FrameSetup <m>", &I);
4075 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
4076 << AbsSPAdj << ">.\n";
4077 }
4078 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4079 report("AdjustsStack not set in presence of a frame pseudo "
4080 "instruction.", &I);
4081 BBState.ExitValue += Size;
4082 BBState.ExitIsSetup = false;
4083 }
4084 }
4085 SPState[MBB->getNumber()] = BBState;
4086
4087 // Make sure the exit state of any predecessor is consistent with the entry
4088 // state.
4089 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4090 if (Reachable.count(Pred) &&
4091 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4092 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4093 report("The exit stack state of a predecessor is inconsistent.", MBB);
4094 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4095 << SPState[Pred->getNumber()].ExitValue << ", "
4096 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4097 << printMBBReference(*MBB) << " has entry state ("
4098 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4099 }
4100 }
4101
4102 // Make sure the entry state of any successor is consistent with the exit
4103 // state.
4104 for (const MachineBasicBlock *Succ : MBB->successors()) {
4105 if (Reachable.count(Succ) &&
4106 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4107 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4108 report("The entry stack state of a successor is inconsistent.", MBB);
4109 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4110 << SPState[Succ->getNumber()].EntryValue << ", "
4111 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4112 << printMBBReference(*MBB) << " has exit state ("
4113 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4114 }
4115 }
4116
4117 // Make sure a basic block with return ends with zero stack adjustment.
4118 if (!MBB->empty() && MBB->back().isReturn()) {
4119 if (BBState.ExitIsSetup)
4120 report("A return block ends with a FrameSetup.", MBB);
4121 if (BBState.ExitValue)
4122 report("A return block ends with a nonzero stack adjustment.", MBB);
4123 }
4124 }
4125}
4126
4127void MachineVerifier::verifyStackProtector() {
4128 const MachineFrameInfo &MFI = MF->getFrameInfo();
4129 if (!MFI.hasStackProtectorIndex())
4130 return;
4131 // Only applicable when the offsets of frame objects have been determined,
4132 // which is indicated by a non-zero stack size.
4133 if (!MFI.getStackSize())
4134 return;
4135 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4136 bool StackGrowsDown =
4137       TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
4138   unsigned FI = MFI.getStackProtectorIndex();
4139 int64_t SPStart = MFI.getObjectOffset(FI);
4140 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4141 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4142 if (I == FI)
4143 continue;
4144 if (MFI.isDeadObjectIndex(I))
4145 continue;
4146 // FIXME: Skip non-default stack objects, as some targets may place them
4147 // above the stack protector. This is a workaround for the fact that
4148 // backends such as AArch64 may place SVE stack objects *above* the stack
4149 // protector.
4150     if (MFI.getStackID(I) != TargetStackID::Default)
4151       continue;
4152 // Skip variable-sized objects because they do not have a fixed offset.
4153     if (MFI.isVariableSizedObjectIndex(I))
4154       continue;
4155 // FIXME: Skip spill slots which may be allocated above the stack protector.
4156 // Ideally this would only skip callee-saved registers, but we don't have
4157 // that information here. For example, spill-slots used for scavenging are
4158 // not described in CalleeSavedInfo.
4159 if (MFI.isSpillSlotObjectIndex(I))
4160 continue;
4161 int64_t ObjStart = MFI.getObjectOffset(I);
4162 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
4163 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4164 report("Stack protector overlaps with another stack object", MF);
4165 break;
4166 }
4167 if ((StackGrowsDown && SPStart <= ObjStart) ||
4168 (!StackGrowsDown && SPStart >= ObjStart)) {
4169 report("Stack protector is not the top-most object on the stack", MF);
4170 break;
4171 }
4172 }
4173}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen Hexagon Store false hexagon widen loads
hexagon widen stores
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
This file declares the MIR specialization of the GenericConvergenceVerifier template.
Register Reg
Register const TargetRegisterInfo * TRI
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
Promote Memory to Register
Definition Mem2Reg.cpp:110
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
SI Optimize VGPR LiveRange
std::unordered_set< BasicBlock * > BlockSet
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:277
const fltSemantics & getSemantics() const
Definition APFloat.h:1520
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:402
LLVM Basic Block Representation.
Definition BasicBlock.h:62
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:701
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void clear()
clear - Removes all bits from the bitvector.
Definition BitVector.h:354
iterator_range< const_set_bits_iterator > set_bits() const
Definition BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition Constants.h:198
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition Error.h:354
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
const Function & getFunction() const
Definition Function.h:164
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
constexpr bool isPointerVector() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr unsigned getAddressSpace() const
constexpr bool isPointerOrPointerVector() const
constexpr LLT getScalarType() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
Register reg() const
bool hasSubRanges() const
Returns true if subregister liveness information is available.
iterator_range< subrange_iterator > subranges()
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
bool isDeadDef() const
Return true if this instruction has a dead def.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
bool isKill() const
Return true if the live-in value is killed by this instruction.
static LLVM_ABI bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Segments::const_iterator const_iterator
bool liveAt(SlotIndex index) const
LLVM_ABI bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
iterator begin()
VNInfoList valnos
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
ExceptionHandling getExceptionHandlingType() const
Definition MCAsmInfo.h:637
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
LLVM_ABI BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
LaneBitmask getLaneMask() const
unsigned getCFIIndex() const
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition Pass.cpp:140
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
const char * getName() const
Get a user friendly name of this register bank.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition Register.h:72
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition Register.h:87
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr unsigned id() const
Definition Register.h:100
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
SlotIndexes pass.
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
size_type size() const
Definition SmallPtrSet.h:99
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Register getReg() const
MI-level Statepoint operands.
Definition StackMaps.h:159
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
LaneBitmask getLaneMask() const
Returns the combination of all lane masks of register in this class.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
static constexpr TypeSize getZero()
Definition TypeSize.h:349
VNInfo - Value Number Information.
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
LLVM Value Representation.
Definition Value.h:75
Wrapper class representing a virtual register or register unit.
Definition Register.h:181
constexpr bool isVirtualReg() const
Definition Register.h:197
constexpr MCRegUnit asMCRegUnit() const
Definition Register.h:201
constexpr Register asVirtualReg() const
Definition Register.h:206
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
@ OPERAND_IMMEDIATE
Definition MCInstrDesc.h:61
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
iterator end() const
Definition BasicBlock.h:89
LLVM_ABI iterator begin() const
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
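Combined with drop_begin above, these wrappers keep iterator plumbing out of predicates. An illustrative sketch:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"

  bool tailIsPositive(const llvm::SmallVectorImpl<int> &V) {
    // Skip the first element, then test the rest of the range.
    return llvm::all_of(llvm::drop_begin(V), [](int X) { return X > 0; });
  }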
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
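Both this and its unsigned counterpart isUInt (listed further below) are constexpr, so they can gate immediate ranges at compile time as well as at run time. For example:

  #include "llvm/Support/MathExtras.h"

  static_assert(llvm::isInt<8>(127) && !llvm::isInt<8>(128),
                "signed 8-bit immediate range is [-128, 127]");
  static_assert(llvm::isUInt<8>(255) && !llvm::isUInt<8>(256),
                "unsigned 8-bit immediate range is [0, 255]");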
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
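In contrast to cast (listed further below), dyn_cast returns a null pointer on a type mismatch rather than asserting. A typical pattern (the helper name is hypothetical):

  #include "llvm/IR/Instructions.h"

  const llvm::Value *pointerOperandOrNull(const llvm::Value *V) {
    // Null when V is not a LoadInst; no assertion is triggered.
    if (const auto *LI = llvm::dyn_cast<llvm::LoadInst>(V))
      return LI->getPointerOperand();
    return nullptr;
  }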
@ SjLj
setjmp/longjmp based exceptions
Definition CodeGen.h:56
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
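A one-line alternative to an explicit C.insert(C.end(), R.begin(), R.end()). A sketch:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include <array>

  void appendDemo() {
    llvm::SmallVector<int, 8> Dst{0};
    std::array<int, 3> Src = {1, 2, 3};
    llvm::append_range(Dst, Src);  // Dst is now {0, 1, 2, 3}
  }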
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
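An illustrative sketch using DenseSet (whose insert member is listed above):

  #include "llvm/ADT/DenseSet.h"
  #include "llvm/ADT/SetOperations.h"

  void subtractDemo() {
    llvm::DenseSet<unsigned> A{1, 2, 3};
    llvm::DenseSet<unsigned> B{2, 3};
    llvm::set_subtract(A, B);      // A := A - B, i.e. {1}
  }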
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition LaneBitmask.h:92
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool isPreISelGenericOptimizationHint(unsigned Opcode)
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
LLVM_ABI FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
LLVM_ABI void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
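A sketch of calling the verifier directly, e.g. from a temporary debugging hook; the hook is hypothetical, and the declaration is assumed to be visible via llvm/CodeGen/MachineFunction.h:

  #include "llvm/CodeGen/MachineFunction.h"

  // Aborts with the given diagnostic banner if MF fails verification.
  void checkAfterMyPass(const llvm::MachineFunction &MF) {
    llvm::verifyMachineFunction("After MyPass", MF);
  }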
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
GenericConvergenceVerifier< MachineSSAContext > MachineConvergenceVerifier
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
LLVM_ABI raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
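Both helpers return a Printable, so they compose directly with stream output. An illustrative diagnostic line (the function name is hypothetical):

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/TargetRegisterInfo.h"
  #include "llvm/Support/raw_ostream.h"

  void note(llvm::Register Reg, const llvm::MachineBasicBlock &MBB,
            const llvm::TargetRegisterInfo *TRI) {
    llvm::errs() << "use of " << llvm::printReg(Reg, TRI) << " in "
                 << llvm::printMBBReference(MBB) << '\n';
  }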
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
static constexpr LaneBitmask getAll()
Definition LaneBitmask.h:82
constexpr bool none() const
Definition LaneBitmask.h:52
constexpr bool any() const
Definition LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition LaneBitmask.h:81
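Lane masks form a small bit algebra (&, |, ~) with any()/none() queries, and getNone()/getAll() as the identity elements. A sketch of computing the lanes that are defined but not live (parameter names are illustrative):

  #include "llvm/MC/LaneBitmask.h"

  bool hasDeadLanes(llvm::LaneBitmask Defined, llvm::LaneBitmask Live) {
    llvm::LaneBitmask Dead = Defined & ~Live;
    return Dead.any();
  }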
This represents a simple continuous liveness interval for a value.
VarInfo - This represents the regions where a virtual register is live in the program.
Pair of physical register and lane mask.