Bug Summary

File: /build/source/llvm/lib/CodeGen/MachineVerifier.cpp
Warning: line 2544, column 9
Forming reference to null pointer
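
The analyzer's path, in brief: the field 'LiveInts' is assigned from getAnalysisIfAvailable<LiveIntervals>() (so it may remain null), that value later participates in a condition, and on one feasible path a null 'LiveInts' is dereferenced at line 2544. A minimal sketch of the diagnosed pattern (illustrative only; it mirrors the fields declared below, not the exact code at line 2544):

    #include "llvm/CodeGen/LiveIntervals.h"
    #include "llvm/CodeGen/Register.h"

    struct VerifierSketch {
      llvm::LiveIntervals *LiveInts = nullptr; // may stay null: the analysis is optional

      void checkUse(llvm::Register Reg) {
        if (LiveInts && LiveInts->hasInterval(Reg)) {
          // guarded path: safe
        }
        // On a path where LiveInts is still null, binding a reference through
        // it is exactly what "Forming reference to null pointer" reports:
        const llvm::LiveInterval &LI = LiveInts->getInterval(Reg); // <-- warning
        (void)LI;
      }
    };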

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name MachineVerifier.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-17/lib/clang/17 -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen -I /build/source/llvm/lib/CodeGen -I include -I /build/source/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1683717183 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/CodeGen/MachineVerifier.cpp
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
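
For orientation, the verifier can also be run programmatically through the entry points defined later in this file (MachineFunction::verify and createMachineVerifierPass). A hedged sketch of the former (the banner string is arbitrary):

    #include "llvm/CodeGen/MachineFunction.h"

    // Returns true when no errors were found; with AbortOnErrors=true,
    // MachineFunction::verify calls report_fatal_error itself.
    bool verifyQuietly(llvm::MachineFunction &MF) {
      return MF.verify(/*p=*/nullptr, /*Banner=*/"after my pass",
                       /*AbortOnErrors=*/false);
    }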
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
26#include "llvm/ADT/DepthFirstIterator.h"
27#include "llvm/ADT/PostOrderIterator.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/SetOperations.h"
30#include "llvm/ADT/SmallPtrSet.h"
31#include "llvm/ADT/SmallVector.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
34#include "llvm/CodeGen/CodeGenCommonISel.h"
35#include "llvm/CodeGen/LiveInterval.h"
36#include "llvm/CodeGen/LiveIntervals.h"
37#include "llvm/CodeGen/LiveRangeCalc.h"
38#include "llvm/CodeGen/LiveStacks.h"
39#include "llvm/CodeGen/LiveVariables.h"
40#include "llvm/CodeGen/LowLevelType.h"
41#include "llvm/CodeGen/MachineBasicBlock.h"
42#include "llvm/CodeGen/MachineFrameInfo.h"
43#include "llvm/CodeGen/MachineFunction.h"
44#include "llvm/CodeGen/MachineFunctionPass.h"
45#include "llvm/CodeGen/MachineInstr.h"
46#include "llvm/CodeGen/MachineInstrBundle.h"
47#include "llvm/CodeGen/MachineMemOperand.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/PseudoSourceValue.h"
51#include "llvm/CodeGen/RegisterBank.h"
52#include "llvm/CodeGen/RegisterBankInfo.h"
53#include "llvm/CodeGen/SlotIndexes.h"
54#include "llvm/CodeGen/StackMaps.h"
55#include "llvm/CodeGen/TargetInstrInfo.h"
56#include "llvm/CodeGen/TargetOpcodes.h"
57#include "llvm/CodeGen/TargetRegisterInfo.h"
58#include "llvm/CodeGen/TargetSubtargetInfo.h"
59#include "llvm/IR/BasicBlock.h"
60#include "llvm/IR/Constants.h"
61#include "llvm/IR/EHPersonalities.h"
62#include "llvm/IR/Function.h"
63#include "llvm/IR/InlineAsm.h"
64#include "llvm/IR/Instructions.h"
65#include "llvm/InitializePasses.h"
66#include "llvm/MC/LaneBitmask.h"
67#include "llvm/MC/MCAsmInfo.h"
68#include "llvm/MC/MCDwarf.h"
69#include "llvm/MC/MCInstrDesc.h"
70#include "llvm/MC/MCRegisterInfo.h"
71#include "llvm/MC/MCTargetOptions.h"
72#include "llvm/Pass.h"
73#include "llvm/Support/Casting.h"
74#include "llvm/Support/ErrorHandling.h"
75#include "llvm/Support/MathExtras.h"
76#include "llvm/Support/ModRef.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Target/TargetMachine.h"
79#include <algorithm>
80#include <cassert>
81#include <cstddef>
82#include <cstdint>
83#include <iterator>
84#include <string>
85#include <utility>
86
87using namespace llvm;
88
89namespace {
90
91 struct MachineVerifier {
92 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
93
94 unsigned verify(const MachineFunction &MF);
95
96 Pass *const PASS;
97 const char *Banner;
98 const MachineFunction *MF = nullptr;
99 const TargetMachine *TM = nullptr;
100 const TargetInstrInfo *TII = nullptr;
101 const TargetRegisterInfo *TRI = nullptr;
102 const MachineRegisterInfo *MRI = nullptr;
103 const RegisterBankInfo *RBI = nullptr;
104
105 unsigned foundErrors = 0;
106
107 // Avoid querying the MachineFunctionProperties for each operand.
108 bool isFunctionRegBankSelected = false;
109 bool isFunctionSelected = false;
110 bool isFunctionTracksDebugUserValues = false;
111
112 using RegVector = SmallVector<Register, 16>;
113 using RegMaskVector = SmallVector<const uint32_t *, 4>;
114 using RegSet = DenseSet<Register>;
115 using RegMap = DenseMap<Register, const MachineInstr *>;
116 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
117
118 const MachineInstr *FirstNonPHI = nullptr;
119 const MachineInstr *FirstTerminator = nullptr;
120 BlockSet FunctionBlocks;
121
122 BitVector regsReserved;
123 RegSet regsLive;
124 RegVector regsDefined, regsDead, regsKilled;
125 RegMaskVector regMasks;
126
127 SlotIndex lastIndex;
128
129 // Add Reg and any sub-registers to RV
130 void addRegWithSubRegs(RegVector &RV, Register Reg) {
131 RV.push_back(Reg);
(21) Value assigned to field 'LiveInts', which participates in a condition later
132 if (Reg.isPhysical())
(22) Taking false branch
(23) Taking false branch
133 append_range(RV, TRI->subregs(Reg.asMCReg()));
134 }
135
136 struct BBInfo {
137 // Is this MBB reachable from the MF entry point?
138 bool reachable = false;
139
140 // Vregs that must be live in because they are used without being
141 // defined. Map value is the user. vregsLiveIn doesn't include regs
142 // that are only used by PHI nodes.
143 RegMap vregsLiveIn;
144
145 // Regs killed in MBB. They may be defined again, and will then be in both
146 // regsKilled and regsLiveOut.
147 RegSet regsKilled;
148
149 // Regs defined in MBB and live out. Note that vregs passing through may
150 // be live out without being mentioned here.
151 RegSet regsLiveOut;
152
153 // Vregs that pass through MBB untouched. This set is disjoint from
154 // regsKilled and regsLiveOut.
155 RegSet vregsPassed;
156
157 // Vregs that must pass through MBB because they are needed by a successor
158 // block. This set is disjoint from regsLiveOut.
159 RegSet vregsRequired;
160
161 // Set versions of block's predecessor and successor lists.
162 BlockSet Preds, Succs;
163
164 BBInfo() = default;
165
166 // Add register to vregsRequired if it belongs there. Return true if
167 // anything changed.
168 bool addRequired(Register Reg) {
169 if (!Reg.isVirtual())
170 return false;
171 if (regsLiveOut.count(Reg))
172 return false;
173 return vregsRequired.insert(Reg).second;
174 }
175
176 // Same for a full set.
177 bool addRequired(const RegSet &RS) {
178 bool Changed = false;
179 for (Register Reg : RS)
180 Changed |= addRequired(Reg);
181 return Changed;
182 }
183
184 // Same for a full map.
185 bool addRequired(const RegMap &RM) {
186 bool Changed = false;
187 for (const auto &I : RM)
188 Changed |= addRequired(I.first);
189 return Changed;
190 }
191
192 // Live-out registers are either in regsLiveOut or vregsPassed.
193 bool isLiveOut(Register Reg) const {
194 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
195 }
196 };
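
A standalone sketch of addRequired's contract (same logic as above, with assumed includes): a register is recorded only if it is virtual and not already defined-and-live-out in this block.

    #include "llvm/ADT/DenseSet.h"
    #include "llvm/CodeGen/Register.h"

    struct BBInfoSketch {
      llvm::DenseSet<llvm::Register> regsLiveOut, vregsRequired;

      bool addRequired(llvm::Register Reg) {
        if (!Reg.isVirtual())
          return false;                          // physical regs never qualify
        if (regsLiveOut.count(Reg))
          return false;                          // defined in this block, live out
        return vregsRequired.insert(Reg).second; // true only on first insertion
      }
    };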
197
198 // Extra register info per MBB.
199 DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;
200
201 bool isReserved(Register Reg) {
202 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
203 }
204
205 bool isAllocatable(Register Reg) const {
206 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
207 !regsReserved.test(Reg.id());
208 }
209
210 // Analysis information if available
211 LiveVariables *LiveVars = nullptr;
212 LiveIntervals *LiveInts = nullptr;
213 LiveStacks *LiveStks = nullptr;
214 SlotIndexes *Indexes = nullptr;
215
216 void visitMachineFunctionBefore();
217 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
218 void visitMachineBundleBefore(const MachineInstr *MI);
219
220 /// Verify that all of \p MI's virtual register operands are scalars.
221 /// \returns True if all virtual register operands are scalar. False
222 /// otherwise.
223 bool verifyAllRegOpsScalar(const MachineInstr &MI,
224 const MachineRegisterInfo &MRI);
225 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
226 void verifyPreISelGenericInstruction(const MachineInstr *MI);
227 void visitMachineInstrBefore(const MachineInstr *MI);
228 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
229 void visitMachineBundleAfter(const MachineInstr *MI);
230 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
231 void visitMachineFunctionAfter();
232
233 void report(const char *msg, const MachineFunction *MF);
234 void report(const char *msg, const MachineBasicBlock *MBB);
235 void report(const char *msg, const MachineInstr *MI);
236 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
237 LLT MOVRegType = LLT{});
238 void report(const Twine &Msg, const MachineInstr *MI);
239
240 void report_context(const LiveInterval &LI) const;
241 void report_context(const LiveRange &LR, Register VRegUnit,
242 LaneBitmask LaneMask) const;
243 void report_context(const LiveRange::Segment &S) const;
244 void report_context(const VNInfo &VNI) const;
245 void report_context(SlotIndex Pos) const;
246 void report_context(MCPhysReg PhysReg) const;
247 void report_context_liverange(const LiveRange &LR) const;
248 void report_context_lanemask(LaneBitmask LaneMask) const;
249 void report_context_vreg(Register VReg) const;
250 void report_context_vreg_regunit(Register VRegOrUnit) const;
251
252 void verifyInlineAsm(const MachineInstr *MI);
253
254 void checkLiveness(const MachineOperand *MO, unsigned MONum);
255 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
256 SlotIndex UseIdx, const LiveRange &LR,
257 Register VRegOrUnit,
258 LaneBitmask LaneMask = LaneBitmask::getNone());
259 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
260 SlotIndex DefIdx, const LiveRange &LR,
261 Register VRegOrUnit, bool SubRangeCheck = false,
262 LaneBitmask LaneMask = LaneBitmask::getNone());
263
264 void markReachable(const MachineBasicBlock *MBB);
265 void calcRegsPassed();
266 void checkPHIOps(const MachineBasicBlock &MBB);
267
268 void calcRegsRequired();
269 void verifyLiveVariables();
270 void verifyLiveIntervals();
271 void verifyLiveInterval(const LiveInterval&);
272 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
273 LaneBitmask);
274 void verifyLiveRangeSegment(const LiveRange &,
275 const LiveRange::const_iterator I, Register,
276 LaneBitmask);
277 void verifyLiveRange(const LiveRange &, Register,
278 LaneBitmask LaneMask = LaneBitmask::getNone());
279
280 void verifyStackFrame();
281
282 void verifySlotIndexes() const;
283 void verifyProperties(const MachineFunction &MF);
284 };
285
286 struct MachineVerifierPass : public MachineFunctionPass {
287 static char ID; // Pass ID, replacement for typeid
288
289 const std::string Banner;
290
291 MachineVerifierPass(std::string banner = std::string())
292 : MachineFunctionPass(ID), Banner(std::move(banner)) {
293 initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
294 }
295
296 void getAnalysisUsage(AnalysisUsage &AU) const override {
297 AU.addUsedIfAvailable<LiveStacks>();
298 AU.addUsedIfAvailable<LiveVariables>();
299 AU.addUsedIfAvailable<SlotIndexes>();
300 AU.setPreservesAll();
301 MachineFunctionPass::getAnalysisUsage(AU);
302 }
303
304 bool runOnMachineFunction(MachineFunction &MF) override {
305 // Skip functions that have known verification problems.
306 // FIXME: Remove this mechanism when all problematic passes have been
307 // fixed.
308 if (MF.getProperties().hasProperty(
309 MachineFunctionProperties::Property::FailsVerification))
310 return false;
311
312 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
313 if (FoundErrors)
314 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
315 return false;
316 }
317 };
318
319} // end anonymous namespace
320
321char MachineVerifierPass::ID = 0;
322
323INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
324                "Verify generated machine code", false, false)
325
326FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
327 return new MachineVerifierPass(Banner);
328}
329
330void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
331 const std::string &Banner,
332 const MachineFunction &MF) {
333 // TODO: Use MFAM after porting below analyses.
334 // LiveVariables *LiveVars;
335 // LiveIntervals *LiveInts;
336 // LiveStacks *LiveStks;
337 // SlotIndexes *Indexes;
338 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
339 if (FoundErrors)
340 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
341}
342
343bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
344 const {
345 MachineFunction &MF = const_cast<MachineFunction&>(*this);
346 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
347 if (AbortOnErrors && FoundErrors)
348 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
349 return FoundErrors == 0;
350}
351
352void MachineVerifier::verifySlotIndexes() const {
353 if (Indexes == nullptr)
354 return;
355
356 // Ensure the IdxMBB list is sorted by slot indexes.
357 SlotIndex Last;
358 for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
359 E = Indexes->MBBIndexEnd(); I != E; ++I) {
360 assert(!Last.isValid() || I->first > Last);
361 Last = I->first;
362 }
363}
364
365void MachineVerifier::verifyProperties(const MachineFunction &MF) {
366 // If a pass has introduced virtual registers without clearing the
367 // NoVRegs property (or set it without allocating the vregs)
368 // then report an error.
369 if (MF.getProperties().hasProperty(
370 MachineFunctionProperties::Property::NoVRegs) &&
371 MRI->getNumVirtRegs())
372 report("Function has NoVRegs property but there are VReg operands", &MF);
373}
374
375unsigned MachineVerifier::verify(const MachineFunction &MF) {
376 foundErrors = 0;
377
378 this->MF = &MF;
379 TM = &MF.getTarget();
380 TII = MF.getSubtarget().getInstrInfo();
381 TRI = MF.getSubtarget().getRegisterInfo();
382 RBI = MF.getSubtarget().getRegBankInfo();
383 MRI = &MF.getRegInfo();
384
385 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
386 MachineFunctionProperties::Property::FailedISel);
387
388 // If we're mid-GlobalISel and we already triggered the fallback path then
389 // it's expected that the MIR is somewhat broken but that's ok since we'll
390 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
391 if (isFunctionFailedISel)
392 return foundErrors;
393
394 isFunctionRegBankSelected = MF.getProperties().hasProperty(
395 MachineFunctionProperties::Property::RegBankSelected);
396 isFunctionSelected = MF.getProperties().hasProperty(
397 MachineFunctionProperties::Property::Selected);
398 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
399 MachineFunctionProperties::Property::TracksDebugUserValues);
400
401 LiveVars = nullptr;
402 LiveInts = nullptr;
403 LiveStks = nullptr;
404 Indexes = nullptr;
405 if (PASS) {
406 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
407 // We don't want to verify LiveVariables if LiveIntervals is available.
408 if (!LiveInts)
409 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
410 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
411 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
412 }
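
(Analyzer context: the assignment to 'LiveInts' above is what step 21 refers to. getAnalysisIfAvailable<LiveIntervals>() may return null, so 'LiveInts' can remain null for the rest of verification, which is the condition tracked toward the warning at line 2544.)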
413
414 verifySlotIndexes();
415
416 verifyProperties(MF);
417
418 visitMachineFunctionBefore();
419 for (const MachineBasicBlock &MBB : MF) {
420 visitMachineBasicBlockBefore(&MBB);
421 // Keep track of the current bundle header.
422 const MachineInstr *CurBundle = nullptr;
423 // Do we expect the next instruction to be part of the same bundle?
424 bool InBundle = false;
425
426 for (const MachineInstr &MI : MBB.instrs()) {
427 if (MI.getParent() != &MBB) {
428 report("Bad instruction parent pointer", &MBB);
429 errs() << "Instruction: " << MI;
430 continue;
431 }
432
433 // Check for consistent bundle flags.
434 if (InBundle && !MI.isBundledWithPred())
435 report("Missing BundledPred flag, "
436 "BundledSucc was set on predecessor",
437 &MI);
438 if (!InBundle && MI.isBundledWithPred())
439 report("BundledPred flag is set, "
440 "but BundledSucc not set on predecessor",
441 &MI);
442
443 // Is this a bundle header?
444 if (!MI.isInsideBundle()) {
445 if (CurBundle)
446 visitMachineBundleAfter(CurBundle);
447 CurBundle = &MI;
448 visitMachineBundleBefore(CurBundle);
449 } else if (!CurBundle)
450 report("No bundle header", &MI);
451 visitMachineInstrBefore(&MI);
452 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
453 const MachineOperand &Op = MI.getOperand(I);
454 if (Op.getParent() != &MI) {
455 // Make sure to use correct addOperand / removeOperand / ChangeTo
456 // functions when replacing operands of a MachineInstr.
457 report("Instruction has operand with wrong parent set", &MI);
458 }
459
460 visitMachineOperand(&Op, I);
461 }
462
463 // Was this the last bundled instruction?
464 InBundle = MI.isBundledWithSucc();
465 }
466 if (CurBundle)
467 visitMachineBundleAfter(CurBundle);
468 if (InBundle)
469 report("BundledSucc flag set on last instruction in block", &MBB.back());
470 visitMachineBasicBlockAfter(&MBB);
471 }
472 visitMachineFunctionAfter();
473
474 // Clean up.
475 regsLive.clear();
476 regsDefined.clear();
477 regsDead.clear();
478 regsKilled.clear();
479 regMasks.clear();
480 MBBInfoMap.clear();
481
482 return foundErrors;
483}
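
The bundle checks in the loop above enforce a pairing invariant between neighboring instructions. A hedged sketch of the same property as a standalone predicate (assumed helper name; the verifier itself reports each violation instead of returning early):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"

    // For consecutive instructions A, B: B.isBundledWithPred() must equal
    // A.isBundledWithSucc(), and the last instruction must not claim a
    // bundled successor.
    bool bundleFlagsConsistent(const llvm::MachineBasicBlock &MBB) {
      const llvm::MachineInstr *Prev = nullptr;
      for (const llvm::MachineInstr &MI : MBB.instrs()) {
        bool ExpectBundled = Prev && Prev->isBundledWithSucc();
        if (MI.isBundledWithPred() != ExpectBundled)
          return false;
        Prev = &MI;
      }
      return !(Prev && Prev->isBundledWithSucc());
    }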
484
485void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
486 assert(MF);
487 errs() << '\n';
488 if (!foundErrors++) {
489 if (Banner)
490 errs() << "# " << Banner << '\n';
491 if (LiveInts != nullptr)
492 LiveInts->print(errs());
493 else
494 MF->print(errs(), Indexes);
495 }
496 errs() << "*** Bad machine code: " << msg << " ***\n"
497 << "- function: " << MF->getName() << "\n";
498}
499
500void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
501 assert(MBB);
502 report(msg, MBB->getParent());
503 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
504 << MBB->getName() << " (" << (const void *)MBB << ')';
505 if (Indexes)
506 errs() << " [" << Indexes->getMBBStartIdx(MBB)
507 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
508 errs() << '\n';
509}
510
511void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
512 assert(MI);
513 report(msg, MI->getParent());
514 errs() << "- instruction: ";
515 if (Indexes && Indexes->hasIndex(*MI))
516 errs() << Indexes->getInstructionIndex(*MI) << '\t';
517 MI->print(errs(), /*IsStandalone=*/true);
518}
519
520void MachineVerifier::report(const char *msg, const MachineOperand *MO,
521 unsigned MONum, LLT MOVRegType) {
522 assert(MO);
523 report(msg, MO->getParent());
524 errs() << "- operand " << MONum << ": ";
525 MO->print(errs(), MOVRegType, TRI);
526 errs() << "\n";
527}
528
529void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
530 report(Msg.str().c_str(), MI);
531}
532
533void MachineVerifier::report_context(SlotIndex Pos) const {
534 errs() << "- at: " << Pos << '\n';
535}
536
537void MachineVerifier::report_context(const LiveInterval &LI) const {
538 errs() << "- interval: " << LI << '\n';
539}
540
541void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
542 LaneBitmask LaneMask) const {
543 report_context_liverange(LR);
544 report_context_vreg_regunit(VRegUnit);
545 if (LaneMask.any())
546 report_context_lanemask(LaneMask);
547}
548
549void MachineVerifier::report_context(const LiveRange::Segment &S) const {
550 errs() << "- segment: " << S << '\n';
551}
552
553void MachineVerifier::report_context(const VNInfo &VNI) const {
554 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
555}
556
557void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
558 errs() << "- liverange: " << LR << '\n';
559}
560
561void MachineVerifier::report_context(MCPhysReg PReg) const {
562 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
563}
564
565void MachineVerifier::report_context_vreg(Register VReg) const {
566 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
567}
568
569void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
570 if (VRegOrUnit.isVirtual()) {
571 report_context_vreg(VRegOrUnit);
572 } else {
573 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
574 }
575}
576
577void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
578 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
579}
580
581void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
582 BBInfo &MInfo = MBBInfoMap[MBB];
583 if (!MInfo.reachable) {
584 MInfo.reachable = true;
585 for (const MachineBasicBlock *Succ : MBB->successors())
586 markReachable(Succ);
587 }
588}
589
590void MachineVerifier::visitMachineFunctionBefore() {
591 lastIndex = SlotIndex();
592 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
593 : TRI->getReservedRegs(*MF);
594
595 if (!MF->empty())
596 markReachable(&MF->front());
597
598 // Build a set of the basic blocks in the function.
599 FunctionBlocks.clear();
600 for (const auto &MBB : *MF) {
601 FunctionBlocks.insert(&MBB);
602 BBInfo &MInfo = MBBInfoMap[&MBB];
603
604 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
605 if (MInfo.Preds.size() != MBB.pred_size())
606 report("MBB has duplicate entries in its predecessor list.", &MBB);
607
608 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
609 if (MInfo.Succs.size() != MBB.succ_size())
610 report("MBB has duplicate entries in its successor list.", &MBB);
611 }
612
613 // Check that the register use lists are sane.
614 MRI->verifyUseLists();
615
616 if (!MF->empty())
617 verifyStackFrame();
618}
619
620void
621MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
622 FirstTerminator = nullptr;
623 FirstNonPHI = nullptr;
624
625 if (!MF->getProperties().hasProperty(
626 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
627 // If this block has allocatable physical registers live-in, check that
628 // it is an entry block or landing pad.
629 for (const auto &LI : MBB->liveins()) {
630 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
631 MBB->getIterator() != MBB->getParent()->begin() &&
632 !MBB->isInlineAsmBrIndirectTarget()) {
633 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
634 "inlineasm-br-indirect-target.",
635 MBB);
636 report_context(LI.PhysReg);
637 }
638 }
639 }
640
641 if (MBB->isIRBlockAddressTaken()) {
642 if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
643 report("ir-block-address-taken is associated with basic block not used by "
644 "a blockaddress.",
645 MBB);
646 }
647
648 // Count the number of landing pad successors.
649 SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
650 for (const auto *succ : MBB->successors()) {
651 if (succ->isEHPad())
652 LandingPadSuccs.insert(succ);
653 if (!FunctionBlocks.count(succ))
654 report("MBB has successor that isn't part of the function.", MBB);
655 if (!MBBInfoMap[succ].Preds.count(MBB)) {
656 report("Inconsistent CFG", MBB);
657 errs() << "MBB is not in the predecessor list of the successor "
658 << printMBBReference(*succ) << ".\n";
659 }
660 }
661
662 // Check the predecessor list.
663 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
664 if (!FunctionBlocks.count(Pred))
665 report("MBB has predecessor that isn't part of the function.", MBB);
666 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
667 report("Inconsistent CFG", MBB);
668 errs() << "MBB is not in the successor list of the predecessor "
669 << printMBBReference(*Pred) << ".\n";
670 }
671 }
672
673 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
674 const BasicBlock *BB = MBB->getBasicBlock();
675 const Function &F = MF->getFunction();
676 if (LandingPadSuccs.size() > 1 &&
677 !(AsmInfo &&
678 AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
679 BB && isa<SwitchInst>(BB->getTerminator())) &&
680 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
681 report("MBB has more than one landing pad successor", MBB);
682
683 // Call analyzeBranch. If it succeeds, there are several more conditions to check.
684 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
685 SmallVector<MachineOperand, 4> Cond;
686 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
687 Cond)) {
688 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
689 // check whether its answers match up with reality.
690 if (!TBB && !FBB) {
691 // Block falls through to its successor.
692 if (!MBB->empty() && MBB->back().isBarrier() &&
693 !TII->isPredicated(MBB->back())) {
694 report("MBB exits via unconditional fall-through but ends with a "
695 "barrier instruction!", MBB);
696 }
697 if (!Cond.empty()) {
698 report("MBB exits via unconditional fall-through but has a condition!",
699 MBB);
700 }
701 } else if (TBB && !FBB && Cond.empty()) {
702 // Block unconditionally branches somewhere.
703 if (MBB->empty()) {
704 report("MBB exits via unconditional branch but doesn't contain "
705 "any instructions!", MBB);
706 } else if (!MBB->back().isBarrier()) {
707 report("MBB exits via unconditional branch but doesn't end with a "
708 "barrier instruction!", MBB);
709 } else if (!MBB->back().isTerminator()) {
710 report("MBB exits via unconditional branch but the branch isn't a "
711 "terminator instruction!", MBB);
712 }
713 } else if (TBB && !FBB && !Cond.empty()) {
714 // Block conditionally branches somewhere, otherwise falls through.
715 if (MBB->empty()) {
716 report("MBB exits via conditional branch/fall-through but doesn't "
717 "contain any instructions!", MBB);
718 } else if (MBB->back().isBarrier()) {
719 report("MBB exits via conditional branch/fall-through but ends with a "
720 "barrier instruction!", MBB);
721 } else if (!MBB->back().isTerminator()) {
722 report("MBB exits via conditional branch/fall-through but the branch "
723 "isn't a terminator instruction!", MBB);
724 }
725 } else if (TBB && FBB) {
726 // Block conditionally branches somewhere, otherwise branches
727 // somewhere else.
728 if (MBB->empty()) {
729 report("MBB exits via conditional branch/branch but doesn't "
730 "contain any instructions!", MBB);
731 } else if (!MBB->back().isBarrier()) {
732 report("MBB exits via conditional branch/branch but doesn't end with a "
733 "barrier instruction!", MBB);
734 } else if (!MBB->back().isTerminator()) {
735 report("MBB exits via conditional branch/branch but the branch "
736 "isn't a terminator instruction!", MBB);
737 }
738 if (Cond.empty()) {
739 report("MBB exits via conditional branch/branch but there's no "
740 "condition!", MBB);
741 }
742 } else {
743 report("analyzeBranch returned invalid data!", MBB);
744 }
745
746 // Now check that the successors match up with the answers reported by
747 // analyzeBranch.
748 if (TBB && !MBB->isSuccessor(TBB))
749 report("MBB exits via jump or conditional branch, but its target isn't a "
750 "CFG successor!",
751 MBB);
752 if (FBB && !MBB->isSuccessor(FBB))
753 report("MBB exits via conditional branch, but its target isn't a CFG "
754 "successor!",
755 MBB);
756
757 // There might be a fallthrough to the next block if there's either no
758 // unconditional true branch, or if there's a condition, and one of the
759 // branches is missing.
760 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
761
762 // A conditional fallthrough must be an actual CFG successor, not
763 // unreachable. (Conversely, an unconditional fallthrough might not really
764 // be a successor, because the block might end in unreachable.)
765 if (!Cond.empty() && !FBB) {
766 MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
767 if (MBBI == MF->end()) {
768 report("MBB conditionally falls through out of function!", MBB);
769 } else if (!MBB->isSuccessor(&*MBBI))
770 report("MBB exits via conditional branch/fall-through but the CFG "
771 "successors don't match the actual successors!",
772 MBB);
773 }
774
775 // Verify that there aren't any extra un-accounted-for successors.
776 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
777 // If this successor is one of the branch targets, it's okay.
778 if (SuccMBB == TBB || SuccMBB == FBB)
779 continue;
780 // If we might have a fallthrough, and the successor is the fallthrough
781 // block, that's also ok.
782 if (Fallthrough && SuccMBB == MBB->getNextNode())
783 continue;
784 // Also accept successors which are for exception-handling or might be
785 // inlineasm_br targets.
786 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
787 continue;
788 report("MBB has unexpected successors which are not branch targets, "
789 "fallthrough, EHPads, or inlineasm_br targets.",
790 MBB);
791 }
792 }
793
794 regsLive.clear();
795 if (MRI->tracksLiveness()) {
796 for (const auto &LI : MBB->liveins()) {
797 if (!Register::isPhysicalRegister(LI.PhysReg)) {
798 report("MBB live-in list contains non-physical register", MBB);
799 continue;
800 }
801 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
802 regsLive.insert(SubReg);
803 }
804 }
805
806 const MachineFrameInfo &MFI = MF->getFrameInfo();
807 BitVector PR = MFI.getPristineRegs(*MF);
808 for (unsigned I : PR.set_bits()) {
809 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
810 regsLive.insert(SubReg);
811 }
812
813 regsKilled.clear();
814 regsDefined.clear();
815
816 if (Indexes)
817 lastIndex = Indexes->getMBBStartIdx(MBB);
818}
819
820// This function gets called for all bundle headers, including normal
821// stand-alone unbundled instructions.
822void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
823 if (Indexes && Indexes->hasIndex(*MI)) {
824 SlotIndex idx = Indexes->getInstructionIndex(*MI);
825 if (!(idx > lastIndex)) {
826 report("Instruction index out of order", MI);
827 errs() << "Last instruction was at " << lastIndex << '\n';
828 }
829 lastIndex = idx;
830 }
831
832 // Ensure non-terminators don't follow terminators.
833 if (MI->isTerminator()) {
834 if (!FirstTerminator)
835 FirstTerminator = MI;
836 } else if (FirstTerminator) {
837 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
838 // precede non-terminators.
839 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
840 report("Non-terminator instruction after the first terminator", MI);
841 errs() << "First terminator was:\t" << *FirstTerminator;
842 }
843 }
844}
845
846// The operands on an INLINEASM instruction must follow a template.
847// Verify that the flag operands make sense.
848void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
849 // The first two operands on INLINEASM are the asm string and global flags.
850 if (MI->getNumOperands() < 2) {
851 report("Too few operands on inline asm", MI);
852 return;
853 }
854 if (!MI->getOperand(0).isSymbol())
855 report("Asm string must be an external symbol", MI);
856 if (!MI->getOperand(1).isImm())
857 report("Asm flags must be an immediate", MI);
858 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
859 // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16,
860 // and Extra_IsConvergent = 32.
861 if (!isUInt<6>(MI->getOperand(1).getImm()))
862 report("Unknown asm flags", &MI->getOperand(1), 1);
863
864 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
865
866 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
867 unsigned NumOps;
868 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
869 const MachineOperand &MO = MI->getOperand(OpNo);
870 // There may be implicit ops after the fixed operands.
871 if (!MO.isImm())
872 break;
873 NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
874 }
875
876 if (OpNo > MI->getNumOperands())
877 report("Missing operands in last group", MI);
878
879 // An optional MDNode follows the groups.
880 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
881 ++OpNo;
882
883 // All trailing operands must be implicit registers.
884 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
885 const MachineOperand &MO = MI->getOperand(OpNo);
886 if (!MO.isReg() || !MO.isImplicit())
887 report("Expected implicit register after groups", &MO, OpNo);
888 }
889
890 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
891 const MachineBasicBlock *MBB = MI->getParent();
892
893 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
894 i != e; ++i) {
895 const MachineOperand &MO = MI->getOperand(i);
896
897 if (!MO.isMBB())
898 continue;
899
900 // Check that the successor & predecessor lists look ok; assume they are
901 // not. Find the indirect target without going through the successors.
902 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
903 if (!IndirectTargetMBB) {
904 report("INLINEASM_BR indirect target does not exist", &MO, i);
905 break;
906 }
907
908 if (!MBB->isSuccessor(IndirectTargetMBB))
909 report("INLINEASM_BR indirect target missing from successor list", &MO,
910 i);
911
912 if (!IndirectTargetMBB->isPredecessor(MBB))
913 report("INLINEASM_BR indirect target predecessor list missing parent",
914 &MO, i);
915 }
916 }
917}
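
The flag-operand walk above (OpNo += NumOps) is how INLINEASM operand groups are traversed: each group is one immediate flag word followed by the registers it describes. A compact sketch of the same traversal:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/IR/InlineAsm.h"

    void walkAsmGroups(const llvm::MachineInstr &MI) {
      unsigned OpNo = llvm::InlineAsm::MIOp_FirstOperand;
      while (OpNo < MI.getNumOperands()) {
        const llvm::MachineOperand &Flag = MI.getOperand(OpNo);
        if (!Flag.isImm())
          break; // implicit operands follow the groups
        // One flag word plus the registers belonging to this group.
        OpNo += 1 + llvm::InlineAsm::getNumOperandRegisters(Flag.getImm());
      }
    }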
918
919bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
920 const MachineRegisterInfo &MRI) {
921 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
922 if (!Op.isReg())
923 return false;
924 const auto Reg = Op.getReg();
925 if (Reg.isPhysical())
926 return false;
927 return !MRI.getType(Reg).isScalar();
928 }))
929 return true;
930 report("All register operands must have scalar types", &MI);
931 return false;
932}
933
934/// Check that types are consistent when two operands need to have the same
935/// number of vector elements.
936/// \return true if the types are valid.
937bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
938 const MachineInstr *MI) {
939 if (Ty0.isVector() != Ty1.isVector()) {
940 report("operand types must be all-vector or all-scalar", MI);
941 // Generally we try to report as many issues as possible at once, but in
942 // this case it's not clear what we should be comparing the size of the
943 // scalar with: the size of the whole vector or its lane. Instead of
944 // making an arbitrary choice and emitting a not-so-helpful message, let's
945 // avoid the extra noise and stop here.
946 return false;
947 }
948
949 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
950 report("operand types must preserve number of vector elements", MI);
951 return false;
952 }
953
954 return true;
955}
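
A quick illustration of the rule above (a sketch; the accepted and rejected pairs follow directly from the two checks):

    #include "llvm/CodeGen/LowLevelType.h"

    void elementMatchExamples() {
      llvm::LLT S32 = llvm::LLT::scalar(32);
      llvm::LLT V4S32 = llvm::LLT::fixed_vector(4, 32);
      llvm::LLT V2S64 = llvm::LLT::fixed_vector(2, 64);
      // Accepted: scalar with scalar, or vectors with equal element counts,
      // e.g. (S32, llvm::LLT::scalar(64)) or (V4S32, llvm::LLT::fixed_vector(4, 16)).
      // Rejected: mixed scalar/vector (S32 vs V4S32), or differing element
      // counts (V4S32 vs V2S64).
      (void)S32; (void)V4S32; (void)V2S64;
    }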
956
957void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
958 if (isFunctionSelected)
959 report("Unexpected generic instruction in a Selected function", MI);
960
961 const MCInstrDesc &MCID = MI->getDesc();
962 unsigned NumOps = MI->getNumOperands();
963
964 // Branches must reference a basic block if they are not indirect
965 if (MI->isBranch() && !MI->isIndirectBranch()) {
966 bool HasMBB = false;
967 for (const MachineOperand &Op : MI->operands()) {
968 if (Op.isMBB()) {
969 HasMBB = true;
970 break;
971 }
972 }
973
974 if (!HasMBB) {
975 report("Branch instruction is missing a basic block operand or "
976 "isIndirectBranch property",
977 MI);
978 }
979 }
980
981 // Check types.
982 SmallVector<LLT, 4> Types;
983 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
984 I != E; ++I) {
985 if (!MCID.operands()[I].isGenericType())
986 continue;
987 // Generic instructions specify type equality constraints between some of
988 // their operands. Make sure these are consistent.
989 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
990 Types.resize(std::max(TypeIdx + 1, Types.size()));
991
992 const MachineOperand *MO = &MI->getOperand(I);
993 if (!MO->isReg()) {
994 report("generic instruction must use register operands", MI);
995 continue;
996 }
997
998 LLT OpTy = MRI->getType(MO->getReg());
999 // Don't report a type mismatch if there is no actual mismatch, only a
1000 // type missing, to reduce noise:
1001 if (OpTy.isValid()) {
1002 // Only the first valid type for a type index will be printed: don't
1003 // overwrite it later so it's always clear which type was expected:
1004 if (!Types[TypeIdx].isValid())
1005 Types[TypeIdx] = OpTy;
1006 else if (Types[TypeIdx] != OpTy)
1007 report("Type mismatch in generic instruction", MO, I, OpTy);
1008 } else {
1009 // Generic instructions must have types attached to their operands.
1010 report("Generic instruction is missing a virtual register type", MO, I);
1011 }
1012 }
1013
1014 // Generic opcodes must not have physical register operands.
1015 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1016 const MachineOperand *MO = &MI->getOperand(I);
1017 if (MO->isReg() && MO->getReg().isPhysical())
1018 report("Generic instruction cannot have physical register", MO, I);
1019 }
1020
1021 // Avoid out of bounds in checks below. This was already reported earlier.
1022 if (MI->getNumOperands() < MCID.getNumOperands())
1023 return;
1024
1025 StringRef ErrorInfo;
1026 if (!TII->verifyInstruction(*MI, ErrorInfo))
1027 report(ErrorInfo.data(), MI);
1028
1029 // Verify properties of various specific instruction types
1030 unsigned Opc = MI->getOpcode();
1031 switch (Opc) {
1032 case TargetOpcode::G_ASSERT_SEXT:
1033 case TargetOpcode::G_ASSERT_ZEXT: {
1034 std::string OpcName =
1035 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1036 if (!MI->getOperand(2).isImm()) {
1037 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1038 break;
1039 }
1040
1041 Register Dst = MI->getOperand(0).getReg();
1042 Register Src = MI->getOperand(1).getReg();
1043 LLT SrcTy = MRI->getType(Src);
1044 int64_t Imm = MI->getOperand(2).getImm();
1045 if (Imm <= 0) {
1046 report(Twine(OpcName, " size must be >= 1"), MI);
1047 break;
1048 }
1049
1050 if (Imm >= SrcTy.getScalarSizeInBits()) {
1051 report(Twine(OpcName, " size must be less than source bit width"), MI);
1052 break;
1053 }
1054
1055 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1056 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1057
1058 // Allow only the source bank to be set.
1059 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1060 report(Twine(OpcName, " cannot change register bank"), MI);
1061 break;
1062 }
1063
1064 // Don't allow a class change. Do allow member class->regbank.
1065 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1066 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1067 report(
1068 Twine(OpcName, " source and destination register classes must match"),
1069 MI);
1070 break;
1071 }
1072
1073 break;
1074 }
1075
1076 case TargetOpcode::G_CONSTANT:
1077 case TargetOpcode::G_FCONSTANT: {
1078 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1079 if (DstTy.isVector())
1080 report("Instruction cannot use a vector result type", MI);
1081
1082 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1083 if (!MI->getOperand(1).isCImm()) {
1084 report("G_CONSTANT operand must be cimm", MI);
1085 break;
1086 }
1087
1088 const ConstantInt *CI = MI->getOperand(1).getCImm();
1089 if (CI->getBitWidth() != DstTy.getSizeInBits())
1090 report("inconsistent constant size", MI);
1091 } else {
1092 if (!MI->getOperand(1).isFPImm()) {
1093 report("G_FCONSTANT operand must be fpimm", MI);
1094 break;
1095 }
1096 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1097
1098 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
1099 DstTy.getSizeInBits()) {
1100 report("inconsistent constant size", MI);
1101 }
1102 }
1103
1104 break;
1105 }
1106 case TargetOpcode::G_LOAD:
1107 case TargetOpcode::G_STORE:
1108 case TargetOpcode::G_ZEXTLOAD:
1109 case TargetOpcode::G_SEXTLOAD: {
1110 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1111 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1112 if (!PtrTy.isPointer())
1113 report("Generic memory instruction must access a pointer", MI);
1114
1115 // Generic loads and stores must have a single MachineMemOperand
1116 // describing that access.
1117 if (!MI->hasOneMemOperand()) {
1118 report("Generic instruction accessing memory must have one mem operand",
1119 MI);
1120 } else {
1121 const MachineMemOperand &MMO = **MI->memoperands_begin();
1122 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1123 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1124 if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1125 report("Generic extload must have a narrower memory type", MI);
1126 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1127 if (MMO.getSize() > ValTy.getSizeInBytes())
1128 report("load memory size cannot exceed result size", MI);
1129 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1130 if (ValTy.getSizeInBytes() < MMO.getSize())
1131 report("store memory size cannot exceed value size", MI);
1132 }
1133
1134 const AtomicOrdering Order = MMO.getSuccessOrdering();
1135 if (Opc == TargetOpcode::G_STORE) {
1136 if (Order == AtomicOrdering::Acquire ||
1137 Order == AtomicOrdering::AcquireRelease)
1138 report("atomic store cannot use acquire ordering", MI);
1139
1140 } else {
1141 if (Order == AtomicOrdering::Release ||
1142 Order == AtomicOrdering::AcquireRelease)
1143 report("atomic load cannot use release ordering", MI);
1144 }
1145 }
1146
1147 break;
1148 }
1149 case TargetOpcode::G_PHI: {
1150 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1151 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1152 [this, &DstTy](const MachineOperand &MO) {
1153 if (!MO.isReg())
1154 return true;
1155 LLT Ty = MRI->getType(MO.getReg());
1156 if (!Ty.isValid() || (Ty != DstTy))
1157 return false;
1158 return true;
1159 }))
1160 report("Generic Instruction G_PHI has operands with incompatible/missing "
1161 "types",
1162 MI);
1163 break;
1164 }
1165 case TargetOpcode::G_BITCAST: {
1166 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1167 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1168 if (!DstTy.isValid() || !SrcTy.isValid())
1169 break;
1170
1171 if (SrcTy.isPointer() != DstTy.isPointer())
1172 report("bitcast cannot convert between pointers and other types", MI);
1173
1174 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1175 report("bitcast sizes must match", MI);
1176
1177 if (SrcTy == DstTy)
1178 report("bitcast must change the type", MI);
1179
1180 break;
1181 }
1182 case TargetOpcode::G_INTTOPTR:
1183 case TargetOpcode::G_PTRTOINT:
1184 case TargetOpcode::G_ADDRSPACE_CAST: {
1185 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1186 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1187 if (!DstTy.isValid() || !SrcTy.isValid())
1188 break;
1189
1190 verifyVectorElementMatch(DstTy, SrcTy, MI);
1191
1192 DstTy = DstTy.getScalarType();
1193 SrcTy = SrcTy.getScalarType();
1194
1195 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1196 if (!DstTy.isPointer())
1197 report("inttoptr result type must be a pointer", MI);
1198 if (SrcTy.isPointer())
1199 report("inttoptr source type must not be a pointer", MI);
1200 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1201 if (!SrcTy.isPointer())
1202 report("ptrtoint source type must be a pointer", MI);
1203 if (DstTy.isPointer())
1204 report("ptrtoint result type must not be a pointer", MI);
1205 } else {
1206 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1207 if (!SrcTy.isPointer() || !DstTy.isPointer())
1208 report("addrspacecast types must be pointers", MI);
1209 else {
1210 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1211 report("addrspacecast must convert different address spaces", MI);
1212 }
1213 }
1214
1215 break;
1216 }
1217 case TargetOpcode::G_PTR_ADD: {
1218 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1219 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1220 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1221 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1222 break;
1223
1224 if (!PtrTy.getScalarType().isPointer())
1225 report("gep first operand must be a pointer", MI);
1226
1227 if (OffsetTy.getScalarType().isPointer())
1228 report("gep offset operand must not be a pointer", MI);
1229
1230 // TODO: Is the offset allowed to be a scalar with a vector?
1231 break;
1232 }
1233 case TargetOpcode::G_PTRMASK: {
1234 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1235 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1236 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1237 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1238 break;
1239
1240 if (!DstTy.getScalarType().isPointer())
1241 report("ptrmask result type must be a pointer", MI);
1242
1243 if (!MaskTy.getScalarType().isScalar())
1244 report("ptrmask mask type must be an integer", MI);
1245
1246 verifyVectorElementMatch(DstTy, MaskTy, MI);
1247 break;
1248 }
1249 case TargetOpcode::G_SEXT:
1250 case TargetOpcode::G_ZEXT:
1251 case TargetOpcode::G_ANYEXT:
1252 case TargetOpcode::G_TRUNC:
1253 case TargetOpcode::G_FPEXT:
1254 case TargetOpcode::G_FPTRUNC: {
1255 // Number of operands and presence of types is already checked (and
1256 // reported in case of any issues), so no need to report them again. As
1257 // we're trying to report as many issues as possible at once, however, the
1258 // instructions aren't guaranteed to have the right number of operands or
1259 // types attached to them at this point
1260 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1261 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1262 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1263 if (!DstTy.isValid() || !SrcTy.isValid())
1264 break;
1265
1266 LLT DstElTy = DstTy.getScalarType();
1267 LLT SrcElTy = SrcTy.getScalarType();
1268 if (DstElTy.isPointer() || SrcElTy.isPointer())
1269 report("Generic extend/truncate can not operate on pointers", MI);
1270
1271 verifyVectorElementMatch(DstTy, SrcTy, MI);
1272
1273 unsigned DstSize = DstElTy.getSizeInBits();
1274 unsigned SrcSize = SrcElTy.getSizeInBits();
1275 switch (MI->getOpcode()) {
1276 default:
1277 if (DstSize <= SrcSize)
1278 report("Generic extend has destination type no larger than source", MI);
1279 break;
1280 case TargetOpcode::G_TRUNC:
1281 case TargetOpcode::G_FPTRUNC:
1282 if (DstSize >= SrcSize)
1283 report("Generic truncate has destination type no smaller than source",
1284 MI);
1285 break;
1286 }
1287 break;
1288 }
1289 case TargetOpcode::G_SELECT: {
1290 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1291 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1292 if (!SelTy.isValid() || !CondTy.isValid())
1293 break;
1294
1295 // Scalar condition select on a vector is valid.
1296 if (CondTy.isVector())
1297 verifyVectorElementMatch(SelTy, CondTy, MI);
1298 break;
1299 }
1300 case TargetOpcode::G_MERGE_VALUES: {
1301 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1302 // e.g. s2N = MERGE sN, sN
1303 // Merging multiple scalars into a vector is not allowed, should use
1304 // G_BUILD_VECTOR for that.
1305 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1306 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1307 if (DstTy.isVector() || SrcTy.isVector())
1308 report("G_MERGE_VALUES cannot operate on vectors", MI);
1309
1310 const unsigned NumOps = MI->getNumOperands();
1311 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1312 report("G_MERGE_VALUES result size is inconsistent", MI);
1313
1314 for (unsigned I = 2; I != NumOps; ++I) {
1315 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1316 report("G_MERGE_VALUES source types do not match", MI);
1317 }
1318
1319 break;
1320 }
1321 case TargetOpcode::G_UNMERGE_VALUES: {
1322 unsigned NumDsts = MI->getNumOperands() - 1;
1323 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1324 for (unsigned i = 1; i < NumDsts; ++i) {
1325 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1326 report("G_UNMERGE_VALUES destination types do not match", MI);
1327 break;
1328 }
1329 }
1330
1331 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1332 if (DstTy.isVector()) {
1333 // This case is the converse of G_CONCAT_VECTORS.
1334 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1335 SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
1336 report("G_UNMERGE_VALUES source operand does not match vector "
1337 "destination operands",
1338 MI);
1339 } else if (SrcTy.isVector()) {
1340 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1341 // mismatched types as long as the total size matches:
1342 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1343 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1344 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1345 "destination operands",
1346 MI);
1347 } else {
1348 // This case is the converse of G_MERGE_VALUES.
1349 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1350 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1351 "destination operands",
1352 MI);
1353 }
1354 }
1355 break;
1356 }
1357 case TargetOpcode::G_BUILD_VECTOR: {
1358 // Source types must be scalars, dest type a vector. Total size of scalars
1359 // must match the dest vector size.
1360 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1361 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1362 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1363 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1364 break;
1365 }
1366
1367 if (DstTy.getElementType() != SrcEltTy)
1368 report("G_BUILD_VECTOR result element type must match source type", MI);
1369
1370 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1371 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1372
1373 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1374 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1375 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1376
1377 break;
1378 }
1379 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1380 // Source types must be scalars, dest type a vector. Scalar types must be
1381 // larger than the dest vector elt type, as this is a truncating operation.
1382 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1383 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1384 if (!DstTy.isVector() || SrcEltTy.isVector())
1385 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1386 MI);
1387 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1388 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1389 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1390 MI);
1391 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1392 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1393 "dest elt type",
1394 MI);
1395 break;
1396 }
1397 case TargetOpcode::G_CONCAT_VECTORS: {
1398 // Source types should be vectors, and total size should match the dest
1399 // vector size.
1400 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1401 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1402 if (!DstTy.isVector() || !SrcTy.isVector())
1403 report("G_CONCAT_VECTOR requires vector source and destination operands",
1404 MI);
1405
1406 if (MI->getNumOperands() < 3)
1407 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1408
1409 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1410 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1411 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1412 if (DstTy.getNumElements() !=
1413 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1414 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1415 break;
1416 }
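For G_CONCAT_VECTORS the lane arithmetic is the mirror image of G_UNMERGE_VALUES: the destination must carry SrcTy.getNumElements() lanes for every source operand. A short sketch of the count rule, with a hypothetical helper name:

    // One def plus (NumOperands - 1) vector sources, all with SrcElts lanes.
    static bool concatLaneCountOK(unsigned DstElts, unsigned SrcElts,
                                  unsigned NumOperands) {
      return DstElts == SrcElts * (NumOperands - 1);
    }
    // e.g. <8 x s16> = G_CONCAT_VECTORS <4 x s16>, <4 x s16>
    //      => concatLaneCountOK(8, 4, 3) is true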
1417 case TargetOpcode::G_ICMP:
1418 case TargetOpcode::G_FCMP: {
1419 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1420 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1421
1422 if ((DstTy.isVector() != SrcTy.isVector()) ||
1423 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1424 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1425
1426 break;
1427 }
1428 case TargetOpcode::G_EXTRACT: {
1429 const MachineOperand &SrcOp = MI->getOperand(1);
1430 if (!SrcOp.isReg()) {
1431 report("extract source must be a register", MI);
1432 break;
1433 }
1434
1435 const MachineOperand &OffsetOp = MI->getOperand(2);
1436 if (!OffsetOp.isImm()) {
1437 report("extract offset must be a constant", MI);
1438 break;
1439 }
1440
1441 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1442 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1443 if (SrcSize == DstSize)
1444 report("extract source must be larger than result", MI);
1445
1446 if (DstSize + OffsetOp.getImm() > SrcSize)
1447 report("extract reads past end of register", MI);
1448 break;
1449 }
1450 case TargetOpcode::G_INSERT: {
1451 const MachineOperand &SrcOp = MI->getOperand(2);
1452 if (!SrcOp.isReg()) {
1453 report("insert source must be a register", MI);
1454 break;
1455 }
1456
1457 const MachineOperand &OffsetOp = MI->getOperand(3);
1458 if (!OffsetOp.isImm()) {
1459 report("insert offset must be a constant", MI);
1460 break;
1461 }
1462
1463 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1464 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1465
1466 if (DstSize <= SrcSize)
1467 report("inserted size must be smaller than total register", MI);
1468
1469 if (SrcSize + OffsetOp.getImm() > DstSize)
1470 report("insert writes past end of register", MI);
1471
1472 break;
1473 }
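G_EXTRACT and G_INSERT above reduce to the same interval arithmetic over bit offsets: the smaller value plus the immediate offset must fit inside the larger register, and the degenerate same-size forms are rejected. A runnable sketch with hypothetical helper names:

    #include <cstdint>

    // G_EXTRACT: result strictly smaller than the source, read stays in bounds.
    static bool extractInBounds(unsigned DstSize, unsigned SrcSize, int64_t Off) {
      return SrcSize != DstSize && DstSize + Off <= SrcSize;
    }

    // G_INSERT: inserted value strictly smaller, write stays in bounds.
    static bool insertInBounds(unsigned DstSize, unsigned SrcSize, int64_t Off) {
      return DstSize > SrcSize && SrcSize + Off <= DstSize;
    }

    int main() {
      // Extracting bits [32,64) of an s64 as an s32 is fine; offset 40 reads
      // past the end; inserting an s32 at bit 0 of an s64 is fine.
      return (extractInBounds(32, 64, 32) && !extractInBounds(32, 64, 40) &&
              insertInBounds(64, 32, 0)) ? 0 : 1;
    }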
1474 case TargetOpcode::G_JUMP_TABLE: {
1475 if (!MI->getOperand(1).isJTI())
1476 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1477 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1478 if (!DstTy.isPointer())
1479 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1480 break;
1481 }
1482 case TargetOpcode::G_BRJT: {
1483 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1484 report("G_BRJT src operand 0 must be a pointer type", MI);
1485
1486 if (!MI->getOperand(1).isJTI())
1487 report("G_BRJT src operand 1 must be a jump table index", MI);
1488
1489 const auto &IdxOp = MI->getOperand(2);
1490 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1491 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1492 break;
1493 }
1494 case TargetOpcode::G_INTRINSIC:
1495 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1496 // TODO: Should verify number of def and use operands, but the current
1497 // interface requires passing in IR types for mangling.
1498 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1499 if (!IntrIDOp.isIntrinsicID()) {
1500 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1501 break;
1502 }
1503
1504 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1505 unsigned IntrID = IntrIDOp.getIntrinsicID();
1506 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1507 AttributeList Attrs = Intrinsic::getAttributes(
1508 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1509 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1510 if (NoSideEffects && DeclHasSideEffects) {
1511 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1512 break;
1513 }
1514 if (!NoSideEffects && !DeclHasSideEffects) {
1515 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1516 break;
1517 }
1518 }
1519
1520 break;
1521 }
1522 case TargetOpcode::G_SEXT_INREG: {
1523 if (!MI->getOperand(2).isImm()) {
1524 report("G_SEXT_INREG expects an immediate operand #2", MI);
1525 break;
1526 }
1527
1528 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1529 int64_t Imm = MI->getOperand(2).getImm();
1530 if (Imm <= 0)
1531 report("G_SEXT_INREG size must be >= 1", MI);
1532 if (Imm >= SrcTy.getScalarSizeInBits())
1533 report("G_SEXT_INREG size must be less than source bit width", MI);
1534 break;
1535 }
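So the immediate must lie in [1, scalar width). For intuition, this standalone sketch (fixed 32-bit width, not LLVM code) shows what the operation computes within that legal range:

    #include <cassert>
    #include <cstdint>

    // Sign-extend the low 'Bits' bits of V to all 32 bits; meaningful exactly
    // for 1 <= Bits < 32, the range the verifier enforces above.
    static int32_t sextInReg(int32_t V, unsigned Bits) {
      unsigned Shift = 32 - Bits;
      return static_cast<int32_t>(static_cast<uint32_t>(V) << Shift) >> Shift;
    }

    int main() {
      assert(sextInReg(0xFF, 8) == -1);   // bit 7 set -> negative
      assert(sextInReg(0x7F, 8) == 0x7F); // bit 7 clear -> unchanged
    }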
1536 case TargetOpcode::G_SHUFFLE_VECTOR: {
1537 const MachineOperand &MaskOp = MI->getOperand(3);
1538 if (!MaskOp.isShuffleMask()) {
1539 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1540 break;
1541 }
1542
1543 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1544 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1545 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1546
1547 if (Src0Ty != Src1Ty)
1548 report("Source operands must be the same type", MI);
1549
1550 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1551 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1552
1553 // Don't check that all operands are vector because scalars are used in
1554 // place of 1 element vectors.
1555 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1556 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1557
1558 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1559
1560 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1561 report("Wrong result type for shufflemask", MI);
1562
1563 for (int Idx : MaskIdxes) {
1564 if (Idx < 0)
1565 continue;
1566
1567 if (Idx >= 2 * SrcNumElts)
1568 report("Out of bounds shuffle index", MI);
1569 }
1570
1571 break;
1572 }
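Summarizing the mask rules above: the mask length fixes the result's lane count, each non-negative index selects a lane from the concatenation of the two identically typed sources, and negative indices denote undef lanes. A hypothetical standalone model, not the LLVM API:

    #include <vector>

    static bool shuffleMaskOK(const std::vector<int> &Mask, int SrcNumElts,
                              int DstNumElts) {
      if (static_cast<int>(Mask.size()) != DstNumElts)
        return false;              // "Wrong result type for shufflemask"
      for (int Idx : Mask)
        if (Idx >= 2 * SrcNumElts) // negative (undef) lanes are always allowed
          return false;            // "Out of bounds shuffle index"
      return true;
    }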
1573 case TargetOpcode::G_DYN_STACKALLOC: {
1574 const MachineOperand &DstOp = MI->getOperand(0);
1575 const MachineOperand &AllocOp = MI->getOperand(1);
1576 const MachineOperand &AlignOp = MI->getOperand(2);
1577
1578 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1579 report("dst operand 0 must be a pointer type", MI);
1580 break;
1581 }
1582
1583 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1584 report("src operand 1 must be a scalar reg type", MI);
1585 break;
1586 }
1587
1588 if (!AlignOp.isImm()) {
1589 report("src operand 2 must be an immediate type", MI);
1590 break;
1591 }
1592 break;
1593 }
1594 case TargetOpcode::G_MEMCPY_INLINE:
1595 case TargetOpcode::G_MEMCPY:
1596 case TargetOpcode::G_MEMMOVE: {
1597 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1598 if (MMOs.size() != 2) {
1599 report("memcpy/memmove must have 2 memory operands", MI);
1600 break;
1601 }
1602
1603 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1604 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1605 report("wrong memory operand types", MI);
1606 break;
1607 }
1608
1609 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1610 report("inconsistent memory operand sizes", MI);
1611
1612 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1613 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1614
1615 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1616 report("memory instruction operand must be a pointer", MI);
1617 break;
1618 }
1619
1620 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1621 report("inconsistent store address space", MI);
1622 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1623 report("inconsistent load address space", MI);
1624
1625 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1626 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1627 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1628
1629 break;
1630 }
1631 case TargetOpcode::G_BZERO:
1632 case TargetOpcode::G_MEMSET: {
1633 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1634 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1635 if (MMOs.size() != 1) {
1636 report(Twine(Name, " must have 1 memory operand"), MI);
1637 break;
1638 }
1639
1640 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1641 report(Twine(Name, " memory operand must be a store"), MI);
1642 break;
1643 }
1644
1645 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1646 if (!DstPtrTy.isPointer()) {
1647 report(Twine(Name, " operand must be a pointer"), MI);
1648 break;
1649 }
1650
1651 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1652 report("inconsistent " + Twine(Name, " address space"), MI);
1653
1654 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1655 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1656 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1657
1658 break;
1659 }
1660 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1661 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1662 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1663 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1664 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1665 if (!DstTy.isScalar())
1666 report("Vector reduction requires a scalar destination type", MI);
1667 if (!Src1Ty.isScalar())
1668 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1669 if (!Src2Ty.isVector())
1670 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1671 break;
1672 }
1673 case TargetOpcode::G_VECREDUCE_FADD:
1674 case TargetOpcode::G_VECREDUCE_FMUL:
1675 case TargetOpcode::G_VECREDUCE_FMAX:
1676 case TargetOpcode::G_VECREDUCE_FMIN:
1677 case TargetOpcode::G_VECREDUCE_ADD:
1678 case TargetOpcode::G_VECREDUCE_MUL:
1679 case TargetOpcode::G_VECREDUCE_AND:
1680 case TargetOpcode::G_VECREDUCE_OR:
1681 case TargetOpcode::G_VECREDUCE_XOR:
1682 case TargetOpcode::G_VECREDUCE_SMAX:
1683 case TargetOpcode::G_VECREDUCE_SMIN:
1684 case TargetOpcode::G_VECREDUCE_UMAX:
1685 case TargetOpcode::G_VECREDUCE_UMIN: {
1686 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1687 if (!DstTy.isScalar())
1688 report("Vector reduction requires a scalar destination type", MI);
1689 break;
1690 }
1691
1692 case TargetOpcode::G_SBFX:
1693 case TargetOpcode::G_UBFX: {
1694 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1695 if (DstTy.isVector()) {
1696 report("Bitfield extraction is not supported on vectors", MI);
1697 break;
1698 }
1699 break;
1700 }
1701 case TargetOpcode::G_SHL:
1702 case TargetOpcode::G_LSHR:
1703 case TargetOpcode::G_ASHR:
1704 case TargetOpcode::G_ROTR:
1705 case TargetOpcode::G_ROTL: {
1706 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1707 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1708 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1709 report("Shifts and rotates require operands to be either all scalars or "
1710 "all vectors",
1711 MI);
1712 break;
1713 }
1714 break;
1715 }
1716 case TargetOpcode::G_LLROUND:
1717 case TargetOpcode::G_LROUND: {
1718 verifyAllRegOpsScalar(*MI, *MRI);
1719 break;
1720 }
1721 case TargetOpcode::G_IS_FPCLASS: {
1722 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1723 LLT DestEltTy = DestTy.getScalarType();
1724 if (!DestEltTy.isScalar()) {
1725 report("Destination must be a scalar or vector of scalars", MI);
1726 break;
1727 }
1728 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1729 LLT SrcEltTy = SrcTy.getScalarType();
1730 if (!SrcEltTy.isScalar()) {
1731 report("Source must be a scalar or vector of scalars", MI);
1732 break;
1733 }
1734 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1735 break;
1736 const MachineOperand &TestMO = MI->getOperand(2);
1737 if (!TestMO.isImm()) {
1738 report("floating-point class set (operand 2) must be an immediate", MI);
1739 break;
1740 }
1741 int64_t Test = TestMO.getImm();
1742 if (Test < 0 || Test > fcAllFlags) {
1743 report("Incorrect floating-point class set (operand 2)", MI);
1744 break;
1745 }
1746 break;
1747 }
1748 case TargetOpcode::G_ASSERT_ALIGN: {
1749 if (MI->getOperand(2).getImm() < 1)
1750 report("alignment immediate must be >= 1", MI);
1751 break;
1752 }
1753 case TargetOpcode::G_CONSTANT_POOL: {
1754 if (!MI->getOperand(1).isCPI())
1755 report("Src operand 1 must be a constant pool index", MI);
1756 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1757 report("Dst operand 0 must be a pointer", MI);
1758 break;
1759 }
1760 default:
1761 break;
1762 }
1763}
1764
1765void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1766 const MCInstrDesc &MCID = MI->getDesc();
1767 if (MI->getNumOperands() < MCID.getNumOperands()) {
1768 report("Too few operands", MI);
1769 errs() << MCID.getNumOperands() << " operands expected, but "
1770 << MI->getNumOperands() << " given.\n";
1771 }
1772
1773 if (MI->isPHI()) {
1774 if (MF->getProperties().hasProperty(
1775 MachineFunctionProperties::Property::NoPHIs))
1776 report("Found PHI instruction with NoPHIs property set", MI);
1777
1778 if (FirstNonPHI)
1779 report("Found PHI instruction after non-PHI", MI);
1780 } else if (FirstNonPHI == nullptr)
1781 FirstNonPHI = MI;
1782
1783 // Check the tied operands.
1784 if (MI->isInlineAsm())
1785 verifyInlineAsm(MI);
1786
1787 // Check that unspillable terminators define a reg and have at most one use.
1788 if (TII->isUnspillableTerminator(MI)) {
1789 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1790 report("Unspillable Terminator does not define a reg", MI);
1791 Register Def = MI->getOperand(0).getReg();
1792 if (Def.isVirtual() &&
1793 !MF->getProperties().hasProperty(
1794 MachineFunctionProperties::Property::NoPHIs) &&
1795 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1796 report("Unspillable Terminator expected to have at most one use!", MI);
1797 }
1798
1799 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1800 // DBG_VALUEs: these are convenient to use in tests, but should never get
1801 // generated.
1802 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1803 if (!MI->getDebugLoc())
1804 report("Missing DebugLoc for debug instruction", MI);
1805
1806 // Meta instructions should never be the subject of debug value tracking;
1807 // they don't create a value in the output program at all.
1808 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1809 report("Metadata instruction should not have a value tracking number", MI);
1810
1811 // Check the MachineMemOperands for basic consistency.
1812 for (MachineMemOperand *Op : MI->memoperands()) {
1813 if (Op->isLoad() && !MI->mayLoad())
1814 report("Missing mayLoad flag", MI);
1815 if (Op->isStore() && !MI->mayStore())
1816 report("Missing mayStore flag", MI);
1817 }
1818
1819 // Debug values must not have a slot index.
1820 // Other instructions must have one, unless they are inside a bundle.
1821 if (LiveInts) {
1822 bool mapped = !LiveInts->isNotInMIMap(*MI);
1823 if (MI->isDebugOrPseudoInstr()) {
1824 if (mapped)
1825 report("Debug instruction has a slot index", MI);
1826 } else if (MI->isInsideBundle()) {
1827 if (mapped)
1828 report("Instruction inside bundle has a slot index", MI);
1829 } else {
1830 if (!mapped)
1831 report("Missing slot index", MI);
1832 }
1833 }
1834
1835 unsigned Opc = MCID.getOpcode();
1836 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1837 verifyPreISelGenericInstruction(MI);
1838 return;
1839 }
1840
1841 StringRef ErrorInfo;
1842 if (!TII->verifyInstruction(*MI, ErrorInfo))
1843 report(ErrorInfo.data(), MI);
1844
1845 // Verify properties of various specific instruction types
1846 switch (MI->getOpcode()) {
1847 case TargetOpcode::COPY: {
1848 const MachineOperand &DstOp = MI->getOperand(0);
1849 const MachineOperand &SrcOp = MI->getOperand(1);
1850 const Register SrcReg = SrcOp.getReg();
1851 const Register DstReg = DstOp.getReg();
1852
1853 LLT DstTy = MRI->getType(DstReg);
1854 LLT SrcTy = MRI->getType(SrcReg);
1855 if (SrcTy.isValid() && DstTy.isValid()) {
1856 // If both types are valid, check that the types are the same.
1857 if (SrcTy != DstTy) {
1858 report("Copy Instruction is illegal with mismatching types", MI);
1859 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1860 }
1861
1862 break;
1863 }
1864
1865 if (!SrcTy.isValid() && !DstTy.isValid())
1866 break;
1867
1868 // If we have only one valid type, this is likely a copy between a virtual
1869 // and physical register.
1870 unsigned SrcSize = 0;
1871 unsigned DstSize = 0;
1872 if (SrcReg.isPhysical() && DstTy.isValid()) {
1873 const TargetRegisterClass *SrcRC =
1874 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1875 if (SrcRC)
1876 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1877 }
1878
1879 if (SrcSize == 0)
1880 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1881
1882 if (DstReg.isPhysical() && SrcTy.isValid()) {
1883 const TargetRegisterClass *DstRC =
1884 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1885 if (DstRC)
1886 DstSize = TRI->getRegSizeInBits(*DstRC);
1887 }
1888
1889 if (DstSize == 0)
1890 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1891
1892 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1893 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1894 report("Copy Instruction is illegal with mismatching sizes", MI);
1895 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1896 << "\n";
1897 }
1898 }
1899 break;
1900 }
1901 case TargetOpcode::STATEPOINT: {
1902 StatepointOpers SO(MI);
1903 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1904 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1905 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1906 report("meta operands to STATEPOINT not constant!", MI);
1907 break;
1908 }
1909
1910 auto VerifyStackMapConstant = [&](unsigned Offset) {
1911 if (Offset >= MI->getNumOperands()) {
1912 report("stack map constant to STATEPOINT is out of range!", MI);
1913 return;
1914 }
1915 if (!MI->getOperand(Offset - 1).isImm() ||
1916 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1917 !MI->getOperand(Offset).isImm())
1918 report("stack map constant to STATEPOINT not well formed!", MI);
1919 };
1920 VerifyStackMapConstant(SO.getCCIdx());
1921 VerifyStackMapConstant(SO.getFlagsIdx());
1922 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1923 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1924 VerifyStackMapConstant(SO.getNumAllocaIdx());
1925 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1926
1927 // Verify that all explicit statepoint defs are tied to gc operands as
1928 // they are expected to be a relocation of gc operands.
1929 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1930 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1931 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1932 unsigned UseOpIdx;
1933 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1934 report("STATEPOINT defs expected to be tied", MI);
1935 break;
1936 }
1937 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1938 report("STATEPOINT def tied to non-gc operand", MI);
1939 break;
1940 }
1941 }
1942
1943 // TODO: verify we have properly encoded deopt arguments
1944 } break;
1945 case TargetOpcode::INSERT_SUBREG: {
1946 unsigned InsertedSize;
1947 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1948 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1949 else
1950 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1951 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1952 if (SubRegSize < InsertedSize) {
1953 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1954 "size than the subreg it was inserted into", MI);
1955 break;
1956 }
1957 } break;
1958 case TargetOpcode::REG_SEQUENCE: {
1959 unsigned NumOps = MI->getNumOperands();
1960 if (!(NumOps & 1)) {
1961 report("Invalid number of operands for REG_SEQUENCE", MI);
1962 break;
1963 }
1964
1965 for (unsigned I = 1; I != NumOps; I += 2) {
1966 const MachineOperand &RegOp = MI->getOperand(I);
1967 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
1968
1969 if (!RegOp.isReg())
1970 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
1971
1972 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
1973 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
1974 report("Invalid subregister index operand for REG_SEQUENCE",
1975 &SubRegOp, I + 1);
1976 }
1977 }
1978
1979 Register DstReg = MI->getOperand(0).getReg();
1980 if (DstReg.isPhysical())
1981 report("REG_SEQUENCE does not support physical register results", MI);
1982
1983 if (MI->getOperand(0).getSubReg())
1984 report("Invalid subreg result for REG_SEQUENCE", MI);
1985
1986 break;
1987 }
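REG_SEQUENCE therefore expects one virtual def followed by (register, subreg-index) pairs, roughly %dst = REG_SEQUENCE %a, subidx_a, %b, subidx_b, which is why an even operand count is rejected up front. The parity rule as a hypothetical one-liner:

    // One def plus (reg, subreg-index) pairs => operand count must be odd.
    static bool regSequenceOperandCountOK(unsigned NumOps) {
      return (NumOps & 1) != 0;
    }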
1988 }
1989}
1990
1991void
1992MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1993 const MachineInstr *MI = MO->getParent();
1994 const MCInstrDesc &MCID = MI->getDesc();
1995 unsigned NumDefs = MCID.getNumDefs();
1996 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1. Assuming the condition is false
2. Taking false branch
1997 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1998
1999 // The first MCID.NumDefs operands must be explicit register defines
2000 if (MONum < NumDefs) {
3. Assuming 'MONum' is < 'NumDefs'
4. Taking true branch
2001 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2002 if (!MO->isReg())
2003 report("Explicit definition must be a register", MO, MONum);
2004 else if (!MO->isDef() && !MCOI.isOptionalDef())
5. Assuming the condition is false
2005 report("Explicit definition marked as use", MO, MONum);
2006 else if (MO->isImplicit())
6. Assuming the condition is false
7. Taking false branch
2007 report("Explicit definition marked as implicit", MO, MONum);
2008 } else if (MONum < MCID.getNumOperands()) {
2009 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2010 // Don't check if it's the last operand in a variadic instruction. See,
2011 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2012 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2013 if (!IsOptional) {
2014 if (MO->isReg()) {
2015 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2016 report("Explicit operand marked as def", MO, MONum);
2017 if (MO->isImplicit())
2018 report("Explicit operand marked as implicit", MO, MONum);
2019 }
2020
2021 // Check that an instruction has register operands only as expected.
2022 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2023 !MO->isReg() && !MO->isFI())
2024 report("Expected a register operand.", MO, MONum);
2025 if (MO->isReg()) {
2026 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2027 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2028 !TII->isPCRelRegisterOperandLegal(*MO)))
2029 report("Expected a non-register operand.", MO, MONum);
2030 }
2031 }
2032
2033 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2034 if (TiedTo != -1) {
2035 if (!MO->isReg())
2036 report("Tied use must be a register", MO, MONum);
2037 else if (!MO->isTied())
2038 report("Operand should be tied", MO, MONum);
2039 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2040 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2041 else if (MO->getReg().isPhysical()) {
2042 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2043 if (!MOTied.isReg())
2044 report("Tied counterpart must be a register", &MOTied, TiedTo);
2045 else if (MOTied.getReg().isPhysical() &&
2046 MO->getReg() != MOTied.getReg())
2047 report("Tied physical registers must match.", &MOTied, TiedTo);
2048 }
2049 } else if (MO->isReg() && MO->isTied())
2050 report("Explicit operand should not be tied", MO, MONum);
2051 } else {
2052 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2053 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2054 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2055 }
2056
2057 switch (MO->getType()) {
8. Control jumps to 'case MO_Register:' at line 2058
2058 case MachineOperand::MO_Register: {
2059 // Verify debug flag on debug instructions. Check this first because reg0
2060 // indicates an undefined debug value.
2061 if (MI->isDebugInstr() && MO->isUse()) {
2062 if (!MO->isDebug())
2063 report("Register operand must be marked debug", MO, MONum);
2064 } else if (MO->isDebug()) {
9. Assuming the condition is false
10. Taking false branch
2065 report("Register operand must not be marked debug", MO, MONum);
2066 }
2067
2068 const Register Reg = MO->getReg();
2069 if (!Reg)
11. Assuming the condition is false
2070 return;
2071 if (MRI->tracksLiveness() && !MI->isDebugInstr())
12. Taking true branch
2072 checkLiveness(MO, MONum);
13. Calling 'MachineVerifier::checkLiveness'
2073
2074 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2075 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2076 report("Undef virtual register def operands require a subregister", MO, MONum);
2077
2078 // Verify the consistency of tied operands.
2079 if (MO->isTied()) {
2080 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2081 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2082 if (!OtherMO.isReg())
2083 report("Must be tied to a register", MO, MONum);
2084 if (!OtherMO.isTied())
2085 report("Missing tie flags on tied operand", MO, MONum);
2086 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2087 report("Inconsistent tie links", MO, MONum);
2088 if (MONum < MCID.getNumDefs()) {
2089 if (OtherIdx < MCID.getNumOperands()) {
2090 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2091 report("Explicit def tied to explicit use without tie constraint",
2092 MO, MONum);
2093 } else {
2094 if (!OtherMO.isImplicit())
2095 report("Explicit def should be tied to implicit use", MO, MONum);
2096 }
2097 }
2098 }
2099
2100 // Verify two-address constraints after the twoaddressinstruction pass.
2101 // Both the twoaddressinstruction pass and the phi-node-elimination pass
2102 // call MRI->leaveSSA() to mark MF as NoSSA, but the verification should run
2103 // after the twoaddressinstruction pass, not after phi-node-elimination. So
2104 // we shouldn't use NoSSA as the condition; instead we rely on the
2105 // TiedOpsRewritten property, which is set by the twoaddressinstruction
2106 // pass, to verify the two-address constraints.
2107 unsigned DefIdx;
2108 if (MF->getProperties().hasProperty(
2109 MachineFunctionProperties::Property::TiedOpsRewritten) &&
2110 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2111 Reg != MI->getOperand(DefIdx).getReg())
2112 report("Two-address instruction operands must be identical", MO, MONum);
2113
2114 // Check register classes.
2115 unsigned SubIdx = MO->getSubReg();
2116
2117 if (Reg.isPhysical()) {
2118 if (SubIdx) {
2119 report("Illegal subregister index for physical register", MO, MONum);
2120 return;
2121 }
2122 if (MONum < MCID.getNumOperands()) {
2123 if (const TargetRegisterClass *DRC =
2124 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2125 if (!DRC->contains(Reg)) {
2126 report("Illegal physical register for instruction", MO, MONum);
2127 errs() << printReg(Reg, TRI) << " is not a "
2128 << TRI->getRegClassName(DRC) << " register.\n";
2129 }
2130 }
2131 }
2132 if (MO->isRenamable()) {
2133 if (MRI->isReserved(Reg)) {
2134 report("isRenamable set on reserved register", MO, MONum);
2135 return;
2136 }
2137 }
2138 } else {
2139 // Virtual register.
2140 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2141 if (!RC) {
2142 // This is a generic virtual register.
2143
2144 // Do not allow undef uses for generic virtual registers. This ensures
2145 // getVRegDef can never fail and return null on a generic register.
2146 //
2147 // FIXME: This restriction should probably be broadened to all SSA
2148 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2149 // run on the SSA function just before phi elimination.
2150 if (MO->isUndef())
2151 report("Generic virtual register use cannot be undef", MO, MONum);
2152
2153 // Debug value instruction is permitted to use undefined vregs.
2154 // This is a performance measure to skip the overhead of immediately
2155 // pruning unused debug operands. The final undef substitution occurs
2156 // when debug values are allocated in LDVImpl::handleDebugValue, so
2157 // these verifications always apply after this pass.
2158 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2159 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2160 // If we're post-Select, we can't have gvregs anymore.
2161 if (isFunctionSelected) {
2162 report("Generic virtual register invalid in a Selected function",
2163 MO, MONum);
2164 return;
2165 }
2166
2167 // The gvreg must have a type and it must not have a SubIdx.
2168 LLT Ty = MRI->getType(Reg);
2169 if (!Ty.isValid()) {
2170 report("Generic virtual register must have a valid type", MO,
2171 MONum);
2172 return;
2173 }
2174
2175 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2176
2177 // If we're post-RegBankSelect, the gvreg must have a bank.
2178 if (!RegBank && isFunctionRegBankSelected) {
2179 report("Generic virtual register must have a bank in a "
2180 "RegBankSelected function",
2181 MO, MONum);
2182 return;
2183 }
2184
2185 // Make sure the register fits into its register bank if any.
2186 if (RegBank && Ty.isValid() &&
2187 RegBank->getSize() < Ty.getSizeInBits()) {
2188 report("Register bank is too small for virtual register", MO,
2189 MONum);
2190 errs() << "Register bank " << RegBank->getName() << " too small("
2191 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2192 << "-bits\n";
2193 return;
2194 }
2195 }
2196
2197 if (SubIdx) {
2198 report("Generic virtual register does not allow subregister index", MO,
2199 MONum);
2200 return;
2201 }
2202
2203 // If this is a target specific instruction and this operand
2204 // has register class constraint, the virtual register must
2205 // comply to it.
2206 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2207 MONum < MCID.getNumOperands() &&
2208 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2209 report("Virtual register does not match instruction constraint", MO,
2210 MONum);
2211 errs() << "Expect register class "
2212 << TRI->getRegClassName(
2213 TII->getRegClass(MCID, MONum, TRI, *MF))
2214 << " but got nothing\n";
2215 return;
2216 }
2217
2218 break;
2219 }
2220 if (SubIdx) {
2221 const TargetRegisterClass *SRC =
2222 TRI->getSubClassWithSubReg(RC, SubIdx);
2223 if (!SRC) {
2224 report("Invalid subregister index for virtual register", MO, MONum);
2225 errs() << "Register class " << TRI->getRegClassName(RC)
2226 << " does not support subreg index " << SubIdx << "\n";
2227 return;
2228 }
2229 if (RC != SRC) {
2230 report("Invalid register class for subregister index", MO, MONum);
2231 errs() << "Register class " << TRI->getRegClassName(RC)
2232 << " does not fully support subreg index " << SubIdx << "\n";
2233 return;
2234 }
2235 }
2236 if (MONum < MCID.getNumOperands()) {
2237 if (const TargetRegisterClass *DRC =
2238 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2239 if (SubIdx) {
2240 const TargetRegisterClass *SuperRC =
2241 TRI->getLargestLegalSuperClass(RC, *MF);
2242 if (!SuperRC) {
2243 report("No largest legal super class exists.", MO, MONum);
2244 return;
2245 }
2246 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2247 if (!DRC) {
2248 report("No matching super-reg register class.", MO, MONum);
2249 return;
2250 }
2251 }
2252 if (!RC->hasSuperClassEq(DRC)) {
2253 report("Illegal virtual register for instruction", MO, MONum);
2254 errs() << "Expected a " << TRI->getRegClassName(DRC)
2255 << " register, but got a " << TRI->getRegClassName(RC)
2256 << " register\n";
2257 }
2258 }
2259 }
2260 }
2261 break;
2262 }
2263
2264 case MachineOperand::MO_RegisterMask:
2265 regMasks.push_back(MO->getRegMask());
2266 break;
2267
2268 case MachineOperand::MO_MachineBasicBlock:
2269 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2270 report("PHI operand is not in the CFG", MO, MONum);
2271 break;
2272
2273 case MachineOperand::MO_FrameIndex:
2274 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2275 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2276 int FI = MO->getIndex();
2277 LiveInterval &LI = LiveStks->getInterval(FI);
2278 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2279
2280 bool stores = MI->mayStore();
2281 bool loads = MI->mayLoad();
2282 // For a memory-to-memory move, we need to check if the frame
2283 // index is used for storing or loading, by inspecting the
2284 // memory operands.
2285 if (stores && loads) {
2286 for (auto *MMO : MI->memoperands()) {
2287 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2288 if (PSV == nullptr) continue;
2289 const FixedStackPseudoSourceValue *Value =
2290 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2291 if (Value == nullptr) continue;
2292 if (Value->getFrameIndex() != FI) continue;
2293
2294 if (MMO->isStore())
2295 loads = false;
2296 else
2297 stores = false;
2298 break;
2299 }
2300 if (loads == stores)
2301 report("Missing fixed stack memoperand.", MI);
2302 }
2303 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2304 report("Instruction loads from dead spill slot", MO, MONum);
2305 errs() << "Live stack: " << LI << '\n';
2306 }
2307 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2308 report("Instruction stores to dead spill slot", MO, MONum);
2309 errs() << "Live stack: " << LI << '\n';
2310 }
2311 }
2312 break;
2313
2314 case MachineOperand::MO_CFIIndex:
2315 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2316 report("CFI instruction has invalid index", MO, MONum);
2317 break;
2318
2319 default:
2320 break;
2321 }
2322}
2323
2324void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2325 unsigned MONum, SlotIndex UseIdx,
2326 const LiveRange &LR,
2327 Register VRegOrUnit,
2328 LaneBitmask LaneMask) {
2329 LiveQueryResult LRQ = LR.Query(UseIdx);
2330 // Check if we have a segment at the use, note however that we only need one
2331 // live subregister range, the others may be dead.
2332 if (!LRQ.valueIn() && LaneMask.none()) {
2333 report("No live segment at use", MO, MONum);
2334 report_context_liverange(LR);
2335 report_context_vreg_regunit(VRegOrUnit);
2336 report_context(UseIdx);
2337 }
2338 if (MO->isKill() && !LRQ.isKill()) {
2339 report("Live range continues after kill flag", MO, MONum);
2340 report_context_liverange(LR);
2341 report_context_vreg_regunit(VRegOrUnit);
2342 if (LaneMask.any())
2343 report_context_lanemask(LaneMask);
2344 report_context(UseIdx);
2345 }
2346}
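Both complaints above are point queries against the live range: valueIn() asks whether some value is live into the use, and isKill() whether the range actually stops there. A very rough standalone model over half-open segments, hypothetical and ignoring SlotIndex subtleties:

    #include <vector>

    struct Segment { unsigned Start, End; }; // live over [Start, End)

    static bool valueInAt(const std::vector<Segment> &LR, unsigned UseIdx) {
      for (const Segment &S : LR)
        if (S.Start <= UseIdx && UseIdx < S.End)
          return true;             // otherwise: "No live segment at use"
      return false;
    }

    static bool killConsistentAt(const std::vector<Segment> &LR, unsigned UseIdx) {
      for (const Segment &S : LR)
        if (S.Start <= UseIdx && UseIdx + 1 < S.End)
          return false;            // "Live range continues after kill flag"
      return true;
    }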
2347
2348void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2349 unsigned MONum, SlotIndex DefIdx,
2350 const LiveRange &LR,
2351 Register VRegOrUnit,
2352 bool SubRangeCheck,
2353 LaneBitmask LaneMask) {
2354 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2355 // The LR can correspond to the whole reg, and its def slot is not obliged
2356 // to be the same as the MO's def slot. E.g. we may be checking a "normal"
2357 // subreg MO here while another early-clobber subreg MO in the same
2358 // instruction gives the whole reg an early-clobber def slot that differs
2359 // from the currently checked MO's def slot. For example:
2360 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2361 // The check that there is an early-clobber def of the same superregister
2362 // somewhere is performed in visitMachineFunctionAfter().
2363 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2364 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2365 (VNI->def != DefIdx &&
2366 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2367 report("Inconsistent valno->def", MO, MONum);
2368 report_context_liverange(LR);
2369 report_context_vreg_regunit(VRegOrUnit);
2370 if (LaneMask.any())
2371 report_context_lanemask(LaneMask);
2372 report_context(*VNI);
2373 report_context(DefIdx);
2374 }
2375 } else {
2376 report("No live segment at def", MO, MONum);
2377 report_context_liverange(LR);
2378 report_context_vreg_regunit(VRegOrUnit);
2379 if (LaneMask.any())
2380 report_context_lanemask(LaneMask);
2381 report_context(DefIdx);
2382 }
2383 // Check that, if the dead def flag is present, LiveInts agree.
2384 if (MO->isDead()) {
2385 LiveQueryResult LRQ = LR.Query(DefIdx);
2386 if (!LRQ.isDeadDef()) {
2387 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2388 // A dead subreg def only tells us that the specific subreg is dead. There
2389 // could be other non-dead defs of other subregs, or we could have other
2390 // parts of the register being live through the instruction. So unless we
2391 // are checking liveness for a subrange it is ok for the live range to
2392 // continue, given that we have a dead def of a subregister.
2393 if (SubRangeCheck || MO->getSubReg() == 0) {
2394 report("Live range continues after dead def flag", MO, MONum);
2395 report_context_liverange(LR);
2396 report_context_vreg_regunit(VRegOrUnit);
2397 if (LaneMask.any())
2398 report_context_lanemask(LaneMask);
2399 }
2400 }
2401 }
2402}
2403
2404void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2405 const MachineInstr *MI = MO->getParent();
2406 const Register Reg = MO->getReg();
2407 const unsigned SubRegIdx = MO->getSubReg();
2408
2409 const LiveInterval *LI = nullptr;
14. 'LI' initialized to a null pointer value
2410 if (LiveInts && Reg.isVirtual()) {
15. Assuming field 'LiveInts' is null
2411 if (LiveInts->hasInterval(Reg)) {
2412 LI = &LiveInts->getInterval(Reg);
2413 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2414 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2415 report("Live interval for subreg operand has no subranges", MO, MONum);
2416 } else {
2417 report("Virtual register has no live interval", MO, MONum);
2418 }
2419 }
2420
2421 // Both use and def operands can read a register.
2422 if (MO->readsReg()) {
16. Taking false branch
2423 if (MO->isKill())
2424 addRegWithSubRegs(regsKilled, Reg);
2425
2426 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2427 // which case we have already checked that LiveVars knows any kills on the
2428 // bundle header instead).
2429 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2430 !MI->isBundledWithPred()) {
2431 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2432 if (!is_contained(VI.Kills, MI))
2433 report("Kill missing from LiveVariables", MO, MONum);
2434 }
2435
2436 // Check LiveInts liveness and kill.
2437 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2438 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2439 // Check the cached regunit intervals.
2440 if (Reg.isPhysical() && !isReserved(Reg)) {
2441 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2442 ++Units) {
2443 if (MRI->isReservedRegUnit(*Units))
2444 continue;
2445 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2446 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2447 }
2448 }
2449
2450 if (Reg.isVirtual()) {
2451 // This is a virtual register interval.
2452 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2453
2454 if (LI->hasSubRanges() && !MO->isDef()) {
2455 LaneBitmask MOMask = SubRegIdx != 0
2456 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2457 : MRI->getMaxLaneMaskForVReg(Reg);
2458 LaneBitmask LiveInMask;
2459 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2460 if ((MOMask & SR.LaneMask).none())
2461 continue;
2462 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2463 LiveQueryResult LRQ = SR.Query(UseIdx);
2464 if (LRQ.valueIn())
2465 LiveInMask |= SR.LaneMask;
2466 }
2467 // At least part of the register has to be live at the use.
2468 if ((LiveInMask & MOMask).none()) {
2469 report("No live subrange at use", MO, MONum);
2470 report_context(*LI);
2471 report_context(UseIdx);
2472 }
2473 }
2474 }
2475 }
2476
2477 // Use of a dead register.
2478 if (!regsLive.count(Reg)) {
2479 if (Reg.isPhysical()) {
2480 // Reserved registers may be used even when 'dead'.
2481 bool Bad = !isReserved(Reg);
2482 // We are fine if just any subregister has a defined value.
2483 if (Bad) {
2484
2485 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2486 if (regsLive.count(SubReg)) {
2487 Bad = false;
2488 break;
2489 }
2490 }
2491 }
2492 // If there is an additional implicit-use of a super register we stop
2493 // here. By definition we are fine if the super register is not
2494 // (completely) dead, if the complete super register is dead we will
2495 // get a report for its operand.
2496 if (Bad) {
2497 for (const MachineOperand &MOP : MI->uses()) {
2498 if (!MOP.isReg() || !MOP.isImplicit())
2499 continue;
2500
2501 if (!MOP.getReg().isPhysical())
2502 continue;
2503
2504 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2505 Bad = false;
2506 }
2507 }
2508 if (Bad)
2509 report("Using an undefined physical register", MO, MONum);
2510 } else if (MRI->def_empty(Reg)) {
2511 report("Reading virtual register without a def", MO, MONum);
2512 } else {
2513 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2514 // We don't know which virtual registers are live in, so only complain
2515 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2516 // must be live in. PHI instructions are handled separately.
2517 if (MInfo.regsKilled.count(Reg))
2518 report("Using a killed virtual register", MO, MONum);
2519 else if (!MI->isPHI())
2520 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2521 }
2522 }
2523 }
2524
2525 if (MO->isDef()) {
17. Taking true branch
2526 // Register defined.
2527 // TODO: verify that earlyclobber ops are not used.
2528 if (MO->isDead())
18. Assuming the condition is false
19. Taking false branch
2529 addRegWithSubRegs(regsDead, Reg);
2530 else
2531 addRegWithSubRegs(regsDefined, Reg);
20. Calling 'MachineVerifier::addRegWithSubRegs'
24. Returning from 'MachineVerifier::addRegWithSubRegs'
2532
2533 // Verify SSA form.
2534 if (MRI->isSSA() && Reg.isVirtual() &&
2535 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2536 report("Multiple virtual register defs in SSA form", MO, MONum);
2537
2538 // Check LiveInts for a live segment, but only for virtual registers.
2539 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
25. Assuming field 'LiveInts' is non-null
26. Taking true branch
2540 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2541 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2542
2543 if (Reg.isVirtual()) {
27. Assuming the condition is true
28. Taking true branch
2544 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
29. Forming reference to null pointer
2545
2546 if (LI->hasSubRanges()) {
2547 LaneBitmask MOMask = SubRegIdx != 0
2548 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2549 : MRI->getMaxLaneMaskForVReg(Reg);
2550 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2551 if ((SR.LaneMask & MOMask).none())
2552 continue;
2553 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2554 }
2555 }
2556 }
2557 }
2558 }
2559}
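This function carries the path behind the report's warning: LI is initialized to null (step 14) and only assigned when an interval is found; on the flagged path the analyzer treats the LiveInts field as null at the first test (step 15) but non-null at the later one (step 25, after the intervening calls), so the *LI at line 2544 forms a reference to null. LI can also stay null on a real run when hasInterval(Reg) is false, since the verifier reports "Virtual register has no live interval" and keeps going. A minimal standalone reconstruction with the missing guard, a sketch rather than the upstream fix:

    #include <cassert>

    struct LiveIntervalModel {}; // hypothetical stand-in for llvm::LiveInterval

    static void checkAtDefModel(const LiveIntervalModel &LI) { (void)LI; }

    static void checkLivenessModel(LiveIntervalModel *Found, bool HaveLiveInts,
                                   bool IsVirtualDef) {
      LiveIntervalModel *LI = nullptr;    // step 14: initialized to null
      if (HaveLiveInts)
        LI = Found;                       // may remain null: no interval found
      if (HaveLiveInts && IsVirtualDef) { // steps 25-28
        if (!LI)                          // the guard missing before line 2544
          return;
        checkAtDefModel(*LI);             // step 29 happens without the guard
      }
    }

    int main() {
      checkLivenessModel(nullptr, true, true); // safe thanks to the guard
    }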
2560
2561// This function gets called after visiting all instructions in a bundle. The
2562// argument points to the bundle header.
2563// Normal stand-alone instructions are also considered 'bundles', and this
2564// function is called for all of them.
2565void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2566 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2567 set_union(MInfo.regsKilled, regsKilled);
2568 set_subtract(regsLive, regsKilled); regsKilled.clear();
2569 // Kill any masked registers.
2570 while (!regMasks.empty()) {
2571 const uint32_t *Mask = regMasks.pop_back_val();
2572 for (Register Reg : regsLive)
2573 if (Reg.isPhysical() &&
2574 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2575 regsDead.push_back(Reg);
2576 }
2577 set_subtract(regsLive, regsDead); regsDead.clear();
2578 set_union(regsLive, regsDefined); regsDefined.clear();
2579}
2580
2581void
2582MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2583 MBBInfoMap[MBB].regsLiveOut = regsLive;
2584 regsLive.clear();
2585
2586 if (Indexes) {
2587 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2588 if (!(stop > lastIndex)) {
2589 report("Block ends before last instruction index", MBB);
2590 errs() << "Block ends at " << stop
2591 << " last instruction was at " << lastIndex << '\n';
2592 }
2593 lastIndex = stop;
2594 }
2595}
2596
2597namespace {
2598// This implements a set of registers that serves as a filter: can filter other
2599// sets by passing through elements not in the filter and blocking those that
2600// are. Any filter implicitly includes the full set of physical registers upon
2601// creation, thus filtering them all out. The filter itself as a set only grows,
2602// and needs to be as efficient as possible.
2603struct VRegFilter {
2604 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2605 // no duplicates. Both virtual and physical registers are fine.
2606 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2607 SmallVector<Register, 0> VRegsBuffer;
2608 filterAndAdd(FromRegSet, VRegsBuffer);
2609 }
2610 // Filter \p FromRegSet through the filter and append passed elements into \p
2611 // ToVRegs. All elements appended are then added to the filter itself.
2612 // \returns true if anything changed.
2613 template <typename RegSetT>
2614 bool filterAndAdd(const RegSetT &FromRegSet,
2615 SmallVectorImpl<Register> &ToVRegs) {
2616 unsigned SparseUniverse = Sparse.size();
2617 unsigned NewSparseUniverse = SparseUniverse;
2618 unsigned NewDenseSize = Dense.size();
2619 size_t Begin = ToVRegs.size();
2620 for (Register Reg : FromRegSet) {
2621 if (!Reg.isVirtual())
2622 continue;
2623 unsigned Index = Register::virtReg2Index(Reg);
2624 if (Index < SparseUniverseMax) {
2625 if (Index < SparseUniverse && Sparse.test(Index))
2626 continue;
2627 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2628 } else {
2629 if (Dense.count(Reg))
2630 continue;
2631 ++NewDenseSize;
2632 }
2633 ToVRegs.push_back(Reg);
2634 }
2635 size_t End = ToVRegs.size();
2636 if (Begin == End)
2637 return false;
2638 // Reserving space in sets once performs better than doing so continuously
2639 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2640 // tuned all the way down) and double iteration (the second one is over a
2641 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2642 Sparse.resize(NewSparseUniverse);
2643 Dense.reserve(NewDenseSize);
2644 for (unsigned I = Begin; I < End; ++I) {
2645 Register Reg = ToVRegs[I];
2646 unsigned Index = Register::virtReg2Index(Reg);
2647 if (Index < SparseUniverseMax)
2648 Sparse.set(Index);
2649 else
2650 Dense.insert(Reg);
2651 }
2652 return true;
2653 }
2654
2655private:
2656 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2657 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2658 // are tracked by Dense. The only purpose of the threshold and the Dense set
2659 // is to have a reasonably growing memory usage in pathological cases (large
2660 // number of very sparse VRegFilter instances live at the same time). In
2661 // practice even in the worst-by-execution-time cases having all elements
2662 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2663 // space efficient than if tracked by Dense. The threshold is set to keep the
2664 // worst-case memory usage within 2x of figures determined empirically for
2665 // the "all Dense" scenario in such worst-by-execution-time cases.
2666 BitVector Sparse;
2667 DenseSet<unsigned> Dense;
2668};
2669
2670// Implements both a transfer function and a (binary, in-place) join operator
2671// for a dataflow over register sets with set union join and filtering transfer
2672// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2673// Maintains out_b as its state, allowing for O(n) iteration over it at any
2674// time, where n is the size of the set (as opposed to O(U) where U is the
2675// universe). filter_b implicitly contains all physical registers at all times.
2676class FilteringVRegSet {
2677 VRegFilter Filter;
2678 SmallVector<Register, 0> VRegs;
2679
2680public:
2681 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2682 // Both virtual and physical registers are fine.
2683 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2684 Filter.add(RS);
2685 }
2686 // Passes \p RS through the filter_b (transfer function) and adds what's left
2687 // to itself (out_b).
2688 template <typename RegSetT> bool add(const RegSetT &RS) {
2689 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2690 // a set union) just add everything being added here to the Filter as well.
2691 return Filter.filterAndAdd(RS, VRegs);
2692 }
2693 using const_iterator = decltype(VRegs)::const_iterator;
2694 const_iterator begin() const { return VRegs.begin(); }
2695 const_iterator end() const { return VRegs.end(); }
2696 size_t size() const { return VRegs.size(); }
2697};
2698} // namespace
2699
2700// Calculate the largest possible vregsPassed sets. These are the registers that
2701// can pass through an MBB live, but may not be live every time. It is assumed
2702// that all vregsPassed sets are empty before the call.
2703void MachineVerifier::calcRegsPassed() {
2704 if (MF->empty())
2705 // ReversePostOrderTraversal doesn't handle empty functions.
2706 return;
2707
2708 for (const MachineBasicBlock *MB :
2709 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2710 FilteringVRegSet VRegs;
2711 BBInfo &Info = MBBInfoMap[MB];
2712 assert(Info.reachable);
2713
2714 VRegs.addToFilter(Info.regsKilled);
2715 VRegs.addToFilter(Info.regsLiveOut);
2716 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2717 const BBInfo &PredInfo = MBBInfoMap[Pred];
2718 if (!PredInfo.reachable)
2719 continue;
2720
2721 VRegs.add(PredInfo.regsLiveOut);
2722 VRegs.add(PredInfo.vregsPassed);
2723 }
2724 Info.vregsPassed.reserve(VRegs.size());
2725 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2726 }
2727}
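In dataflow terms, the loop above computes in one reverse-post-order sweep: vregsPassed(B) is the union, over reachable predecessors P, of regsLiveOut(P) and vregsPassed(P), filtered by B's own regsKilled and regsLiveOut. A toy standalone model of that transfer function, with std::set standing in for VRegFilter:

    #include <set>

    using RegSet = std::set<unsigned>;

    // Pass through whatever a predecessor makes available, minus registers
    // this block kills or itself provides as live-out (the filter).
    static RegSet vregsPassedModel(const RegSet &PredLiveOut,
                                   const RegSet &PredPassed,
                                   const RegSet &Filter) {
      RegSet Out;
      for (const RegSet *S : {&PredLiveOut, &PredPassed})
        for (unsigned R : *S)
          if (!Filter.count(R))
            Out.insert(R);
      return Out;
    }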
2728
2729// Calculate the set of virtual registers that must be passed through each basic
2730// block in order to satisfy the requirements of successor blocks. This is very
2731// similar to calcRegsPassed, only backwards.
2732void MachineVerifier::calcRegsRequired() {
2733 // First push live-in regs to predecessors' vregsRequired.
2734 SmallPtrSet<const MachineBasicBlock*, 8> todo;
2735 for (const auto &MBB : *MF) {
2736 BBInfo &MInfo = MBBInfoMap[&MBB];
2737 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2738 BBInfo &PInfo = MBBInfoMap[Pred];
2739 if (PInfo.addRequired(MInfo.vregsLiveIn))
2740 todo.insert(Pred);
2741 }
2742
2743 // Handle the PHI node.
2744 for (const MachineInstr &MI : MBB.phis()) {
2745 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2746 // Skip those Operands which are undef regs or not regs.
2747 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2748 continue;
2749
2750 // Get register and predecessor for one PHI edge.
2751 Register Reg = MI.getOperand(i).getReg();
2752 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2753
2754 BBInfo &PInfo = MBBInfoMap[Pred];
2755 if (PInfo.addRequired(Reg))
2756 todo.insert(Pred);
2757 }
2758 }
2759 }
2760
2761 // Iteratively push vregsRequired to predecessors. This will converge to the
2762 // same final state regardless of DenseSet iteration order.
2763 while (!todo.empty()) {
2764 const MachineBasicBlock *MBB = *todo.begin();
2765 todo.erase(MBB);
2766 BBInfo &MInfo = MBBInfoMap[MBB];
2767 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2768 if (Pred == MBB)
2769 continue;
2770 BBInfo &SInfo = MBBInfoMap[Pred];
2771 if (SInfo.addRequired(MInfo.vregsRequired))
2772 todo.insert(Pred);
2773 }
2774 }
2775}
2776
2777// Check PHI instructions at the beginning of MBB. It is assumed that
2778// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2779void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2780 BBInfo &MInfo = MBBInfoMap[&MBB];
2781
2782 SmallPtrSet<const MachineBasicBlock*, 8> seen;
2783 for (const MachineInstr &Phi : MBB) {
2784 if (!Phi.isPHI())
2785 break;
2786 seen.clear();
2787
2788 const MachineOperand &MODef = Phi.getOperand(0);
2789 if (!MODef.isReg() || !MODef.isDef()) {
2790 report("Expected first PHI operand to be a register def", &MODef, 0);
2791 continue;
2792 }
2793 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2794 MODef.isEarlyClobber() || MODef.isDebug())
2795 report("Unexpected flag on PHI operand", &MODef, 0);
2796 Register DefReg = MODef.getReg();
2797 if (!DefReg.isVirtual())
2798 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2799
2800 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2801 const MachineOperand &MO0 = Phi.getOperand(I);
2802 if (!MO0.isReg()) {
2803 report("Expected PHI operand to be a register", &MO0, I);
2804 continue;
2805 }
2806 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2807 MO0.isDebug() || MO0.isTied())
2808 report("Unexpected flag on PHI operand", &MO0, I);
2809
2810 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2811 if (!MO1.isMBB()) {
2812 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2813 continue;
2814 }
2815
2816 const MachineBasicBlock &Pre = *MO1.getMBB();
2817 if (!Pre.isSuccessor(&MBB)) {
2818 report("PHI input is not a predecessor block", &MO1, I + 1);
2819 continue;
2820 }
2821
2822 if (MInfo.reachable) {
2823 seen.insert(&Pre);
2824 BBInfo &PrInfo = MBBInfoMap[&Pre];
2825 if (!MO0.isUndef() && PrInfo.reachable &&
2826 !PrInfo.isLiveOut(MO0.getReg()))
2827 report("PHI operand is not live-out from predecessor", &MO0, I);
2828 }
2829 }
2830
2831 // Did we see all predecessors?
2832 if (MInfo.reachable) {
2833 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2834 if (!seen.count(Pred)) {
2835 report("Missing PHI operand", &Phi);
2836 errs() << printMBBReference(*Pred)
2837 << " is a predecessor according to the CFG.\n";
2838 }
2839 }
2840 }
2841 }
2842}
2843
2844void MachineVerifier::visitMachineFunctionAfter() {
2845 calcRegsPassed();
2846
2847 for (const MachineBasicBlock &MBB : *MF)
2848 checkPHIOps(MBB);
2849
2850 // Now check liveness info if available
2851 calcRegsRequired();
2852
2853 // Check for killed virtual registers that should be live out.
2854 for (const auto &MBB : *MF) {
2855 BBInfo &MInfo = MBBInfoMap[&MBB];
2856 for (Register VReg : MInfo.vregsRequired)
2857 if (MInfo.regsKilled.count(VReg)) {
2858 report("Virtual register killed in block, but needed live out.", &MBB);
2859 errs() << "Virtual register " << printReg(VReg)
2860 << " is used after the block.\n";
2861 }
2862 }
2863
2864 if (!MF->empty()) {
2865 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2866 for (Register VReg : MInfo.vregsRequired) {
2867 report("Virtual register defs don't dominate all uses.", MF);
2868 report_context_vreg(VReg);
2869 }
2870 }
2871
2872 if (LiveVars)
2873 verifyLiveVariables();
2874 if (LiveInts)
2875 verifyLiveIntervals();
2876
2877 // Check live-in list of each MBB. If a register is live into MBB, check
2878 // that the register is in regsLiveOut of each predecessor block. Since
2879 // this must come from a definition in the predecessor or its live-in
2880 // list, this will catch a live-through case where the predecessor does not
2881 // have the register in its live-in list. This currently only checks
2882 // registers that have no aliases, are not allocatable and are not
2883 // reserved, which could mean a condition code register for instance.
2884 if (MRI->tracksLiveness())
2885 for (const auto &MBB : *MF)
2886 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2887 MCPhysReg LiveInReg = P.PhysReg;
2888 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2889 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2890 continue;
2891 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2892 BBInfo &PInfo = MBBInfoMap[Pred];
2893 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2894 report("Live in register not found to be live out from predecessor.",
2895 &MBB);
2896 errs() << TRI->getName(LiveInReg)
2897 << " not found to be live out from "
2898 << printMBBReference(*Pred) << "\n";
2899 }
2900 }
2901 }
2902
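  // Call site info may only be attached to call instructions.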
2903 for (auto CSInfo : MF->getCallSitesInfo())
2904 if (!CSInfo.first->isCall())
2905 report("Call site info referencing instruction that is not call", MF);
2906
2907 // If there's debug-info, check that we don't have any duplicate value
2908 // tracking numbers.
2909 if (MF->getFunction().getSubprogram()) {
2910 DenseSet<unsigned> SeenNumbers;
2911 for (const auto &MBB : *MF) {
2912 for (const auto &MI : MBB) {
2913 if (auto Num = MI.peekDebugInstrNum()) {
2914 auto Result = SeenNumbers.insert((unsigned)Num);
2915 if (!Result.second)
2916 report("Instruction has a duplicated value tracking number", &MI);
2917 }
2918 }
2919 }
2920 }
2921}
2922
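/// Cross-check the verifier's per-block liveness (vregsRequired) against the
/// AliveBlocks sets computed by the LiveVariables analysis.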
2923void MachineVerifier::verifyLiveVariables() {
2924  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2925 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2926 Register Reg = Register::index2VirtReg(I);
2927 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2928 for (const auto &MBB : *MF) {
2929 BBInfo &MInfo = MBBInfoMap[&MBB];
2930
2931 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2932 if (MInfo.vregsRequired.count(Reg)) {
2933 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2934 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2935 errs() << "Virtual register " << printReg(Reg)
2936 << " must be live through the block.\n";
2937 }
2938 } else {
2939 if (VI.AliveBlocks.test(MBB.getNumber())) {
2940 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2941 errs() << "Virtual register " << printReg(Reg)
2942 << " is not needed live through the block.\n";
2943 }
2944 }
2945 }
2946 }
2947}
2948
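/// Check that every virtual register with remaining uses or defs has a live
/// interval, verify each interval, and verify the cached regunit live ranges.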
2949void MachineVerifier::verifyLiveIntervals() {
2950  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2951 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2952 Register Reg = Register::index2VirtReg(I);
2953
2954 // Spilling and splitting may leave unused registers around. Skip them.
2955 if (MRI->reg_nodbg_empty(Reg))
2956 continue;
2957
2958 if (!LiveInts->hasInterval(Reg)) {
2959 report("Missing live interval for virtual register", MF);
2960 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2961 continue;
2962 }
2963
2964 const LiveInterval &LI = LiveInts->getInterval(Reg);
2965    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2966 verifyLiveInterval(LI);
2967 }
2968
2969 // Verify all the cached regunit intervals.
2970 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2971 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2972 verifyLiveRange(*LR, i);
2973}
2974
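/// Check a single value number of a live range: it must be defined at a valid
/// slot index, either by a PHI at the start of its block or by an instruction
/// that actually writes the register at the expected kind of slot.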
2975void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2976 const VNInfo *VNI, Register Reg,
2977 LaneBitmask LaneMask) {
2978 if (VNI->isUnused())
2979 return;
2980
2981 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2982
2983 if (!DefVNI) {
2984 report("Value not live at VNInfo def and not marked unused", MF);
2985 report_context(LR, Reg, LaneMask);
2986 report_context(*VNI);
2987 return;
2988 }
2989
2990 if (DefVNI != VNI) {
2991 report("Live segment at def has different VNInfo", MF);
2992 report_context(LR, Reg, LaneMask);
2993 report_context(*VNI);
2994 return;
2995 }
2996
2997 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2998 if (!MBB) {
2999 report("Invalid VNInfo definition index", MF);
3000 report_context(LR, Reg, LaneMask);
3001 report_context(*VNI);
3002 return;
3003 }
3004
3005 if (VNI->isPHIDef()) {
3006 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3007 report("PHIDef VNInfo is not defined at MBB start", MBB);
3008 report_context(LR, Reg, LaneMask);
3009 report_context(*VNI);
3010 }
3011 return;
3012 }
3013
3014 // Non-PHI def.
3015 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3016 if (!MI) {
3017 report("No instruction at VNInfo def index", MBB);
3018 report_context(LR, Reg, LaneMask);
3019 report_context(*VNI);
3020 return;
3021 }
3022
3023 if (Reg != 0) {
3024 bool hasDef = false;
3025 bool isEarlyClobber = false;
3026 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3027 if (!MOI->isReg() || !MOI->isDef())
3028 continue;
3029 if (Reg.isVirtual()) {
3030 if (MOI->getReg() != Reg)
3031 continue;
3032 } else {
3033 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3034 continue;
3035 }
3036 if (LaneMask.any() &&
3037 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3038 continue;
3039 hasDef = true;
3040 if (MOI->isEarlyClobber())
3041 isEarlyClobber = true;
3042 }
3043
3044 if (!hasDef) {
3045 report("Defining instruction does not modify register", MI);
3046 report_context(LR, Reg, LaneMask);
3047 report_context(*VNI);
3048 }
3049
3050 // Early clobber defs begin at USE slots, but other defs must begin at
3051 // DEF slots.
3052 if (isEarlyClobber) {
3053 if (!VNI->def.isEarlyClobber()) {
3054 report("Early clobber def must be at an early-clobber slot", MBB);
3055 report_context(LR, Reg, LaneMask);
3056 report_context(*VNI);
3057 }
3058 } else if (!VNI->def.isRegister()) {
3059 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3060 report_context(LR, Reg, LaneMask);
3061 report_context(*VNI);
3062 }
3063 }
3064}
3065
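/// Check a single segment of a live range: its endpoints, the instruction or
/// block boundary that ends it, and that its value is live-out of every
/// predecessor of each block the segment spans.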
3066void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3067 const LiveRange::const_iterator I,
3068 Register Reg,
3069 LaneBitmask LaneMask) {
3070 const LiveRange::Segment &S = *I;
3071 const VNInfo *VNI = S.valno;
3072  assert(VNI && "Live segment has no valno");
3073
3074 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3075 report("Foreign valno in live segment", MF);
3076 report_context(LR, Reg, LaneMask);
3077 report_context(S);
3078 report_context(*VNI);
3079 }
3080
3081 if (VNI->isUnused()) {
3082 report("Live segment valno is marked unused", MF);
3083 report_context(LR, Reg, LaneMask);
3084 report_context(S);
3085 }
3086
3087 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3088 if (!MBB) {
3089 report("Bad start of live segment, no basic block", MF);
3090 report_context(LR, Reg, LaneMask);
3091 report_context(S);
3092 return;
3093 }
3094 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3095 if (S.start != MBBStartIdx && S.start != VNI->def) {
3096 report("Live segment must begin at MBB entry or valno def", MBB);
3097 report_context(LR, Reg, LaneMask);
3098 report_context(S);
3099 }
3100
3101 const MachineBasicBlock *EndMBB =
3102 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3103 if (!EndMBB) {
3104 report("Bad end of live segment, no basic block", MF);
3105 report_context(LR, Reg, LaneMask);
3106 report_context(S);
3107 return;
3108 }
3109
3110 // No more checks for live-out segments.
3111 if (S.end == LiveInts->getMBBEndIdx(EndMBB))
3112 return;
3113
3114  // RegUnit intervals are allowed to contain dead PHI-defs.
3115 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3116 S.end == VNI->def.getDeadSlot())
3117 return;
3118
3119 // The live segment is ending inside EndMBB
3120 const MachineInstr *MI =
3121 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3122 if (!MI) {
3123 report("Live segment doesn't end at a valid instruction", EndMBB);
3124 report_context(LR, Reg, LaneMask);
3125 report_context(S);
3126 return;
3127 }
3128
3129 // The block slot must refer to a basic block boundary.
3130 if (S.end.isBlock()) {
3131 report("Live segment ends at B slot of an instruction", EndMBB);
3132 report_context(LR, Reg, LaneMask);
3133 report_context(S);
3134 }
3135
3136 if (S.end.isDead()) {
3137 // Segment ends on the dead slot.
3138 // That means there must be a dead def.
3139 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3140 report("Live segment ending at dead slot spans instructions", EndMBB);
3141 report_context(LR, Reg, LaneMask);
3142 report_context(S);
3143 }
3144 }
3145
3146 // After tied operands are rewritten, a live segment can only end at an
3147 // early-clobber slot if it is being redefined by an early-clobber def.
3148 // TODO: Before tied operands are rewritten, a live segment can only end at an
3149 // early-clobber slot if the last use is tied to an early-clobber def.
3150 if (MF->getProperties().hasProperty(
3151 MachineFunctionProperties::Property::TiedOpsRewritten) &&
3152 S.end.isEarlyClobber()) {
3153 if (I+1 == LR.end() || (I+1)->start != S.end) {
3154 report("Live segment ending at early clobber slot must be "
3155 "redefined by an EC def in the same instruction", EndMBB);
3156 report_context(LR, Reg, LaneMask);
3157 report_context(S);
3158 }
3159 }
3160
3161 // The following checks only apply to virtual registers. Physreg liveness
3162 // is too weird to check.
3163 if (Reg.isVirtual()) {
3164 // A live segment can end with either a redefinition, a kill flag on a
3165 // use, or a dead flag on a def.
3166 bool hasRead = false;
3167 bool hasSubRegDef = false;
3168 bool hasDeadDef = false;
3169 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3170 if (!MOI->isReg() || MOI->getReg() != Reg)
3171 continue;
3172 unsigned Sub = MOI->getSubReg();
3173 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
3174 : LaneBitmask::getAll();
3175 if (MOI->isDef()) {
3176 if (Sub != 0) {
3177 hasSubRegDef = true;
3178 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3179 // mask for subregister defs. Read-undef defs will be handled by
3180 // readsReg below.
3181 SLM = ~SLM;
3182 }
3183 if (MOI->isDead())
3184 hasDeadDef = true;
3185 }
3186 if (LaneMask.any() && (LaneMask & SLM).none())
3187 continue;
3188 if (MOI->readsReg())
3189 hasRead = true;
3190 }
3191 if (S.end.isDead()) {
3192 // Make sure that the corresponding machine operand for a "dead" live
3193 // range has the dead flag. We cannot perform this check for subregister
3194      // live ranges, as partially dead values are allowed.
3195 if (LaneMask.none() && !hasDeadDef) {
3196 report("Instruction ending live segment on dead slot has no dead flag",
3197 MI);
3198 report_context(LR, Reg, LaneMask);
3199 report_context(S);
3200 }
3201 } else {
3202 if (!hasRead) {
3203 // When tracking subregister liveness, the main range must start new
3204 // values on partial register writes, even if there is no read.
3205 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3206 !hasSubRegDef) {
3207 report("Instruction ending live segment doesn't read the register",
3208 MI);
3209 report_context(LR, Reg, LaneMask);
3210 report_context(S);
3211 }
3212 }
3213 }
3214 }
3215
3216 // Now check all the basic blocks in this live segment.
3217 MachineFunction::const_iterator MFI = MBB->getIterator();
3218 // Is this live segment the beginning of a non-PHIDef VN?
3219 if (S.start == VNI->def && !VNI->isPHIDef()) {
3220 // Not live-in to any blocks.
3221 if (MBB == EndMBB)
3222 return;
3223 // Skip this block.
3224 ++MFI;
3225 }
3226
3227 SmallVector<SlotIndex, 4> Undefs;
3228 if (LaneMask.any()) {
3229 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3230 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3231 }
3232
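  // Walk every block the segment is live through and check that the value is
  // live-out of each CFG predecessor; only a PHI-def may receive different
  // values from different predecessors.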
3233 while (true) {
3234    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3235 // We don't know how to track physregs into a landing pad.
3236 if (!Reg.isVirtual() && MFI->isEHPad()) {
3237 if (&*MFI == EndMBB)
3238 break;
3239 ++MFI;
3240 continue;
3241 }
3242
3243 // Is VNI a PHI-def in the current block?
3244 bool IsPHI = VNI->isPHIDef() &&
3245 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3246
3247 // Check that VNI is live-out of all predecessors.
3248 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3249 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3250      // A landing pad's value must be live-out of the predecessor at its last call, not at the block end.
3251 if (MFI->isEHPad()) {
3252 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3253 if (MI.isCall()) {
3254 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3255 break;
3256 }
3257 }
3258 }
3259 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3260
3261      // All predecessors must have a live-out value. However, for a PHI
3262      // instruction with subregister intervals, only one of the
3263      // subregisters (not necessarily the current one) needs to be
3264      // defined.
3265 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3266 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3267 continue;
3268 report("Register not marked live out of predecessor", Pred);
3269 report_context(LR, Reg, LaneMask);
3270 report_context(*VNI);
3271 errs() << " live into " << printMBBReference(*MFI) << '@'
3272 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3273 << PEnd << '\n';
3274 continue;
3275 }
3276
3277 // Only PHI-defs can take different predecessor values.
3278 if (!IsPHI && PVNI != VNI) {
3279 report("Different value live out of predecessor", Pred);
3280 report_context(LR, Reg, LaneMask);
3281 errs() << "Valno #" << PVNI->id << " live out of "
3282 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3283 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3284 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3285 }
3286 }
3287 if (&*MFI == EndMBB)
3288 break;
3289 ++MFI;
3290 }
3291}
3292
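/// Verify a live range by checking each of its value numbers and segments.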
3293void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3294 LaneBitmask LaneMask) {
3295 for (const VNInfo *VNI : LR.valnos)
3296 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3297
3298 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3299 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3300}
3301
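/// Verify a virtual register's live interval: the main range, each subrange
/// (disjoint lane masks, non-empty, covered by the main range), and that the
/// interval forms a single connected component.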
3302void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3303 Register Reg = LI.reg();
3304  assert(Reg.isVirtual());
3305 verifyLiveRange(LI, Reg);
3306
3307 LaneBitmask Mask;
3308 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3309 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3310 if ((Mask & SR.LaneMask).any()) {
3311 report("Lane masks of sub ranges overlap in live interval", MF);
3312 report_context(LI);
3313 }
3314 if ((SR.LaneMask & ~MaxMask).any()) {
3315 report("Subrange lanemask is invalid", MF);
3316 report_context(LI);
3317 }
3318 if (SR.empty()) {
3319 report("Subrange must not be empty", MF);
3320 report_context(SR, LI.reg(), SR.LaneMask);
3321 }
3322 Mask |= SR.LaneMask;
3323 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3324 if (!LI.covers(SR)) {
3325 report("A Subrange is not covered by the main range", MF);
3326 report_context(LI);
3327 }
3328 }
3329
3330 // Check the LI only has one connected component.
3331 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3332 unsigned NumComp = ConEQ.Classify(LI);
3333 if (NumComp > 1) {
3334 report("Multiple connected components in live interval", MF);
3335 report_context(LI);
3336 for (unsigned comp = 0; comp != NumComp; ++comp) {
3337 errs() << comp << ": valnos";
3338 for (const VNInfo *I : LI.valnos)
3339 if (comp == ConEQ.getEqClass(I))
3340 errs() << ' ' << I->id;
3341 errs() << '\n';
3342 }
3343 }
3344}
3345
3346namespace {
3347
3348  // FrameSetup and FrameDestroy can have zero adjustment, so a single
3349  // integer cannot tell whether a value of zero came from a FrameSetup or a
3350  // FrameDestroy.
3351 // We use a bool plus an integer to capture the stack state.
3352 struct StackStateOfBB {
3353 StackStateOfBB() = default;
3354 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3355 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3356 ExitIsSetup(ExitSetup) {}
3357
3358 // Can be negative, which means we are setting up a frame.
3359 int EntryValue = 0;
3360 int ExitValue = 0;
3361 bool EntryIsSetup = false;
3362 bool ExitIsSetup = false;
3363 };
3364
3365} // end anonymous namespace
3366
3367/// Make sure that on every path through the CFG, a FrameSetup <n> is always
3368/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
3369/// CFG edges to a merge point, and that the frame is destroyed at the end of a return block.
3370void MachineVerifier::verifyStackFrame() {
3371 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3372 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3373 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3374 return;
3375
3376 SmallVector<StackStateOfBB, 8> SPState;
3377 SPState.resize(MF->getNumBlockIDs());
3378 df_iterator_default_set<const MachineBasicBlock*> Reachable;
3379
3380 // Visit the MBBs in DFS order.
3381 for (df_ext_iterator<const MachineFunction *,
3382 df_iterator_default_set<const MachineBasicBlock *>>
3383 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3384 DFI != DFE; ++DFI) {
3385 const MachineBasicBlock *MBB = *DFI;
3386
3387 StackStateOfBB BBState;
3388 // Check the exit state of the DFS stack predecessor.
3389 if (DFI.getPathLength() >= 2) {
3390 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3391      assert(Reachable.count(StackPred) &&
3392             "DFS stack predecessor is already visited.\n");
3393 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3394 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3395 BBState.ExitValue = BBState.EntryValue;
3396 BBState.ExitIsSetup = BBState.EntryIsSetup;
3397 }
3398
3399 // Update stack state by checking contents of MBB.
3400 for (const auto &I : *MBB) {
3401 if (I.getOpcode() == FrameSetupOpcode) {
3402 if (BBState.ExitIsSetup)
3403 report("FrameSetup is after another FrameSetup", &I);
3404 BBState.ExitValue -= TII->getFrameTotalSize(I);
3405 BBState.ExitIsSetup = true;
3406 }
3407
3408 if (I.getOpcode() == FrameDestroyOpcode) {
3409 int Size = TII->getFrameTotalSize(I);
3410 if (!BBState.ExitIsSetup)
3411 report("FrameDestroy is not after a FrameSetup", &I);
3412 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3413 BBState.ExitValue;
3414 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3415 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3416 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3417 << AbsSPAdj << ">.\n";
3418 }
3419 BBState.ExitValue += Size;
3420 BBState.ExitIsSetup = false;
3421 }
3422 }
3423 SPState[MBB->getNumber()] = BBState;
3424
3425 // Make sure the exit state of any predecessor is consistent with the entry
3426 // state.
3427 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3428 if (Reachable.count(Pred) &&
3429 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3430 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3431 report("The exit stack state of a predecessor is inconsistent.", MBB);
3432 errs() << "Predecessor " << printMBBReference(*Pred)
3433 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3434 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3435 << printMBBReference(*MBB) << " has entry state ("
3436 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3437 }
3438 }
3439
3440 // Make sure the entry state of any successor is consistent with the exit
3441 // state.
3442 for (const MachineBasicBlock *Succ : MBB->successors()) {
3443 if (Reachable.count(Succ) &&
3444 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3445 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3446 report("The entry stack state of a successor is inconsistent.", MBB);
3447 errs() << "Successor " << printMBBReference(*Succ)
3448 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3449 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3450 << printMBBReference(*MBB) << " has exit state ("
3451 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3452 }
3453 }
3454
3455 // Make sure a basic block with return ends with zero stack adjustment.
3456 if (!MBB->empty() && MBB->back().isReturn()) {
3457 if (BBState.ExitIsSetup)
3458 report("A return block ends with a FrameSetup.", MBB);
3459 if (BBState.ExitValue)
3460 report("A return block ends with a nonzero stack adjustment.", MBB);
3461 }
3462 }
3463}