LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
126#include <algorithm>
127#include <cassert>
128#include <cstdint>
129#include <memory>
130#include <optional>
131#include <string>
132#include <utility>
133
134using namespace llvm;
135
137 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
138 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
139 "scopes are not dominating"));
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "declare_value";
197 break;
199 *OS << "assign";
200 break;
202 *OS << "end";
203 break;
205 *OS << "any";
206 break;
207 };
208 }
209
210 void Write(const Metadata *MD) {
211 if (!MD)
212 return;
213 MD->print(*OS, MST, &M);
214 *OS << '\n';
215 }
216
  /// Unwrap a typed MD-tuple wrapper and print the underlying node.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
220
221 void Write(const NamedMDNode *NMD) {
222 if (!NMD)
223 return;
224 NMD->print(*OS, MST);
225 *OS << '\n';
226 }
227
228 void Write(Type *T) {
229 if (!T)
230 return;
231 *OS << ' ' << *T;
232 }
233
234 void Write(const Comdat *C) {
235 if (!C)
236 return;
237 *OS << *C;
238 }
239
240 void Write(const APInt *AI) {
241 if (!AI)
242 return;
243 *OS << *AI << '\n';
244 }
245
246 void Write(const unsigned i) { *OS << i << '\n'; }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const Attribute *A) {
250 if (!A)
251 return;
252 *OS << A->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeSet *AS) {
257 if (!AS)
258 return;
259 *OS << AS->getAsString() << '\n';
260 }
261
262 // NOLINTNEXTLINE(readability-identifier-naming)
263 void Write(const AttributeList *AL) {
264 if (!AL)
265 return;
266 AL->print(*OS);
267 }
268
269 void Write(Printable P) { *OS << P << '\n'; }
270
  /// Print every element of \p Vs using the matching Write overload.
  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }
275
  /// Print a heterogeneous pack of values by recursively peeling off the
  /// first element and dispatching it to the matching Write overload.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }
281
  /// Base case of the WriteTs recursion: nothing left to print.
  template <typename... Ts> void WriteTs() {}
283
284public:
285 /// A check failed, so printout out the condition and the message.
286 ///
287 /// This provides a nice place to put a breakpoint if you want to see why
288 /// something is not correct.
289 void CheckFailed(const Twine &Message) {
290 if (OS)
291 *OS << Message << '\n';
292 Broken = true;
293 }
294
295 /// A check failed (with values to print).
296 ///
297 /// This calls the Message-only version so that the above is easier to set a
298 /// breakpoint on.
299 template <typename T1, typename... Ts>
300 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
301 CheckFailed(Message);
302 if (OS)
303 WriteTs(V1, Vs...);
304 }
305
  /// A debug info check failed. Prints the message (when a stream is
  /// available) and marks the debug info as broken/recoverable.
  // NOTE(review): one line appears to be missing from this copy between the
  // print and the flag update (presumably the one escalating to Broken when
  // TreatBrokenDebugInfoAsError is set) — confirm against upstream.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    BrokenDebugInfo = true;
  }
313
314 /// A debug info check failed (with values to print).
315 template <typename T1, typename... Ts>
316 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
317 const Ts &... Vs) {
318 DebugInfoCheckFailed(Message);
319 if (OS)
320 WriteTs(V1, Vs...);
321 }
322};
323
324namespace {
325
326class Verifier : public InstVisitor<Verifier>, VerifierSupport {
327 friend class InstVisitor<Verifier>;
328 DominatorTree DT;
329
330 /// When verifying a basic block, keep track of all of the
331 /// instructions we have seen so far.
332 ///
333 /// This allows us to do efficient dominance checks for the case when an
334 /// instruction has an operand that is an instruction in the same block.
335 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
336
337 /// Keep track of the metadata nodes that have been checked already.
339
340 /// Keep track which DISubprogram is attached to which function.
342
343 /// Track all DICompileUnits visited.
345
346 /// The result type for a landingpad.
347 Type *LandingPadResultTy;
348
349 /// Whether we've seen a call to @llvm.localescape in this function
350 /// already.
351 bool SawFrameEscape;
352
353 /// Whether the current function has a DISubprogram attached to it.
354 bool HasDebugInfo = false;
355
356 /// Stores the count of how many objects were passed to llvm.localescape for a
357 /// given function and the largest index passed to llvm.localrecover.
359
360 // Maps catchswitches and cleanuppads that unwind to siblings to the
361 // terminators that indicate the unwind, used to detect cycles therein.
363
364 /// Cache which blocks are in which funclet, if an EH funclet personality is
365 /// in use. Otherwise empty.
366 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
367
368 /// Cache of constants visited in search of ConstantExprs.
369 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
370
371 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
372 SmallVector<const Function *, 4> DeoptimizeDeclarations;
373
374 /// Cache of attribute lists verified.
375 SmallPtrSet<const void *, 32> AttributeListsVisited;
376
377 // Verify that this GlobalValue is only used in this module.
378 // This map is used to avoid visiting uses twice. We can arrive at a user
379 // twice, if they have multiple operands. In particular for very large
380 // constant expressions, we can arrive at a particular user many times.
381 SmallPtrSet<const Value *, 32> GlobalValueVisited;
382
383 // Keeps track of duplicate function argument debug info.
385
386 TBAAVerifier TBAAVerifyHelper;
387 ConvergenceVerifier ConvergenceVerifyHelper;
388
389 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
390
391 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
392
393public:
  /// Create a verifier for module \p M, reporting to \p OS (which may be
  /// null, in which case nothing is printed). \p
  /// ShouldTreatBrokenDebugInfoAsError controls whether broken debug info is
  /// treated as a hard verification error rather than a recoverable one.
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
400
  /// Returns true if any debug-info verification check has failed so far.
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
402
  /// Verify a single function of the module this instance was created with.
  /// Returns true when no verification errors were recorded.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.

    // First check that every basic block has a terminator, otherwise we can't
    // even inspect the CFG.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      // Malformed CFG: report (when a stream is available) and bail out
      // before any dominance-based analysis.
      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Route convergence-verifier failures through our own error reporting.
    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    // Token-based convergence checks need the dominator tree and are only
    // worthwhile if convergence tokens were actually seen.
    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so the same instance can verify more functions.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
456
  /// Verify the module that this instance of \c Verifier was initialized
  /// with. Returns true when no verification errors were recorded.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();

    // Walk every kind of module-level global entity.
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    // Comdats live in the module's comdat symbol table, not on the globals.
    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    // Module-level named-metadata checks.
    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
495
496private:
497 /// Whether a metadata node is allowed to be, or contain, a DILocation.
498 enum class AreDebugLocsAllowed { No, Yes };
499
500 /// Metadata that should be treated as a range, with slightly different
501 /// requirements.
502 enum class RangeLikeMetadataKind {
503 Range, // MD_range
504 AbsoluteSymbol, // MD_absolute_symbol
505 NoaliasAddrspace // MD_noalias_addrspace
506 };
507
508 // Verification methods...
509 void visitGlobalValue(const GlobalValue &GV);
510 void visitGlobalVariable(const GlobalVariable &GV);
511 void visitGlobalAlias(const GlobalAlias &GA);
512 void visitGlobalIFunc(const GlobalIFunc &GI);
513 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
514 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
515 const GlobalAlias &A, const Constant &C);
516 void visitNamedMDNode(const NamedMDNode &NMD);
517 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
518 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
519 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
520 void visitDIArgList(const DIArgList &AL, Function *F);
521 void visitComdat(const Comdat &C);
522 void visitModuleIdents();
523 void visitModuleCommandLines();
524 void visitModuleErrnoTBAA();
525 void visitModuleFlags();
526 void visitModuleFlag(const MDNode *Op,
527 DenseMap<const MDString *, const MDNode *> &SeenIDs,
528 SmallVectorImpl<const MDNode *> &Requirements);
529 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
530 void visitFunction(const Function &F);
531 void visitBasicBlock(BasicBlock &BB);
532 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
533 RangeLikeMetadataKind Kind);
534 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
535 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
536 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
538 void visitNofreeMetadata(Instruction &I, MDNode *MD);
539 void visitProfMetadata(Instruction &I, MDNode *MD);
540 void visitCallStackMetadata(MDNode *MD);
541 void visitMemProfMetadata(Instruction &I, MDNode *MD);
542 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
543 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
544 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
545 void visitMMRAMetadata(Instruction &I, MDNode *MD);
546 void visitAnnotationMetadata(MDNode *Annotation);
547 void visitAliasScopeMetadata(const MDNode *MD);
548 void visitAliasScopeListMetadata(const MDNode *MD);
549 void visitAccessGroupMetadata(const MDNode *MD);
550 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
551 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
552
553 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
554#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
555#include "llvm/IR/Metadata.def"
556 void visitDIScope(const DIScope &N);
557 void visitDIVariable(const DIVariable &N);
558 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
559 void visitDITemplateParameter(const DITemplateParameter &N);
560
561 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
562
563 void visit(DbgLabelRecord &DLR);
564 void visit(DbgVariableRecord &DVR);
565 // InstVisitor overrides...
566 using InstVisitor<Verifier>::visit;
567 void visitDbgRecords(Instruction &I);
568 void visit(Instruction &I);
569
570 void visitTruncInst(TruncInst &I);
571 void visitZExtInst(ZExtInst &I);
572 void visitSExtInst(SExtInst &I);
573 void visitFPTruncInst(FPTruncInst &I);
574 void visitFPExtInst(FPExtInst &I);
575 void visitFPToUIInst(FPToUIInst &I);
576 void visitFPToSIInst(FPToSIInst &I);
577 void visitUIToFPInst(UIToFPInst &I);
578 void visitSIToFPInst(SIToFPInst &I);
579 void visitIntToPtrInst(IntToPtrInst &I);
580 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
581 void visitPtrToAddrInst(PtrToAddrInst &I);
582 void visitPtrToIntInst(PtrToIntInst &I);
583 void visitBitCastInst(BitCastInst &I);
584 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
585 void visitPHINode(PHINode &PN);
586 void visitCallBase(CallBase &Call);
587 void visitUnaryOperator(UnaryOperator &U);
588 void visitBinaryOperator(BinaryOperator &B);
589 void visitICmpInst(ICmpInst &IC);
590 void visitFCmpInst(FCmpInst &FC);
591 void visitExtractElementInst(ExtractElementInst &EI);
592 void visitInsertElementInst(InsertElementInst &EI);
593 void visitShuffleVectorInst(ShuffleVectorInst &EI);
594 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
595 void visitCallInst(CallInst &CI);
596 void visitInvokeInst(InvokeInst &II);
597 void visitGetElementPtrInst(GetElementPtrInst &GEP);
598 void visitLoadInst(LoadInst &LI);
599 void visitStoreInst(StoreInst &SI);
600 void verifyDominatesUse(Instruction &I, unsigned i);
601 void visitInstruction(Instruction &I);
602 void visitTerminator(Instruction &I);
603 void visitCondBrInst(CondBrInst &BI);
604 void visitReturnInst(ReturnInst &RI);
605 void visitSwitchInst(SwitchInst &SI);
606 void visitIndirectBrInst(IndirectBrInst &BI);
607 void visitCallBrInst(CallBrInst &CBI);
608 void visitSelectInst(SelectInst &SI);
609 void visitUserOp1(Instruction &I);
610 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
611 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
612 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
613 void visitVPIntrinsic(VPIntrinsic &VPI);
614 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
615 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
616 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
617 void visitFenceInst(FenceInst &FI);
618 void visitAllocaInst(AllocaInst &AI);
619 void visitExtractValueInst(ExtractValueInst &EVI);
620 void visitInsertValueInst(InsertValueInst &IVI);
621 void visitEHPadPredecessors(Instruction &I);
622 void visitLandingPadInst(LandingPadInst &LPI);
623 void visitResumeInst(ResumeInst &RI);
624 void visitCatchPadInst(CatchPadInst &CPI);
625 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
626 void visitCleanupPadInst(CleanupPadInst &CPI);
627 void visitFuncletPadInst(FuncletPadInst &FPI);
628 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
629 void visitCleanupReturnInst(CleanupReturnInst &CRI);
630
631 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
632 void verifySwiftErrorValue(const Value *SwiftErrorVal);
633 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
634 void verifyMustTailCall(CallInst &CI);
635 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
636 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
637 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
638 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
639 const Value *V);
640 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
641 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
642 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
643 void verifyUnknownProfileMetadata(MDNode *MD);
644 void visitConstantExprsRecursively(const Constant *EntryC);
645 void visitConstantExpr(const ConstantExpr *CE);
646 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
647 void verifyInlineAsmCall(const CallBase &Call);
648 void verifyStatepoint(const CallBase &Call);
649 void verifyFrameRecoverIndices();
650 void verifySiblingFuncletUnwinds();
651
652 void verifyFragmentExpression(const DbgVariableRecord &I);
653 template <typename ValueOrMetadata>
654 void verifyFragmentExpression(const DIVariable &V,
656 ValueOrMetadata *Desc);
657 void verifyFnArgs(const DbgVariableRecord &DVR);
658 void verifyNotEntryValue(const DbgVariableRecord &I);
659
660 /// Module-level debug info verification...
661 void verifyCompileUnits();
662
663 /// Module-level verification that all @llvm.experimental.deoptimize
664 /// declarations share the same calling convention.
665 void verifyDeoptimizeCallingConvs();
666
667 void verifyAttachedCallBundle(const CallBase &Call,
668 const OperandBundleUse &BU);
669
670 /// Verify the llvm.experimental.noalias.scope.decl declarations
671 void verifyNoAliasScopeDecl();
672};
673
674} // end anonymous namespace
675
/// We know that cond should be true, if not print an error message.
/// On failure this records the broken state via CheckFailed and then returns
/// from the enclosing (void) verifier method, skipping its remaining checks.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
684
/// We know that a debug info condition should be true, if not print
/// an error message. Unlike Check, failures go through DebugInfoCheckFailed,
/// so they are recoverable (by stripping debug info) rather than fatal.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
694
/// Verify the DbgRecords attached to \p I through its DebugMarker: marker
/// back-pointers must be consistent, PHIs must carry no records, and each
/// record is dispatched to the appropriate visit overload.
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  // The marker must point back at the instruction it annotates.
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the condition of this 'if' is truncated in this copy —
    // presumably a cast of the record's debug location; confirm upstream.
    if (auto *Loc =
      visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
719
/// Visit one instruction: first its attached debug records, then check that
/// none of its operands is null.
// NOTE(review): a line appears to be missing at the end of this copy —
// presumably the InstVisitor dispatch to the per-opcode visit method;
// confirm against upstream before relying on this function alone.
void Verifier::visit(Instruction &I) {
  visitDbgRecords(I);
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Check(I.getOperand(i) != nullptr, "Operand is null", &I);
}
726
// Helper to iterate over indirect users (worklist-driven, deduplicated).
// When the callback returns true, the traversal continues into that user's
// own users; returning false stops descending past that user.
// NOTE(review): this copy is truncated — the Visited-set parameter and the
// worklist initialization line are missing; confirm against upstream.
static void forEachUser(const Value *User,
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;

  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    // Skip users we have already processed (reachable via multiple operands).
    if (!Visited.insert(Cur).second)
      continue;
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
743
/// Verify properties common to all global values: linkage/visibility
/// constraints, attached !associated / !absolute_symbol / ref metadata, DLL
/// storage classes, dso_local implications, and that all users live in this
/// module.
// NOTE(review): several Check( opening lines are truncated in this copy (the
// dangling string-literal lines below); confirm the exact conditions
// against upstream before editing further.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    // !associated metadata must be a single ValueAsMetadata operand pointing
    // at a pointer-typed global other than this one.
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }

    // !implicit_ref metadata: definitions only, each entry a pointer-typed
    // global other than this one.
    if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
      Check(!GO->isDeclaration(),
            "ref metadata must not be placed on a declaration", GO);

      GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
      for (const MDNode *MD : MDs) {
        Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
              &GV, MD);
        const Metadata *Op = MD->getOperand(0).get();
        const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
        Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
        if (VM) {
          Check(isa<PointerType>(VM->getValue()->getType()),
                "ref value must be pointer typed", GV, MD);

          const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
          Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
                "ref metadata must point to a GlobalObject", GO, Stripped);
          Check(Stripped != GO, "values should not reference themselves", GO,
                MD);
        }
      }
    }
  }

      "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
          "dllexport GlobalValue must have default or protected visibility",
          &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
          "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Walk all (indirect) users and make sure every referencing instruction or
  // function belongs to this module.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
859
/// Verify a single global variable: alignment, initializer typing, the
/// special llvm.global_ctors/dtors and llvm.used/compiler.used conventions,
/// debug-info attachments, and type/size constraints.
// NOTE(review): several Check( opening lines are truncated in this copy (the
// dangling string-literal lines below); confirm the exact conditions
// against upstream before editing further.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
            "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  // llvm.global_ctors / llvm.global_dtors must be arrays of
  // { i32, ptr, ptr } triples.
  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  // llvm.used / llvm.compiler.used must be arrays of named pointers.
  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initializer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  // Check that the address space can hold all bits of the type, recognized
  // by an access in the address space being able to reach all bytes of the
  // type.
  Check(!GVType->isSized() ||
            isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
                    GV.getGlobalSize(DL)),
        "Global variable is too large to fit into the address space", &GV,
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
980
981void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
982 SmallPtrSet<const GlobalAlias*, 4> Visited;
983 Visited.insert(&GA);
984 visitAliaseeSubExpr(Visited, GA, C);
985}
986
// Recursively verify the constant expression tree an alias points at.
// `Visited` carries every alias already seen on this chain and is used to
// detect cycles.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  // An available_externally alias may only refer to an available_externally
  // global value.
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // Aliases must resolve to something this module actually defines.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // A failed insert means the aliasee chain loops back on itself.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands, following any alias to its own aliasee.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1025
// Verify a global alias: legal linkage, a well-formed aliasee expression,
// and the common global-value rules.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  // Aliases only admit a restricted set of linkages.
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  // The aliasee is restricted to a global or a constant expression over one.
  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle detection, interposability, etc.).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally run the common global-value checks on the alias itself.
  visitGlobalValue(GA);
}
1043
// Verify a global ifunc: metadata restrictions, legal linkage, and a
// resolver that is a defined function returning a pointer of the right
// address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // Ifuncs may carry metadata, but neither !dbg nor !prof attachments.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver operand must be a pointer in the ifunc's address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1078
1079void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1080 // There used to be various other llvm.dbg.* nodes, but we don't support
1081 // upgrading them and we want to reserve the namespace for future uses.
1082 if (NMD.getName().starts_with("llvm.dbg."))
1083 CheckDI(NMD.getName() == "llvm.dbg.cu",
1084 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1085 for (const MDNode *MD : NMD.operands()) {
1086 if (NMD.getName() == "llvm.dbg.cu")
1087 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1088
1089 if (!MD)
1090 continue;
1091
1092 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1093 }
1094}
1095
// Verify a (uniqued or distinct) metadata node and, recursively, its
// operands. `AllowLocs` controls whether DILocation operands are legal in
// this position.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for this node's concrete subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands: function-local values are never legal in global
  // metadata, and DILocations only where the caller allows them.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1150
1151void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1152 Check(MD.getValue(), "Expected valid value", &MD);
1153 Check(!MD.getValue()->getType()->isMetadataTy(),
1154 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1155
1156 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1157 if (!L)
1158 return;
1159
1160 Check(F, "function-local metadata used outside a function", L);
1161
1162 // If this was an instruction, bb, or argument, verify that it is in the
1163 // function that we expect.
1164 Function *ActualF = nullptr;
1165 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1166 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1167 ActualF = I->getParent()->getParent();
1168 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1169 ActualF = BB->getParent();
1170 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1171 ActualF = A->getParent();
1172 assert(ActualF && "Unimplemented function local metadata case!");
1173
1174 Check(ActualF == F, "function-local metadata used in wrong function", L);
1175}
1176
1177void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1178 for (const ValueAsMetadata *VAM : AL.getArgs())
1179 visitValueAsMetadata(*VAM, F);
1180}
1181
1182void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1183 Metadata *MD = MDV.getMetadata();
1184 if (auto *N = dyn_cast<MDNode>(MD)) {
1185 visitMDNode(*N, AreDebugLocsAllowed::No);
1186 return;
1187 }
1188
1189 // Only visit each node once. Metadata can be mutually recursive, so this
1190 // avoids infinite recursion here, as well as being an optimization.
1191 if (!MDNodes.insert(MD).second)
1192 return;
1193
1194 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1195 visitValueAsMetadata(*V, F);
1196
1197 if (auto *AL = dyn_cast<DIArgList>(MD))
1198 visitDIArgList(*AL, F);
1199}
1200
1201static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1202static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1203static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1204static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1205
1206void Verifier::visitDILocation(const DILocation &N) {
1207 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1208 "location requires a valid scope", &N, N.getRawScope());
1209 if (auto *IA = N.getRawInlinedAt())
1210 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1211 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1212 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1213}
1214
1215void Verifier::visitGenericDINode(const GenericDINode &N) {
1216 CheckDI(N.getTag(), "invalid tag", &N);
1217}
1218
1219void Verifier::visitDIScope(const DIScope &N) {
1220 if (auto *F = N.getRawFile())
1221 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1222}
1223
// Verify a DISubrangeType: tag, typed base, and each bound/stride/bias field
// expressed as a constant, variable, expression, or (for bounds) derived type.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  // All of the following fields are optional; when present they must be of
  // one of the listed metadata kinds.
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1255
// Verify a DISubrange: count and upperBound are mutually exclusive, and each
// field must be a constant, variable, or expression when present.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  // A constant count must be >= -1 (-1 encodes an empty array).
  auto Count = N.getCount();
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1283
1284void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1285 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1286 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1287 "GenericSubrange can have any one of count or upperBound", &N);
1288 auto *CBound = N.getRawCountNode();
1289 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1290 "Count must be signed constant or DIVariable or DIExpression", &N);
1291 auto *LBound = N.getRawLowerBound();
1292 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1293 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1294 "LowerBound must be signed constant or DIVariable or DIExpression",
1295 &N);
1296 auto *UBound = N.getRawUpperBound();
1297 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1298 "UpperBound must be signed constant or DIVariable or DIExpression",
1299 &N);
1300 auto *Stride = N.getRawStride();
1301 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1302 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1303 "Stride must be signed constant or DIVariable or DIExpression", &N);
1304}
1305
1306void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1307 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1308}
1309
// Verify a basic type node; base, unspecified, and string types all funnel
// through here.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1320
// Verify a fixed-point type: base-type rules plus the fixed-point-specific
// encoding/kind constraints and the factor-vs-rational field exclusivity.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Common basic-type checks first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1339
1340void Verifier::visitDIStringType(const DIStringType &N) {
1341 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1342 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1343 &N);
1344}
1345
// Verify a derived type (typedefs, pointers, qualifiers, members, etc.):
// tag-specific extraData shapes, set-type base restrictions, scope/base-type
// references, and DWARF address-space applicability.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The meaning of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept: absent, a constant, a string, an ObjC property, or a 1-element
    // tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // Set types may only be built over enumerations, subranges, or integral
  // basic types.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DW_AT_address_class is only meaningful on pointer/reference kinds.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1431
1432/// Detect mutually exclusive flags.
1433static bool hasConflictingReferenceFlags(unsigned Flags) {
1434 return ((Flags & DINode::FlagLValueReference) &&
1435 (Flags & DINode::FlagRValueReference)) ||
1436 ((Flags & DINode::FlagTypePassByValue) &&
1437 (Flags & DINode::FlagTypePassByReference));
1438}
1439
1440void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1441 auto *Params = dyn_cast<MDTuple>(&RawParams);
1442 CheckDI(Params, "invalid template params", &N, &RawParams);
1443 for (Metadata *Op : Params->operands()) {
1444 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1445 &N, Params, Op);
1446 }
1447}
1448
// Verify a composite type (arrays, structs, unions, enums, classes,
// variants, namelists): tag, member references, vector shape, and the
// array-only fields (dataLocation/associated/allocated/rank).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Legacy Blocks-runtime flag; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): the message below says "DISubprogram" but this check runs
  // on a DICompositeType's elements — looks like a copy-paste; confirm
  // against upstream before changing the string.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // A vector is an array with exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The remaining optional fields are only legal on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1523
// Verify a subroutine type: the type array must be a tuple whose entries are
// type references (return type first; a null entry denotes void).
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1535
1536void Verifier::visitDIFile(const DIFile &N) {
1537 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1538 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1539 if (Checksum) {
1540 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1541 "invalid checksum kind", &N);
1542 size_t Size;
1543 switch (Checksum->Kind) {
1544 case DIFile::CSK_MD5:
1545 Size = 32;
1546 break;
1547 case DIFile::CSK_SHA1:
1548 Size = 40;
1549 break;
1550 case DIFile::CSK_SHA256:
1551 Size = 64;
1552 break;
1553 }
1554 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1555 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1556 "invalid checksum", &N);
1557 }
1558}
1559
// Verify a compile unit: distinctness, file, emission kind, and the typed
// contents of each of its list fields (enums, retained types, globals,
// imports, macros). Records the CU for a later all-CUs-listed check.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Enum list: each entry must be an enumeration type that is not
  // function-local.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  // Retained types: a type, or a subprogram declaration (not a definition).
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  // Imported entities must be DIImportedEntity nodes, and never local.
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(IE, "invalid imported entity ref", &N, Op);
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Remember this CU so we can later verify it is listed in llvm.dbg.cu.
  CUVisited.insert(&N);
}
1620
// Verify a subprogram: scope/file/type references, template params,
// declaration links, the retained-nodes list (locals, labels, imports,
// types that belong to this subprogram), and the definition/declaration
// split (definitions are distinct and carry a compile unit; declarations
// carry neither).
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration link must point at a declaration, never a definition.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Tracks which argument slots are already claimed by a local variable.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept the first four retained-node kinds; reject anything else.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      // Every retained node must live in a local scope whose subprogram is
      // this one.
      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      // Two distinct local variables may not claim the same argument index.
      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  // Thrown types, when present, are a tuple of types.
  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1719
1720void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1721 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1722 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1723 "invalid local scope", &N, N.getRawScope());
1724 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1725 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1726}
1727
1728void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1729 visitDILexicalBlockBase(N);
1730
1731 CheckDI(N.getLine() || !N.getColumn(),
1732 "cannot have column info without line info", &N);
1733}
1734
// A lexical block file has no checks beyond the common lexical-block ones.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1738
1739void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1740 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1741 if (auto *S = N.getRawScope())
1742 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1743 if (auto *S = N.getRawDecl())
1744 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1745}
1746
1747void Verifier::visitDINamespace(const DINamespace &N) {
1748 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1749 if (auto *S = N.getRawScope())
1750 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1751}
1752
1753void Verifier::visitDIMacro(const DIMacro &N) {
1754 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1755 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1756 "invalid macinfo type", &N);
1757 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1758 if (!N.getValue().empty()) {
1759 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1760 }
1761}
1762
1763void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1764 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1765 "invalid macinfo type", &N);
1766 if (auto *F = N.getRawFile())
1767 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1768
1769 if (auto *Array = N.getRawElements()) {
1770 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1771 for (Metadata *Op : N.getElements()->operands()) {
1772 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1773 }
1774 }
1775}
1776
1777void Verifier::visitDIModule(const DIModule &N) {
1778 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1779 CheckDI(!N.getName().empty(), "anonymous module", &N);
1780}
1781
/// Common check shared by all template-parameter kinds: the raw type operand
/// must be a valid type reference (as defined by isType).
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1785
1786void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1787 visitDITemplateParameter(N);
1788
1789 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1790 &N);
1791}
1792
1793void Verifier::visitDITemplateValueParameter(
1794 const DITemplateValueParameter &N) {
1795 visitDITemplateParameter(N);
1796
1797 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1798 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1799 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1800 "invalid tag", &N);
1801}
1802
1803void Verifier::visitDIVariable(const DIVariable &N) {
1804 if (auto *S = N.getRawScope())
1805 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1806 if (auto *F = N.getRawFile())
1807 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1808}
1809
1810void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1811 // Checks common to all variables.
1812 visitDIVariable(N);
1813
1814 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1815 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1816 // Check only if the global variable is not an extern
1817 if (N.isDefinition())
1818 CheckDI(N.getType(), "missing global variable type", &N);
1819 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1821 "invalid static data member declaration", &N, Member);
1822 }
1823}
1824
1825void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1826 // Checks common to all variables.
1827 visitDIVariable(N);
1828
1829 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1830 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1831 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1832 "local variable requires a valid scope", &N, N.getRawScope());
1833 if (auto Ty = N.getType())
1834 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1835}
1836
1837void Verifier::visitDIAssignID(const DIAssignID &N) {
1838 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1839 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1840}
1841
1842void Verifier::visitDILabel(const DILabel &N) {
1843 if (auto *S = N.getRawScope())
1844 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1845 if (auto *F = N.getRawFile())
1846 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1847
1848 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1849 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1850 "label requires a valid scope", &N, N.getRawScope());
1851}
1852
/// Verify a DIExpression by delegating to its own well-formedness predicate.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1856
1857void Verifier::visitDIGlobalVariableExpression(
1858 const DIGlobalVariableExpression &GVE) {
1859 CheckDI(GVE.getVariable(), "missing variable");
1860 if (auto *Var = GVE.getVariable())
1861 visitDIGlobalVariable(*Var);
1862 if (auto *Expr = GVE.getExpression()) {
1863 visitDIExpression(*Expr);
1864 if (auto Fragment = Expr->getFragmentInfo())
1865 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1866 }
1867}
1868
1869void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1870 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1871 if (auto *T = N.getRawType())
1872 CheckDI(isType(T), "invalid type ref", &N, T);
1873 if (auto *F = N.getRawFile())
1874 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1875}
1876
1877void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1878 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1879 N.getTag() == dwarf::DW_TAG_imported_declaration,
1880 "invalid tag", &N);
1881 if (auto *S = N.getRawScope())
1882 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1883 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1884 N.getRawEntity());
1885}
1886
1887void Verifier::visitComdat(const Comdat &C) {
1888 // In COFF the Module is invalid if the GlobalValue has private linkage.
1889 // Entities with private linkage don't have entries in the symbol table.
1890 if (TT.isOSBinFormatCOFF())
1891 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1892 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1893 GV);
1894}
1895
1896void Verifier::visitModuleIdents() {
1897 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1898 if (!Idents)
1899 return;
1900
1901 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1902 // Scan each llvm.ident entry and make sure that this requirement is met.
1903 for (const MDNode *N : Idents->operands()) {
1904 Check(N->getNumOperands() == 1,
1905 "incorrect number of operands in llvm.ident metadata", N);
1906 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1907 ("invalid value for llvm.ident metadata entry operand"
1908 "(the operand should be a string)"),
1909 N->getOperand(0));
1910 }
1911}
1912
1913void Verifier::visitModuleCommandLines() {
1914 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1915 if (!CommandLines)
1916 return;
1917
1918 // llvm.commandline takes a list of metadata entry. Each entry has only one
1919 // string. Scan each llvm.commandline entry and make sure that this
1920 // requirement is met.
1921 for (const MDNode *N : CommandLines->operands()) {
1922 Check(N->getNumOperands() == 1,
1923 "incorrect number of operands in llvm.commandline metadata", N);
1924 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1925 ("invalid value for llvm.commandline metadata entry operand"
1926 "(the operand should be a string)"),
1927 N->getOperand(0));
1928 }
1929}
1930
1931void Verifier::visitModuleErrnoTBAA() {
1932 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1933 if (!ErrnoTBAA)
1934 return;
1935
1936 Check(ErrnoTBAA->getNumOperands() >= 1,
1937 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1938
1939 for (const MDNode *N : ErrnoTBAA->operands())
1940 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1941}
1942
1943void Verifier::visitModuleFlags() {
1944 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1945 if (!Flags) return;
1946
1947 // Scan each flag, and track the flags and requirements.
1948 DenseMap<const MDString*, const MDNode*> SeenIDs;
1949 SmallVector<const MDNode*, 16> Requirements;
1950 uint64_t PAuthABIPlatform = -1;
1951 uint64_t PAuthABIVersion = -1;
1952 for (const MDNode *MDN : Flags->operands()) {
1953 visitModuleFlag(MDN, SeenIDs, Requirements);
1954 if (MDN->getNumOperands() != 3)
1955 continue;
1956 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1957 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1958 if (const auto *PAP =
1960 PAuthABIPlatform = PAP->getZExtValue();
1961 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1962 if (const auto *PAV =
1964 PAuthABIVersion = PAV->getZExtValue();
1965 }
1966 }
1967 }
1968
1969 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1970 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1971 "'aarch64-elf-pauthabi-version' module flags must be present");
1972
1973 // Validate that the requirements in the module are valid.
1974 for (const MDNode *Requirement : Requirements) {
1975 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1976 const Metadata *ReqValue = Requirement->getOperand(1);
1977
1978 const MDNode *Op = SeenIDs.lookup(Flag);
1979 if (!Op) {
1980 CheckFailed("invalid requirement on flag, flag is not present in module",
1981 Flag);
1982 continue;
1983 }
1984
1985 if (Op->getOperand(2) != ReqValue) {
1986 CheckFailed(("invalid requirement on flag, "
1987 "flag does not have the required value"),
1988 Flag);
1989 continue;
1990 }
1991 }
1992}
1993
1994void
1995Verifier::visitModuleFlag(const MDNode *Op,
1996 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1997 SmallVectorImpl<const MDNode *> &Requirements) {
1998 // Each module flag should have three arguments, the merge behavior (a
1999 // constant int), the flag ID (an MDString), and the value.
2000 Check(Op->getNumOperands() == 3,
2001 "incorrect number of operands in module flag", Op);
2002 Module::ModFlagBehavior MFB;
2003 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2005 "invalid behavior operand in module flag (expected constant integer)",
2006 Op->getOperand(0));
2007 Check(false,
2008 "invalid behavior operand in module flag (unexpected constant)",
2009 Op->getOperand(0));
2010 }
2011 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2012 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2013 Op->getOperand(1));
2014
2015 // Check the values for behaviors with additional requirements.
2016 switch (MFB) {
2017 case Module::Error:
2018 case Module::Warning:
2019 case Module::Override:
2020 // These behavior types accept any value.
2021 break;
2022
2023 case Module::Min: {
2024 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2025 Check(V && V->getValue().isNonNegative(),
2026 "invalid value for 'min' module flag (expected constant non-negative "
2027 "integer)",
2028 Op->getOperand(2));
2029 break;
2030 }
2031
2032 case Module::Max: {
2034 "invalid value for 'max' module flag (expected constant integer)",
2035 Op->getOperand(2));
2036 break;
2037 }
2038
2039 case Module::Require: {
2040 // The value should itself be an MDNode with two operands, a flag ID (an
2041 // MDString), and a value.
2042 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2043 Check(Value && Value->getNumOperands() == 2,
2044 "invalid value for 'require' module flag (expected metadata pair)",
2045 Op->getOperand(2));
2046 Check(isa<MDString>(Value->getOperand(0)),
2047 ("invalid value for 'require' module flag "
2048 "(first value operand should be a string)"),
2049 Value->getOperand(0));
2050
2051 // Append it to the list of requirements, to check once all module flags are
2052 // scanned.
2053 Requirements.push_back(Value);
2054 break;
2055 }
2056
2057 case Module::Append:
2058 case Module::AppendUnique: {
2059 // These behavior types require the operand be an MDNode.
2060 Check(isa<MDNode>(Op->getOperand(2)),
2061 "invalid value for 'append'-type module flag "
2062 "(expected a metadata node)",
2063 Op->getOperand(2));
2064 break;
2065 }
2066 }
2067
2068 // Unless this is a "requires" flag, check the ID is unique.
2069 if (MFB != Module::Require) {
2070 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2071 Check(Inserted,
2072 "module flag identifiers must be unique (or of 'require' type)", ID);
2073 }
2074
2075 if (ID->getString() == "wchar_size") {
2076 ConstantInt *Value
2078 Check(Value, "wchar_size metadata requires constant integer argument");
2079 }
2080
2081 if (ID->getString() == "Linker Options") {
2082 // If the llvm.linker.options named metadata exists, we assume that the
2083 // bitcode reader has upgraded the module flag. Otherwise the flag might
2084 // have been created by a client directly.
2085 Check(M.getNamedMetadata("llvm.linker.options"),
2086 "'Linker Options' named metadata no longer supported");
2087 }
2088
2089 if (ID->getString() == "SemanticInterposition") {
2090 ConstantInt *Value =
2092 Check(Value,
2093 "SemanticInterposition metadata requires constant integer argument");
2094 }
2095
2096 if (ID->getString() == "CG Profile") {
2097 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2098 visitModuleFlagCGProfileEntry(MDO);
2099 }
2100}
2101
2102void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2103 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2104 if (!FuncMDO)
2105 return;
2106 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2107 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2108 "expected a Function or null", FuncMDO);
2109 };
2110 auto Node = dyn_cast_or_null<MDNode>(MDO);
2111 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2112 CheckFunction(Node->getOperand(0));
2113 CheckFunction(Node->getOperand(1));
2114 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2115 Check(Count && Count->getType()->isIntegerTy(),
2116 "expected an integer constant", Node->getOperand(2));
2117}
2118
/// Verify that each attribute in \p Attrs has the right representation:
/// string attributes known to be boolean-valued must be "", "true", or
/// "false", and enum/int attributes must carry an argument exactly when their
/// kind requires one. \p V is printed in diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand a per-attribute check for every known string-boolean attribute
      // via the generated Attributes.inc table; the ATTRIBUTE_ENUM entries
      // are deliberately expanded to nothing.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Non-string attributes: the presence of an integer argument must match
    // what the attribute kind expects.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2144
2145// VerifyParameterAttrs - Check the given attributes for an argument or return
2146// value of the specified type. The value V is printed in error messages.
2147void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2148 const Value *V) {
2149 if (!Attrs.hasAttributes())
2150 return;
2151
2152 verifyAttributeTypes(Attrs, V);
2153
2154 for (Attribute Attr : Attrs)
2155 Check(Attr.isStringAttribute() ||
2156 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2157 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2158 V);
2159
2160 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2161 unsigned AttrCount =
2162 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2163 Check(AttrCount == 1,
2164 "Attribute 'immarg' is incompatible with other attributes except the "
2165 "'range' attribute",
2166 V);
2167 }
2168
2169 // Check for mutually incompatible attributes. Only inreg is compatible with
2170 // sret.
2171 unsigned AttrCount = 0;
2172 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2173 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2174 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2175 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2176 Attrs.hasAttribute(Attribute::InReg);
2177 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2178 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2179 Check(AttrCount <= 1,
2180 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2181 "'byref', and 'sret' are incompatible!",
2182 V);
2183
2184 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2185 Attrs.hasAttribute(Attribute::ReadOnly)),
2186 "Attributes "
2187 "'inalloca and readonly' are incompatible!",
2188 V);
2189
2190 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2191 Attrs.hasAttribute(Attribute::Returned)),
2192 "Attributes "
2193 "'sret and returned' are incompatible!",
2194 V);
2195
2196 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2197 Attrs.hasAttribute(Attribute::SExt)),
2198 "Attributes "
2199 "'zeroext and signext' are incompatible!",
2200 V);
2201
2202 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2203 Attrs.hasAttribute(Attribute::ReadOnly)),
2204 "Attributes "
2205 "'readnone and readonly' are incompatible!",
2206 V);
2207
2208 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2209 Attrs.hasAttribute(Attribute::WriteOnly)),
2210 "Attributes "
2211 "'readnone and writeonly' are incompatible!",
2212 V);
2213
2214 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2215 Attrs.hasAttribute(Attribute::WriteOnly)),
2216 "Attributes "
2217 "'readonly and writeonly' are incompatible!",
2218 V);
2219
2220 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2221 Attrs.hasAttribute(Attribute::AlwaysInline)),
2222 "Attributes "
2223 "'noinline and alwaysinline' are incompatible!",
2224 V);
2225
2226 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2227 Attrs.hasAttribute(Attribute::ReadNone)),
2228 "Attributes writable and readnone are incompatible!", V);
2229
2230 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2231 Attrs.hasAttribute(Attribute::ReadOnly)),
2232 "Attributes writable and readonly are incompatible!", V);
2233
2234 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2235 for (Attribute Attr : Attrs) {
2236 if (!Attr.isStringAttribute() &&
2237 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2238 CheckFailed("Attribute '" + Attr.getAsString() +
2239 "' applied to incompatible type!", V);
2240 return;
2241 }
2242 }
2243
2244 if (isa<PointerType>(Ty)) {
2245 if (Attrs.hasAttribute(Attribute::Alignment)) {
2246 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2247 Check(AttrAlign.value() <= Value::MaximumAlignment,
2248 "huge alignment values are unsupported", V);
2249 }
2250 if (Attrs.hasAttribute(Attribute::ByVal)) {
2251 Type *ByValTy = Attrs.getByValType();
2252 SmallPtrSet<Type *, 4> Visited;
2253 Check(ByValTy->isSized(&Visited),
2254 "Attribute 'byval' does not support unsized types!", V);
2255 // Check if it is or contains a target extension type that disallows being
2256 // used on the stack.
2258 "'byval' argument has illegal target extension type", V);
2259 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2260 "huge 'byval' arguments are unsupported", V);
2261 }
2262 if (Attrs.hasAttribute(Attribute::ByRef)) {
2263 SmallPtrSet<Type *, 4> Visited;
2264 Check(Attrs.getByRefType()->isSized(&Visited),
2265 "Attribute 'byref' does not support unsized types!", V);
2266 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2267 (1ULL << 32),
2268 "huge 'byref' arguments are unsupported", V);
2269 }
2270 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2271 SmallPtrSet<Type *, 4> Visited;
2272 Check(Attrs.getInAllocaType()->isSized(&Visited),
2273 "Attribute 'inalloca' does not support unsized types!", V);
2274 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2275 (1ULL << 32),
2276 "huge 'inalloca' arguments are unsupported", V);
2277 }
2278 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2279 SmallPtrSet<Type *, 4> Visited;
2280 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2281 "Attribute 'preallocated' does not support unsized types!", V);
2282 Check(
2283 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2284 (1ULL << 32),
2285 "huge 'preallocated' arguments are unsupported", V);
2286 }
2287 }
2288
2289 if (Attrs.hasAttribute(Attribute::Initializes)) {
2290 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2291 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2292 V);
2294 "Attribute 'initializes' does not support unordered ranges", V);
2295 }
2296
2297 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2298 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2299 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2300 V);
2301 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2302 "Invalid value for 'nofpclass' test mask", V);
2303 }
2304 if (Attrs.hasAttribute(Attribute::Range)) {
2305 const ConstantRange &CR =
2306 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2308 "Range bit width must match type bit width!", V);
2309 }
2310}
2311
2312void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2313 const Value *V) {
2314 if (Attrs.hasFnAttr(Attr)) {
2315 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2316 unsigned N;
2317 if (S.getAsInteger(10, N))
2318 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2319 }
2320}
2321
2322// Check parameter attributes against a function type.
2323// The value V is printed in error messages.
2324void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2325 const Value *V, bool IsIntrinsic,
2326 bool IsInlineAsm) {
2327 if (Attrs.isEmpty())
2328 return;
2329
2330 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2331 Check(Attrs.hasParentContext(Context),
2332 "Attribute list does not match Module context!", &Attrs, V);
2333 for (const auto &AttrSet : Attrs) {
2334 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2335 "Attribute set does not match Module context!", &AttrSet, V);
2336 for (const auto &A : AttrSet) {
2337 Check(A.hasParentContext(Context),
2338 "Attribute does not match Module context!", &A, V);
2339 }
2340 }
2341 }
2342
2343 bool SawNest = false;
2344 bool SawReturned = false;
2345 bool SawSRet = false;
2346 bool SawSwiftSelf = false;
2347 bool SawSwiftAsync = false;
2348 bool SawSwiftError = false;
2349
2350 // Verify return value attributes.
2351 AttributeSet RetAttrs = Attrs.getRetAttrs();
2352 for (Attribute RetAttr : RetAttrs)
2353 Check(RetAttr.isStringAttribute() ||
2354 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2355 "Attribute '" + RetAttr.getAsString() +
2356 "' does not apply to function return values",
2357 V);
2358
2359 unsigned MaxParameterWidth = 0;
2360 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2361 if (Ty->isVectorTy()) {
2362 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2363 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2364 if (Size > MaxParameterWidth)
2365 MaxParameterWidth = Size;
2366 }
2367 }
2368 };
2369 GetMaxParameterWidth(FT->getReturnType());
2370 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2371
2372 // Verify parameter attributes.
2373 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2374 Type *Ty = FT->getParamType(i);
2375 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2376
2377 if (!IsIntrinsic) {
2378 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2379 "immarg attribute only applies to intrinsics", V);
2380 if (!IsInlineAsm)
2381 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2382 "Attribute 'elementtype' can only be applied to intrinsics"
2383 " and inline asm.",
2384 V);
2385 }
2386
2387 verifyParameterAttrs(ArgAttrs, Ty, V);
2388 GetMaxParameterWidth(Ty);
2389
2390 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2391 Check(!SawNest, "More than one parameter has attribute nest!", V);
2392 SawNest = true;
2393 }
2394
2395 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2396 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2397 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2398 "Incompatible argument and return types for 'returned' attribute",
2399 V);
2400 SawReturned = true;
2401 }
2402
2403 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2404 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2405 Check(i == 0 || i == 1,
2406 "Attribute 'sret' is not on first or second parameter!", V);
2407 SawSRet = true;
2408 }
2409
2410 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2411 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2412 SawSwiftSelf = true;
2413 }
2414
2415 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2416 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2417 SawSwiftAsync = true;
2418 }
2419
2420 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2421 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2422 SawSwiftError = true;
2423 }
2424
2425 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2426 Check(i == FT->getNumParams() - 1,
2427 "inalloca isn't on the last parameter!", V);
2428 }
2429 }
2430
2431 if (!Attrs.hasFnAttrs())
2432 return;
2433
2434 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2435 for (Attribute FnAttr : Attrs.getFnAttrs())
2436 Check(FnAttr.isStringAttribute() ||
2437 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2438 "Attribute '" + FnAttr.getAsString() +
2439 "' does not apply to functions!",
2440 V);
2441
2442 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2443 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2444 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2445
2446 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2447 Check(Attrs.hasFnAttr(Attribute::NoInline),
2448 "Attribute 'optnone' requires 'noinline'!", V);
2449
2450 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2451 "Attributes 'optsize and optnone' are incompatible!", V);
2452
2453 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2454 "Attributes 'minsize and optnone' are incompatible!", V);
2455
2456 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2457 "Attributes 'optdebug and optnone' are incompatible!", V);
2458 }
2459
2460 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2461 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2462 "Attributes "
2463 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2464 V);
2465
2466 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2467 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2468 "Attributes 'optsize and optdebug' are incompatible!", V);
2469
2470 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2471 "Attributes 'minsize and optdebug' are incompatible!", V);
2472 }
2473
2474 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2475 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2476 "Attribute writable and memory without argmem: write are incompatible!",
2477 V);
2478
2479 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2480 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2481 "Attributes 'aarch64_pstate_sm_enabled and "
2482 "aarch64_pstate_sm_compatible' are incompatible!",
2483 V);
2484 }
2485
2486 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2487 Attrs.hasFnAttr("aarch64_inout_za") +
2488 Attrs.hasFnAttr("aarch64_out_za") +
2489 Attrs.hasFnAttr("aarch64_preserves_za") +
2490 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2491 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2492 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2493 "'aarch64_za_state_agnostic' are mutually exclusive",
2494 V);
2495
2496 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2497 Attrs.hasFnAttr("aarch64_in_zt0") +
2498 Attrs.hasFnAttr("aarch64_inout_zt0") +
2499 Attrs.hasFnAttr("aarch64_out_zt0") +
2500 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2501 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2502 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2503 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2504 "'aarch64_za_state_agnostic' are mutually exclusive",
2505 V);
2506
2507 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2508 const GlobalValue *GV = cast<GlobalValue>(V);
2510 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2511 }
2512
2513 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2514 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2515 if (ParamNo >= FT->getNumParams()) {
2516 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2517 return false;
2518 }
2519
2520 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2521 CheckFailed("'allocsize' " + Name +
2522 " argument must refer to an integer parameter",
2523 V);
2524 return false;
2525 }
2526
2527 return true;
2528 };
2529
2530 if (!CheckParam("element size", Args->first))
2531 return;
2532
2533 if (Args->second && !CheckParam("number of elements", *Args->second))
2534 return;
2535 }
2536
2537 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2538 AllocFnKind K = Attrs.getAllocKind();
2540 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2541 if (!is_contained(
2542 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2543 Type))
2544 CheckFailed(
2545 "'allockind()' requires exactly one of alloc, realloc, and free");
2546 if ((Type == AllocFnKind::Free) &&
2547 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2548 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2549 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2550 "or aligned modifiers.");
2551 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2552 if ((K & ZeroedUninit) == ZeroedUninit)
2553 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2554 }
2555
2556 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2557 StringRef S = A.getValueAsString();
2558 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2559 Function *Variant = M.getFunction(S);
2560 if (Variant) {
2561 Attribute Family = Attrs.getFnAttr("alloc-family");
2562 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2563 if (Family.isValid())
2564 Check(VariantFamily.isValid() &&
2565 VariantFamily.getValueAsString() == Family.getValueAsString(),
2566 "'alloc-variant-zeroed' must name a function belonging to the "
2567 "same 'alloc-family'");
2568
2569 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2570 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2571 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2572 "'alloc-variant-zeroed' must name a function with "
2573 "'allockind(\"zeroed\")'");
2574
2575 Check(FT == Variant->getFunctionType(),
2576 "'alloc-variant-zeroed' must name a function with the same "
2577 "signature");
2578
2579 if (const Function *F = dyn_cast<Function>(V))
2580 Check(F->getCallingConv() == Variant->getCallingConv(),
2581 "'alloc-variant-zeroed' must name a function with the same "
2582 "calling convention");
2583 }
2584 }
2585
2586 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2587 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2588 if (VScaleMin == 0)
2589 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2590 else if (!isPowerOf2_32(VScaleMin))
2591 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2592 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2593 if (VScaleMax && VScaleMin > VScaleMax)
2594 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2595 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2596 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2597 }
2598
2599 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2600 StringRef FP = FPAttr.getValueAsString();
2601 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2602 FP != "non-leaf-no-reserve")
2603 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2604 }
2605
2606 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2607 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2608 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2609 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2610 .getValueAsString()
2611 .empty(),
2612 "\"patchable-function-entry-section\" must not be empty");
2613 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2614
2615 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2616 StringRef S = A.getValueAsString();
2617 if (S != "none" && S != "all" && S != "non-leaf")
2618 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2619 }
2620
2621 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2622 StringRef S = A.getValueAsString();
2623 if (S != "a_key" && S != "b_key")
2624 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2625 V);
2626 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2627 CheckFailed(
2628 "'sign-return-address-key' present without `sign-return-address`");
2629 }
2630 }
2631
2632 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2633 StringRef S = A.getValueAsString();
2634 if (S != "" && S != "true" && S != "false")
2635 CheckFailed(
2636 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2637 }
2638
2639 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2640 StringRef S = A.getValueAsString();
2641 if (S != "" && S != "true" && S != "false")
2642 CheckFailed(
2643 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2644 }
2645
2646 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2647 StringRef S = A.getValueAsString();
2648 if (S != "" && S != "true" && S != "false")
2649 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2650 V);
2651 }
2652
2653 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2654 StringRef S = A.getValueAsString();
2655 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2656 if (!Info)
2657 CheckFailed("invalid name for a VFABI variant: " + S, V);
2658 }
2659
2660 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2661 StringRef S = A.getValueAsString();
2663 S.split(Args, ',');
2664 Check(Args.size() >= 5,
2665 "modular-format attribute requires at least 5 arguments", V);
2666 unsigned FirstArgIdx;
2667 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2668 "modular-format attribute first arg index is not an integer", V);
2669 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2670 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2671 "modular-format attribute first arg index is out of bounds", V);
2672 }
2673
2674 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2675 StringRef S = A.getValueAsString();
2676 if (!S.empty()) {
2677 for (auto FeatureFlag : split(S, ',')) {
2678 if (FeatureFlag.empty())
2679 CheckFailed(
2680 "target-features attribute should not contain an empty string");
2681 else
2682 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2683 "target feature '" + FeatureFlag +
2684 "' must start with a '+' or '-'",
2685 V);
2686 }
2687 }
2688 }
2689}
2690void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2691 Check(MD->getNumOperands() == 2,
2692 "'unknown' !prof should have a single additional operand", MD);
2693 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2694 Check(PassName != nullptr,
2695 "'unknown' !prof should have an additional operand of type "
2696 "string");
2697 Check(!PassName->getString().empty(),
2698 "the 'unknown' !prof operand should not be an empty string");
2699}
2700
// Validate function-level metadata attachments. Only two kinds get dedicated
// checks here: !prof (entry-count style annotations) and !kcfi_type (a single
// constant integer type hash); all other attachment kinds pass through.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // A !prof node is a tag operand followed by at least one value operand.
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // "unknown" profiles get their own, looser validation.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand: must be a string naming the annotation kind.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand: the entry-count value itself.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      // !kcfi_type carries exactly one operand: a constant integer type hash.
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2750
// Walk the operand graph rooted at EntryC with an explicit worklist
// (iterative DFS), validating every ConstantExpr and ConstantPtrAuth found.
// ConstantExprVisited dedupes across calls, so each constant is checked once
// per verifier run even when shared among many users.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have nothing to recurse into.
  if (EntryC->getNumOperands() == 0)
    return;

  // Already processed (possibly via another user's traversal).
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  // Seed the worklist with the entry constant.
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      // Do not descend through globals; their initializers are checked when
      // the global itself is visited.
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2790
2791void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2792 if (CE->getOpcode() == Instruction::BitCast)
2793 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2794 CE->getType()),
2795 "Invalid bitcast", CE);
2796 else if (CE->getOpcode() == Instruction::PtrToAddr)
2797 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2798}
2799
// Validate a ptrauth constant: base pointer, key, discriminators, and
// deactivation symbol must all have the shapes the signing scheme requires
// (pointer-typed base, i32 key, i64 discriminator, per the diagnostics below).
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  // The signed constant is interchangeable with its base pointer, so the
  // types must agree exactly.
  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2824
2825bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2826 // There shouldn't be more attribute sets than there are parameters plus the
2827 // function and return value.
2828 return Attrs.getNumAttrSets() <= Params + 2;
2829}
2830
// Validate a call/callbr to inline asm against its parsed constraint string:
// indirect constraints require pointer-typed operands (with elementtype),
// elementtype is forbidden on direct constraints, and label constraints must
// line up with callbr indirect destinations.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;   // Index of the next call argument bound to a constraint.
  unsigned LabelNo = 0; // Number of label constraints seen.
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      // Indirect constraints read/write through a pointer operand.
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    // callbr: each label constraint must map to one indirect destination.
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    // Plain calls/invokes cannot branch, so label constraints are illegal.
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2872
2873/// Verify that statepoint intrinsic is well formed.
2874void Verifier::verifyStatepoint(const CallBase &Call) {
2875 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2876
2879 "gc.statepoint must read and write all memory to preserve "
2880 "reordering restrictions required by safepoint semantics",
2881 Call);
2882
2883 const int64_t NumPatchBytes =
2884 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2885 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2886 Check(NumPatchBytes >= 0,
2887 "gc.statepoint number of patchable bytes must be "
2888 "positive",
2889 Call);
2890
2891 Type *TargetElemType = Call.getParamElementType(2);
2892 Check(TargetElemType,
2893 "gc.statepoint callee argument must have elementtype attribute", Call);
2894 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2895 Check(TargetFuncType,
2896 "gc.statepoint callee elementtype must be function type", Call);
2897
2898 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2899 Check(NumCallArgs >= 0,
2900 "gc.statepoint number of arguments to underlying call "
2901 "must be positive",
2902 Call);
2903 const int NumParams = (int)TargetFuncType->getNumParams();
2904 if (TargetFuncType->isVarArg()) {
2905 Check(NumCallArgs >= NumParams,
2906 "gc.statepoint mismatch in number of vararg call args", Call);
2907
2908 // TODO: Remove this limitation
2909 Check(TargetFuncType->getReturnType()->isVoidTy(),
2910 "gc.statepoint doesn't support wrapping non-void "
2911 "vararg functions yet",
2912 Call);
2913 } else
2914 Check(NumCallArgs == NumParams,
2915 "gc.statepoint mismatch in number of call args", Call);
2916
2917 const uint64_t Flags
2918 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2919 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2920 "unknown flag used in gc.statepoint flags argument", Call);
2921
2922 // Verify that the types of the call parameter arguments match
2923 // the type of the wrapped callee.
2924 AttributeList Attrs = Call.getAttributes();
2925 for (int i = 0; i < NumParams; i++) {
2926 Type *ParamType = TargetFuncType->getParamType(i);
2927 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2928 Check(ArgType == ParamType,
2929 "gc.statepoint call argument does not match wrapped "
2930 "function type",
2931 Call);
2932
2933 if (TargetFuncType->isVarArg()) {
2934 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2935 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2936 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2937 }
2938 }
2939
2940 const int EndCallArgsInx = 4 + NumCallArgs;
2941
2942 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2943 Check(isa<ConstantInt>(NumTransitionArgsV),
2944 "gc.statepoint number of transition arguments "
2945 "must be constant integer",
2946 Call);
2947 const int NumTransitionArgs =
2948 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2949 Check(NumTransitionArgs == 0,
2950 "gc.statepoint w/inline transition bundle is deprecated", Call);
2951 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2952
2953 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2954 Check(isa<ConstantInt>(NumDeoptArgsV),
2955 "gc.statepoint number of deoptimization arguments "
2956 "must be constant integer",
2957 Call);
2958 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2959 Check(NumDeoptArgs == 0,
2960 "gc.statepoint w/inline deopt operands is deprecated", Call);
2961
2962 const int ExpectedNumArgs = 7 + NumCallArgs;
2963 Check(ExpectedNumArgs == (int)Call.arg_size(),
2964 "gc.statepoint too many arguments", Call);
2965
2966 // Check that the only uses of this gc.statepoint are gc.result or
2967 // gc.relocate calls which are tied to this statepoint and thus part
2968 // of the same statepoint sequence
2969 for (const User *U : Call.users()) {
2970 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2971 Check(UserCall, "illegal use of statepoint token", Call, U);
2972 if (!UserCall)
2973 continue;
2974 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2975 "gc.result or gc.relocate are the only value uses "
2976 "of a gc.statepoint",
2977 Call, U);
2978 if (isa<GCResultInst>(UserCall)) {
2979 Check(UserCall->getArgOperand(0) == &Call,
2980 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2981 } else if (isa<GCRelocateInst>(Call)) {
2982 Check(UserCall->getArgOperand(0) == &Call,
2983 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2984 }
2985 }
2986
2987 // Note: It is legal for a single derived pointer to be listed multiple
2988 // times. It's non-optimal, but it is legal. It can also happen after
2989 // insertion if we strip a bitcast away.
2990 // Note: It is really tempting to check that each base is relocated and
2991 // that a derived pointer is never reused as a base pointer. This turns
2992 // out to be problematic since optimizations run after safepoint insertion
2993 // can recognize equality properties that the insertion logic doesn't know
2994 // about. See example statepoint.ll in the verifier subdirectory
2995}
2996
2997void Verifier::verifyFrameRecoverIndices() {
2998 for (auto &Counts : FrameEscapeInfo) {
2999 Function *F = Counts.first;
3000 unsigned EscapedObjectCount = Counts.second.first;
3001 unsigned MaxRecoveredIndex = Counts.second.second;
3002 Check(MaxRecoveredIndex <= EscapedObjectCount,
3003 "all indices passed to llvm.localrecover must be less than the "
3004 "number of arguments passed to llvm.localescape in the parent "
3005 "function",
3006 F);
3007 }
3008}
3009
3010static Instruction *getSuccPad(Instruction *Terminator) {
3011 BasicBlock *UnwindDest;
3012 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3013 UnwindDest = II->getUnwindDest();
3014 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3015 UnwindDest = CSI->getUnwindDest();
3016 else
3017 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3018 return &*UnwindDest->getFirstNonPHIIt();
3019}
3020
// Detect cycles among sibling EH funclets: starting from each recorded pad,
// repeatedly follow its (unique) unwind successor. Revisiting a pad that is
// on the current walk means the funclets unwind to each other, which is
// illegal; the full cycle is collected for the diagnostic.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads fully checked on any previous walk (safe to prune).
  // Active: pads on the walk currently in progress (cycle sentinel).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Re-walk the cycle from SuccPad, recording each pad and its
        // terminator for the diagnostic, until we come back around.
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3063
// visitFunction - Verify that a function is ok.
//
// Checks, in order: global-value invariants, signature/argument agreement,
// attribute well-formedness, calling-convention restrictions, per-argument
// type rules, function-level metadata, the personality function, declaration
// vs. definition constraints, intrinsic address-taking, selected intrinsic
// signatures, and finally that every !dbg location chains back to the
// function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  // sret is only meaningful when the value is returned through memory.
  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable stack frame, so arguments may not be
  // referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // metadata/token/x86_amx arguments are reserved for intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Stop early once broken debug info has been diagnosed.
      if (BrokenDebugInfo)
        return;
    }
}
3401
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks: the block ends in a terminator, each PHI has exactly one entry per
// predecessor (with consistent values for repeated predecessors), every
// instruction's parent pointer refers back to this block, and no DbgRecords
// trail the terminator.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    // Sort predecessors so they can be matched pairwise against each PHI's
    // (likewise sorted) incoming blocks.
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3461
3462void Verifier::visitTerminator(Instruction &I) {
3463 // Ensure that terminators only exist at the end of the basic block.
3464 Check(&I == I.getParent()->getTerminator(),
3465 "Terminator found in the middle of a basic block!", I.getParent());
3466 visitInstruction(I);
3467}
3468
// Verify a conditional branch: per the diagnostic below, its condition must
// be of type i1; the common terminator checks then apply.
void Verifier::visitCondBrInst(CondBrInst &BI) {
      "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  visitTerminator(BI);
}
3474
3475void Verifier::visitReturnInst(ReturnInst &RI) {
3476 Function *F = RI.getParent()->getParent();
3477 unsigned N = RI.getNumOperands();
3478 if (F->getReturnType()->isVoidTy())
3479 Check(N == 0,
3480 "Found return instr that returns non-void in Function of void "
3481 "return type!",
3482 &RI, F->getReturnType());
3483 else
3484 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3485 "Function return type does not match operand "
3486 "type of return inst!",
3487 &RI, F->getReturnType());
3488
3489 // Check to make sure that the return value has necessary properties for
3490 // terminators...
3491 visitTerminator(RI);
3492}
3493
3494void Verifier::visitSwitchInst(SwitchInst &SI) {
3495 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3496 // Check to make sure that all of the constants in the switch instruction
3497 // have the same type as the switched-on value.
3498 Type *SwitchTy = SI.getCondition()->getType();
3499 SmallPtrSet<ConstantInt*, 32> Constants;
3500 for (auto &Case : SI.cases()) {
3501 Check(isa<ConstantInt>(Case.getCaseValue()),
3502 "Case value is not a constant integer.", &SI);
3503 Check(Case.getCaseValue()->getType() == SwitchTy,
3504 "Switch constants must all be same type as switch value!", &SI);
3505 Check(Constants.insert(Case.getCaseValue()).second,
3506 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3507 }
3508
3509 visitTerminator(SI);
3510}
3511
3512void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3514 "Indirectbr operand must have pointer type!", &BI);
3515 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3517 "Indirectbr destinations must all have pointer type!", &BI);
3518
3519 visitTerminator(BI);
3520}
3521
/// Verify a 'callbr'. Two forms are accepted: inline-asm goto (any asm that
/// cannot unwind), or a direct call to one of a small set of intrinsics
/// (currently only amdgcn_kill), whose single indirect destination must
/// begin with unreachable / amdgcn_unreachable.
/// NOTE(review): the first Check inside the !isInlineAsm() branch is
/// truncated in this view — confirm its condition against upstream.
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  if (!CBI.isInlineAsm()) {
        "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The indirect destination must start with either an unreachable
      // instruction or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm form: the asm must not be marked unwinding.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3553
3554void Verifier::visitSelectInst(SelectInst &SI) {
3555 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3556 SI.getOperand(2)),
3557 "Invalid operands for select instruction!", &SI);
3558
3559 Check(SI.getTrueValue()->getType() == SI.getType(),
3560 "Select values must have same type as select instruction!", &SI);
3561 visitInstruction(SI);
3562}
3563
3564/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3565/// a pass, if any exist, it's an error.
3566///
3567void Verifier::visitUserOp1(Instruction &I) {
3568 Check(false, "User-defined operators should not live outside of a pass!", &I);
3569}
3570
3571void Verifier::visitTruncInst(TruncInst &I) {
3572 // Get the source and destination types
3573 Type *SrcTy = I.getOperand(0)->getType();
3574 Type *DestTy = I.getType();
3575
3576 // Get the size of the types in bits, we'll need this later
3577 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3578 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3579
3580 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3581 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3582 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3583 "trunc source and destination must both be a vector or neither", &I);
3584 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3585
3586 visitInstruction(I);
3587}
3588
3589void Verifier::visitZExtInst(ZExtInst &I) {
3590 // Get the source and destination types
3591 Type *SrcTy = I.getOperand(0)->getType();
3592 Type *DestTy = I.getType();
3593
3594 // Get the size of the types in bits, we'll need this later
3595 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3596 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3597 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3598 "zext source and destination must both be a vector or neither", &I);
3599 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3600 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3601
3602 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3603
3604 visitInstruction(I);
3605}
3606
3607void Verifier::visitSExtInst(SExtInst &I) {
3608 // Get the source and destination types
3609 Type *SrcTy = I.getOperand(0)->getType();
3610 Type *DestTy = I.getType();
3611
3612 // Get the size of the types in bits, we'll need this later
3613 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3614 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3615
3616 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3617 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3618 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3619 "sext source and destination must both be a vector or neither", &I);
3620 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3621
3622 visitInstruction(I);
3623}
3624
3625void Verifier::visitFPTruncInst(FPTruncInst &I) {
3626 // Get the source and destination types
3627 Type *SrcTy = I.getOperand(0)->getType();
3628 Type *DestTy = I.getType();
3629 // Get the size of the types in bits, we'll need this later
3630 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3631 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3632
3633 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3634 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3635 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3636 "fptrunc source and destination must both be a vector or neither", &I);
3637 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3638
3639 visitInstruction(I);
3640}
3641
3642void Verifier::visitFPExtInst(FPExtInst &I) {
3643 // Get the source and destination types
3644 Type *SrcTy = I.getOperand(0)->getType();
3645 Type *DestTy = I.getType();
3646
3647 // Get the size of the types in bits, we'll need this later
3648 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3649 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3650
3651 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3652 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3653 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3654 "fpext source and destination must both be a vector or neither", &I);
3655 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3656
3657 visitInstruction(I);
3658}
3659
3660void Verifier::visitUIToFPInst(UIToFPInst &I) {
3661 // Get the source and destination types
3662 Type *SrcTy = I.getOperand(0)->getType();
3663 Type *DestTy = I.getType();
3664
3665 bool SrcVec = SrcTy->isVectorTy();
3666 bool DstVec = DestTy->isVectorTy();
3667
3668 Check(SrcVec == DstVec,
3669 "UIToFP source and dest must both be vector or scalar", &I);
3670 Check(SrcTy->isIntOrIntVectorTy(),
3671 "UIToFP source must be integer or integer vector", &I);
3672 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3673 &I);
3674
3675 if (SrcVec && DstVec)
3676 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3677 cast<VectorType>(DestTy)->getElementCount(),
3678 "UIToFP source and dest vector length mismatch", &I);
3679
3680 visitInstruction(I);
3681}
3682
3683void Verifier::visitSIToFPInst(SIToFPInst &I) {
3684 // Get the source and destination types
3685 Type *SrcTy = I.getOperand(0)->getType();
3686 Type *DestTy = I.getType();
3687
3688 bool SrcVec = SrcTy->isVectorTy();
3689 bool DstVec = DestTy->isVectorTy();
3690
3691 Check(SrcVec == DstVec,
3692 "SIToFP source and dest must both be vector or scalar", &I);
3693 Check(SrcTy->isIntOrIntVectorTy(),
3694 "SIToFP source must be integer or integer vector", &I);
3695 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3696 &I);
3697
3698 if (SrcVec && DstVec)
3699 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3700 cast<VectorType>(DestTy)->getElementCount(),
3701 "SIToFP source and dest vector length mismatch", &I);
3702
3703 visitInstruction(I);
3704}
3705
3706void Verifier::visitFPToUIInst(FPToUIInst &I) {
3707 // Get the source and destination types
3708 Type *SrcTy = I.getOperand(0)->getType();
3709 Type *DestTy = I.getType();
3710
3711 bool SrcVec = SrcTy->isVectorTy();
3712 bool DstVec = DestTy->isVectorTy();
3713
3714 Check(SrcVec == DstVec,
3715 "FPToUI source and dest must both be vector or scalar", &I);
3716 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3717 Check(DestTy->isIntOrIntVectorTy(),
3718 "FPToUI result must be integer or integer vector", &I);
3719
3720 if (SrcVec && DstVec)
3721 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3722 cast<VectorType>(DestTy)->getElementCount(),
3723 "FPToUI source and dest vector length mismatch", &I);
3724
3725 visitInstruction(I);
3726}
3727
3728void Verifier::visitFPToSIInst(FPToSIInst &I) {
3729 // Get the source and destination types
3730 Type *SrcTy = I.getOperand(0)->getType();
3731 Type *DestTy = I.getType();
3732
3733 bool SrcVec = SrcTy->isVectorTy();
3734 bool DstVec = DestTy->isVectorTy();
3735
3736 Check(SrcVec == DstVec,
3737 "FPToSI source and dest must both be vector or scalar", &I);
3738 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3739 Check(DestTy->isIntOrIntVectorTy(),
3740 "FPToSI result must be integer or integer vector", &I);
3741
3742 if (SrcVec && DstVec)
3743 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3744 cast<VectorType>(DestTy)->getElementCount(),
3745 "FPToSI source and dest vector length mismatch", &I);
3746
3747 visitInstruction(I);
3748}
3749
3750void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3751 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3752 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3753 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3754 V);
3755
3756 if (SrcTy->isVectorTy()) {
3757 auto *VSrc = cast<VectorType>(SrcTy);
3758 auto *VDest = cast<VectorType>(DestTy);
3759 Check(VSrc->getElementCount() == VDest->getElementCount(),
3760 "PtrToAddr vector length mismatch", V);
3761 }
3762
3763 Type *AddrTy = DL.getAddressType(SrcTy);
3764 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3765}
3766
3767void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3768 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3769 visitInstruction(I);
3770}
3771
3772void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3773 // Get the source and destination types
3774 Type *SrcTy = I.getOperand(0)->getType();
3775 Type *DestTy = I.getType();
3776
3777 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3778
3779 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3780 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3781 &I);
3782
3783 if (SrcTy->isVectorTy()) {
3784 auto *VSrc = cast<VectorType>(SrcTy);
3785 auto *VDest = cast<VectorType>(DestTy);
3786 Check(VSrc->getElementCount() == VDest->getElementCount(),
3787 "PtrToInt Vector length mismatch", &I);
3788 }
3789
3790 visitInstruction(I);
3791}
3792
3793void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3794 // Get the source and destination types
3795 Type *SrcTy = I.getOperand(0)->getType();
3796 Type *DestTy = I.getType();
3797
3798 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3799 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3800
3801 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3802 &I);
3803 if (SrcTy->isVectorTy()) {
3804 auto *VSrc = cast<VectorType>(SrcTy);
3805 auto *VDest = cast<VectorType>(DestTy);
3806 Check(VSrc->getElementCount() == VDest->getElementCount(),
3807 "IntToPtr Vector length mismatch", &I);
3808 }
3809 visitInstruction(I);
3810}
3811
3812void Verifier::visitBitCastInst(BitCastInst &I) {
3813 Check(
3814 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3815 "Invalid bitcast", &I);
3816 visitInstruction(I);
3817}
3818
3819void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3820 Type *SrcTy = I.getOperand(0)->getType();
3821 Type *DestTy = I.getType();
3822
3823 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3824 &I);
3825 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3826 &I);
3828 "AddrSpaceCast must be between different address spaces", &I);
3829 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3830 Check(SrcVTy->getElementCount() ==
3831 cast<VectorType>(DestTy)->getElementCount(),
3832 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3833 visitInstruction(I);
3834}
3835
3836/// visitPHINode - Ensure that a PHI node is well formed.
3837///
3838void Verifier::visitPHINode(PHINode &PN) {
3839 // Ensure that the PHI nodes are all grouped together at the top of the block.
3840 // This can be tested by checking whether the instruction before this is
3841 // either nonexistent (because this is begin()) or is a PHI node. If not,
3842 // then there is some other instruction before a PHI.
3843 Check(&PN == &PN.getParent()->front() ||
3845 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3846
3847 // Check that a PHI doesn't yield a Token.
3848 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3849
3850 // Check that all of the values of the PHI node have the same type as the
3851 // result.
3852 for (Value *IncValue : PN.incoming_values()) {
3853 Check(PN.getType() == IncValue->getType(),
3854 "PHI node operands are not the same type as the result!", &PN);
3855 }
3856
3857 // All other PHI node constraints are checked in the visitBasicBlock method.
3858
3859 visitInstruction(PN);
3860}
3861
/// Verify properties common to every call site (call, invoke, callbr):
/// argument count/type agreement with the callee FunctionType, attribute
/// well-formedness, inalloca/swifterror/immarg/preallocated parameter rules,
/// vararg-tail attribute rules, operand-bundle multiplicity and shape, and
/// debug-location requirements.
/// NOTE(review): several leading lines of multi-line statements are
/// truncated in this dump (the called-operand pointer check, the Callee
/// initializer, the calling-convention check, the preallocated-bundle query,
/// the intrinsic dispatch guard, the attachedcall tag test, and the
/// debug-location Check) — confirm each against upstream before relying on
/// the exact conditions.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getFunctionType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  // Intrinsics are exempt: their signatures are checked elsewhere.
  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
        "denormal_fpenv attribute may not apply to call sites", Call);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      // immarg operands must be literal constants at the call site.
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      // Exactly one of {preallocated bundle, musttail} must hold.
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  // Record this call site for inter-procedural convergence checking.
  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4170
4171void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4172 StringRef Context) {
4173 Check(!Attrs.contains(Attribute::InAlloca),
4174 Twine("inalloca attribute not allowed in ") + Context);
4175 Check(!Attrs.contains(Attribute::InReg),
4176 Twine("inreg attribute not allowed in ") + Context);
4177 Check(!Attrs.contains(Attribute::SwiftError),
4178 Twine("swifterror attribute not allowed in ") + Context);
4179 Check(!Attrs.contains(Attribute::Preallocated),
4180 Twine("preallocated attribute not allowed in ") + Context);
4181 Check(!Attrs.contains(Attribute::ByRef),
4182 Twine("byref attribute not allowed in ") + Context);
4183}
4184
4185/// Two types are "congruent" if they are identical, or if they are both pointer
4186/// types with different pointee types and the same address space.
4187static bool isTypeCongruent(Type *L, Type *R) {
4188 if (L == R)
4189 return true;
4192 if (!PL || !PR)
4193 return false;
4194 return PL->getAddressSpace() == PR->getAddressSpace();
4195}
4196
4197static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4198 static const Attribute::AttrKind ABIAttrs[] = {
4199 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4200 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4201 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4202 Attribute::ByRef};
4203 AttrBuilder Copy(C);
4204 for (auto AK : ABIAttrs) {
4205 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4206 if (Attr.isValid())
4207 Copy.addAttribute(Attr);
4208 }
4209
4210 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4211 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4212 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4213 Attrs.hasParamAttr(I, Attribute::ByRef)))
4214 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4215 return Copy;
4216}
4217
/// Enforce the IR rules for a call marked 'musttail': no inline asm, caller
/// and callee agree on varargs-ness, return types are congruent, calling
/// conventions match, the call immediately precedes a ret (modulo one
/// optional bitcast) that returns the call's value or void, and ABI-impacting
/// parameter attributes line up. tailcc/swifttailcc get a relaxed prototype
/// rule but a stricter attribute allowlist.
/// NOTE(review): the initializer of `Next` (the instruction after the call)
/// and the final disjunct of the returned-value Check are truncated in this
/// dump — confirm both against upstream.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space. (Intrinsics are exempt from the prototype match.)
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4304
4305void Verifier::visitCallInst(CallInst &CI) {
4306 visitCallBase(CI);
4307
4308 if (CI.isMustTailCall())
4309 verifyMustTailCall(CI);
4310}
4311
4312void Verifier::visitInvokeInst(InvokeInst &II) {
4313 visitCallBase(II);
4314
4315 // Verify that the first non-PHI instruction of the unwind destination is an
4316 // exception handling instruction.
4317 Check(
4318 II.getUnwindDest()->isEHPad(),
4319 "The unwind destination does not have an exception handling instruction!",
4320 &II);
4321
4322 visitTerminator(II);
4323}
4324
4325/// visitUnaryOperator - Check the argument to the unary operator.
4326///
4327void Verifier::visitUnaryOperator(UnaryOperator &U) {
4328 Check(U.getType() == U.getOperand(0)->getType(),
4329 "Unary operators must have same type for"
4330 "operands and result!",
4331 &U);
4332
4333 switch (U.getOpcode()) {
4334 // Check that floating-point arithmetic operators are only used with
4335 // floating-point operands.
4336 case Instruction::FNeg:
4337 Check(U.getType()->isFPOrFPVectorTy(),
4338 "FNeg operator only works with float types!", &U);
4339 break;
4340 default:
4341 llvm_unreachable("Unknown UnaryOperator opcode!");
4342 }
4343
4344 visitInstruction(U);
4345}
4346
4347/// visitBinaryOperator - Check that both arguments to the binary operator are
4348/// of the same type!
4349///
4350void Verifier::visitBinaryOperator(BinaryOperator &B) {
4351 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4352 "Both operands to a binary operator are not of the same type!", &B);
4353
4354 switch (B.getOpcode()) {
4355 // Check that integer arithmetic operators are only used with
4356 // integral operands.
4357 case Instruction::Add:
4358 case Instruction::Sub:
4359 case Instruction::Mul:
4360 case Instruction::SDiv:
4361 case Instruction::UDiv:
4362 case Instruction::SRem:
4363 case Instruction::URem:
4364 Check(B.getType()->isIntOrIntVectorTy(),
4365 "Integer arithmetic operators only work with integral types!", &B);
4366 Check(B.getType() == B.getOperand(0)->getType(),
4367 "Integer arithmetic operators must have same type "
4368 "for operands and result!",
4369 &B);
4370 break;
4371 // Check that floating-point arithmetic operators are only used with
4372 // floating-point operands.
4373 case Instruction::FAdd:
4374 case Instruction::FSub:
4375 case Instruction::FMul:
4376 case Instruction::FDiv:
4377 case Instruction::FRem:
4378 Check(B.getType()->isFPOrFPVectorTy(),
4379 "Floating-point arithmetic operators only work with "
4380 "floating-point types!",
4381 &B);
4382 Check(B.getType() == B.getOperand(0)->getType(),
4383 "Floating-point arithmetic operators must have same type "
4384 "for operands and result!",
4385 &B);
4386 break;
4387 // Check that logical operators are only used with integral operands.
4388 case Instruction::And:
4389 case Instruction::Or:
4390 case Instruction::Xor:
4391 Check(B.getType()->isIntOrIntVectorTy(),
4392 "Logical operators only work with integral types!", &B);
4393 Check(B.getType() == B.getOperand(0)->getType(),
4394 "Logical operators must have same type for operands and result!", &B);
4395 break;
4396 case Instruction::Shl:
4397 case Instruction::LShr:
4398 case Instruction::AShr:
4399 Check(B.getType()->isIntOrIntVectorTy(),
4400 "Shifts only work with integral types!", &B);
4401 Check(B.getType() == B.getOperand(0)->getType(),
4402 "Shift return type must be same as operands!", &B);
4403 break;
4404 default:
4405 llvm_unreachable("Unknown BinaryOperator opcode!");
4406 }
4407
4408 visitInstruction(B);
4409}
4410
4411void Verifier::visitICmpInst(ICmpInst &IC) {
4412 // Check that the operands are the same type
4413 Type *Op0Ty = IC.getOperand(0)->getType();
4414 Type *Op1Ty = IC.getOperand(1)->getType();
4415 Check(Op0Ty == Op1Ty,
4416 "Both operands to ICmp instruction are not of the same type!", &IC);
4417 // Check that the operands are the right type
4418 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4419 "Invalid operand types for ICmp instruction", &IC);
4420 // Check that the predicate is valid.
4421 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4422
4423 visitInstruction(IC);
4424}
4425
4426void Verifier::visitFCmpInst(FCmpInst &FC) {
4427 // Check that the operands are the same type
4428 Type *Op0Ty = FC.getOperand(0)->getType();
4429 Type *Op1Ty = FC.getOperand(1)->getType();
4430 Check(Op0Ty == Op1Ty,
4431 "Both operands to FCmp instruction are not of the same type!", &FC);
4432 // Check that the operands are the right type
4433 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4434 &FC);
4435 // Check that the predicate is valid.
4436 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4437
4438 visitInstruction(FC);
4439}
4440
4441void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4443 "Invalid extractelement operands!", &EI);
4444 visitInstruction(EI);
4445}
4446
4447void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4448 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4449 IE.getOperand(2)),
4450 "Invalid insertelement operands!", &IE);
4451 visitInstruction(IE);
4452}
4453
4454void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4456 SV.getShuffleMask()),
4457 "Invalid shufflevector operands!", &SV);
4458 visitInstruction(SV);
4459}
4460
4461void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4462 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4463
4464 Check(isa<PointerType>(TargetTy),
4465 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4466 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4467
4468 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4469 Check(!STy->isScalableTy(),
4470 "getelementptr cannot target structure that contains scalable vector"
4471 "type",
4472 &GEP);
4473 }
4474
4475 SmallVector<Value *, 16> Idxs(GEP.indices());
4476 Check(
4477 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4478 "GEP indexes must be integers", &GEP);
4479 Type *ElTy =
4480 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4481 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4482
4483 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4484
4485 Check(PtrTy && GEP.getResultElementType() == ElTy,
4486 "GEP is not of right type for indices!", &GEP, ElTy);
4487
4488 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4489 // Additional checks for vector GEPs.
4490 ElementCount GEPWidth = GEPVTy->getElementCount();
4491 if (GEP.getPointerOperandType()->isVectorTy())
4492 Check(
4493 GEPWidth ==
4494 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4495 "Vector GEP result width doesn't match operand's", &GEP);
4496 for (Value *Idx : Idxs) {
4497 Type *IndexTy = Idx->getType();
4498 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4499 ElementCount IndexWidth = IndexVTy->getElementCount();
4500 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4501 }
4502 Check(IndexTy->isIntOrIntVectorTy(),
4503 "All GEP indices should be of integer type");
4504 }
4505 }
4506
4507 // Check that GEP does not index into a vector with non-byte-addressable
4508 // elements.
4510 GTI != GTE; ++GTI) {
4511 if (GTI.isVector()) {
4512 Type *ElemTy = GTI.getIndexedType();
4513 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4514 "GEP into vector with non-byte-addressable element type", &GEP);
4515 }
4516 }
4517
4518 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4519 "GEP address space doesn't match type", &GEP);
4520
4521 visitInstruction(GEP);
4522}
4523
4524static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4525 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4526}
4527
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs, so an odd count means a half-written
  // range list.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // noalias.addrspace ranges are over address-space numbers (always i32);
    // every other kind must match the instruction's scalar type.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // The full set is expressible only for !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive ranges must be disjoint, in ascending (signed) order, and
      // not mergeable into a single range.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // Ranges may wrap around, so with more than two entries the first and last
  // must also be checked against each other (two entries are already covered
  // by the loop above).
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4592
4593void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4594 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4595 "precondition violation");
4596 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4597}
4598
4599void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
4600 Type *Ty) {
4601 Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
4602 "nofpclass only applies to floating-point typed loads", I);
4603
4604 Check(NoFPClass->getNumOperands() == 1,
4605 "nofpclass must have exactly one entry", NoFPClass);
4606 ConstantInt *MaskVal =
4608 Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
4609 "nofpclass entry must be a constant i32", NoFPClass);
4610 uint32_t Val = MaskVal->getZExtValue();
4611 Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
4612 I);
4613
4614 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
4615 "Invalid value for 'nofpclass' test mask", NoFPClass, I);
4616}
4617
4618void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4619 Type *Ty) {
4620 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4621 "precondition violation");
4622 verifyRangeLikeMetadata(I, Range, Ty,
4623 RangeLikeMetadataKind::NoaliasAddrspace);
4624}
4625
4626void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4627 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4628 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4629 Check(!(Size & (Size - 1)),
4630 "atomic memory access' operand must have a power-of-two size", Ty, I);
4631}
4632
4633void Verifier::visitLoadInst(LoadInst &LI) {
4635 Check(PTy, "Load operand must be a pointer.", &LI);
4636 Type *ElTy = LI.getType();
4637 if (MaybeAlign A = LI.getAlign()) {
4638 Check(A->value() <= Value::MaximumAlignment,
4639 "huge alignment values are unsupported", &LI);
4640 }
4641 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4642 if (LI.isAtomic()) {
4643 Check(LI.getOrdering() != AtomicOrdering::Release &&
4644 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4645 "Load cannot have Release ordering", &LI);
4646 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4647 ElTy->getScalarType()->isByteTy() ||
4649 "atomic load operand must have integer, byte, pointer, floating "
4650 "point, or vector type!",
4651 ElTy, &LI);
4652
4653 checkAtomicMemAccessSize(ElTy, &LI);
4654 } else {
4656 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4657 }
4658
4659 visitInstruction(LI);
4660}
4661
4662void Verifier::visitStoreInst(StoreInst &SI) {
4663 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4664 Check(PTy, "Store operand must be a pointer.", &SI);
4665 Type *ElTy = SI.getOperand(0)->getType();
4666 if (MaybeAlign A = SI.getAlign()) {
4667 Check(A->value() <= Value::MaximumAlignment,
4668 "huge alignment values are unsupported", &SI);
4669 }
4670 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4671 if (SI.isAtomic()) {
4672 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4673 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4674 "Store cannot have Acquire ordering", &SI);
4675 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4676 ElTy->getScalarType()->isByteTy() ||
4678 "atomic store operand must have integer, byte, pointer, floating "
4679 "point, or vector type!",
4680 ElTy, &SI);
4681 checkAtomicMemAccessSize(ElTy, &SI);
4682 } else {
4683 Check(SI.getSyncScopeID() == SyncScope::System,
4684 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4685 }
4686 visitInstruction(SI);
4687}
4688
4689/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4690void Verifier::verifySwiftErrorCall(CallBase &Call,
4691 const Value *SwiftErrorVal) {
4692 for (const auto &I : llvm::enumerate(Call.args())) {
4693 if (I.value() == SwiftErrorVal) {
4694 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4695 "swifterror value when used in a callsite should be marked "
4696 "with swifterror attribute",
4697 SwiftErrorVal, Call);
4698 }
4699 }
4700}
4701
4702void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4703 // Check that swifterror value is only used by loads, stores, or as
4704 // a swifterror argument.
4705 for (const User *U : SwiftErrorVal->users()) {
4707 isa<InvokeInst>(U),
4708 "swifterror value can only be loaded and stored from, or "
4709 "as a swifterror argument!",
4710 SwiftErrorVal, U);
4711 // If it is used by a store, check it is the second operand.
4712 if (auto StoreI = dyn_cast<StoreInst>(U))
4713 Check(StoreI->getOperand(1) == SwiftErrorVal,
4714 "swifterror value should be the second operand when used "
4715 "by stores",
4716 SwiftErrorVal, U);
4717 if (auto *Call = dyn_cast<CallBase>(U))
4718 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4719 }
4720}
4721
4722void Verifier::visitAllocaInst(AllocaInst &AI) {
4723 Type *Ty = AI.getAllocatedType();
4724 SmallPtrSet<Type*, 4> Visited;
4725 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4726 // Check if it's a target extension type that disallows being used on the
4727 // stack.
4729 "Alloca has illegal target extension type", &AI);
4731 "Alloca array size must have integer type", &AI);
4732 if (MaybeAlign A = AI.getAlign()) {
4733 Check(A->value() <= Value::MaximumAlignment,
4734 "huge alignment values are unsupported", &AI);
4735 }
4736
4737 if (AI.isSwiftError()) {
4738 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4740 "swifterror alloca must not be array allocation", &AI);
4741 verifySwiftErrorValue(&AI);
4742 }
4743
4744 if (TT.isAMDGPU()) {
4746 "alloca on amdgpu must be in addrspace(5)", &AI);
4747 }
4748
4749 visitInstruction(AI);
4750}
4751
4752void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4753 Type *ElTy = CXI.getOperand(1)->getType();
4754 Check(ElTy->isIntOrPtrTy(),
4755 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4756 checkAtomicMemAccessSize(ElTy, &CXI);
4757 visitInstruction(CXI);
4758}
4759
4760void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4761 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4762 "atomicrmw instructions cannot be unordered.", &RMWI);
4763 auto Op = RMWI.getOperation();
4764 Type *ElTy = RMWI.getOperand(1)->getType();
4765 if (Op == AtomicRMWInst::Xchg) {
4766 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4767 ElTy->isPointerTy(),
4768 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4769 " operand must have integer or floating point type!",
4770 &RMWI, ElTy);
4771 } else if (AtomicRMWInst::isFPOperation(Op)) {
4773 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4774 " operand must have floating-point or fixed vector of floating-point "
4775 "type!",
4776 &RMWI, ElTy);
4777 } else {
4778 Check(ElTy->isIntegerTy(),
4779 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4780 " operand must have integer type!",
4781 &RMWI, ElTy);
4782 }
4783 checkAtomicMemAccessSize(ElTy, &RMWI);
4785 "Invalid binary operation!", &RMWI);
4786 visitInstruction(RMWI);
4787}
4788
4789void Verifier::visitFenceInst(FenceInst &FI) {
4790 const AtomicOrdering Ordering = FI.getOrdering();
4791 Check(Ordering == AtomicOrdering::Acquire ||
4792 Ordering == AtomicOrdering::Release ||
4793 Ordering == AtomicOrdering::AcquireRelease ||
4794 Ordering == AtomicOrdering::SequentiallyConsistent,
4795 "fence instructions may only have acquire, release, acq_rel, or "
4796 "seq_cst ordering.",
4797 &FI);
4798 visitInstruction(FI);
4799}
4800
4801void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4803 EVI.getIndices()) == EVI.getType(),
4804 "Invalid ExtractValueInst operands!", &EVI);
4805
4806 visitInstruction(EVI);
4807}
4808
4809void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4811 IVI.getIndices()) ==
4812 IVI.getOperand(1)->getType(),
4813 "Invalid InsertValueInst operands!", &IVI);
4814
4815 visitInstruction(IVI);
4816}
4817
4818static Value *getParentPad(Value *EHPad) {
4819 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4820 return FPI->getParentPad();
4821
4822 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4823}
4824
4825void Verifier::visitEHPadPredecessors(Instruction &I) {
4826 assert(I.isEHPad());
4827
4828 BasicBlock *BB = I.getParent();
4829 Function *F = BB->getParent();
4830
4831 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4832
4833 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4834 // The landingpad instruction defines its parent as a landing pad block. The
4835 // landing pad block may be branched to only by the unwind edge of an
4836 // invoke.
4837 for (BasicBlock *PredBB : predecessors(BB)) {
4838 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4839 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4840 "Block containing LandingPadInst must be jumped to "
4841 "only by the unwind edge of an invoke.",
4842 LPI);
4843 }
4844 return;
4845 }
4846 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4847 if (!pred_empty(BB))
4848 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4849 "Block containg CatchPadInst must be jumped to "
4850 "only by its catchswitch.",
4851 CPI);
4852 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4853 "Catchswitch cannot unwind to one of its catchpads",
4854 CPI->getCatchSwitch(), CPI);
4855 return;
4856 }
4857
4858 // Verify that each pred has a legal terminator with a legal to/from EH
4859 // pad relationship.
4860 Instruction *ToPad = &I;
4861 Value *ToPadParent = getParentPad(ToPad);
4862 for (BasicBlock *PredBB : predecessors(BB)) {
4863 Instruction *TI = PredBB->getTerminator();
4864 Value *FromPad;
4865 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4866 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4867 "EH pad must be jumped to via an unwind edge", ToPad, II);
4868 auto *CalledFn =
4869 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4870 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4871 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4872 continue;
4873 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4874 FromPad = Bundle->Inputs[0];
4875 else
4876 FromPad = ConstantTokenNone::get(II->getContext());
4877 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4878 FromPad = CRI->getOperand(0);
4879 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4880 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4881 FromPad = CSI;
4882 } else {
4883 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4884 }
4885
4886 // The edge may exit from zero or more nested pads.
4887 SmallPtrSet<Value *, 8> Seen;
4888 for (;; FromPad = getParentPad(FromPad)) {
4889 Check(FromPad != ToPad,
4890 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4891 if (FromPad == ToPadParent) {
4892 // This is a legal unwind edge.
4893 break;
4894 }
4895 Check(!isa<ConstantTokenNone>(FromPad),
4896 "A single unwind edge may only enter one EH pad", TI);
4897 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4898 FromPad);
4899
4900 // This will be diagnosed on the corresponding instruction already. We
4901 // need the extra check here to make sure getParentPad() works.
4902 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4903 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4904 }
4905 }
4906}
4907
4908void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4909 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4910 // isn't a cleanup.
4911 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4912 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4913
4914 visitEHPadPredecessors(LPI);
4915
4916 if (!LandingPadResultTy)
4917 LandingPadResultTy = LPI.getType();
4918 else
4919 Check(LandingPadResultTy == LPI.getType(),
4920 "The landingpad instruction should have a consistent result type "
4921 "inside a function.",
4922 &LPI);
4923
4924 Function *F = LPI.getParent()->getParent();
4925 Check(F->hasPersonalityFn(),
4926 "LandingPadInst needs to be in a function with a personality.", &LPI);
4927
4928 // The landingpad instruction must be the first non-PHI instruction in the
4929 // block.
4930 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4931 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4932
4933 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4934 Constant *Clause = LPI.getClause(i);
4935 if (LPI.isCatch(i)) {
4936 Check(isa<PointerType>(Clause->getType()),
4937 "Catch operand does not have pointer type!", &LPI);
4938 } else {
4939 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4941 "Filter operand is not an array of constants!", &LPI);
4942 }
4943 }
4944
4945 visitInstruction(LPI);
4946}
4947
4948void Verifier::visitResumeInst(ResumeInst &RI) {
4950 "ResumeInst needs to be in a function with a personality.", &RI);
4951
4952 if (!LandingPadResultTy)
4953 LandingPadResultTy = RI.getValue()->getType();
4954 else
4955 Check(LandingPadResultTy == RI.getValue()->getType(),
4956 "The resume instruction should have a consistent result type "
4957 "inside a function.",
4958 &RI);
4959
4960 visitTerminator(RI);
4961}
4962
4963void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4964 BasicBlock *BB = CPI.getParent();
4965
4966 Function *F = BB->getParent();
4967 Check(F->hasPersonalityFn(),
4968 "CatchPadInst needs to be in a function with a personality.", &CPI);
4969
4971 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4972 CPI.getParentPad());
4973
4974 // The catchpad instruction must be the first non-PHI instruction in the
4975 // block.
4976 Check(&*BB->getFirstNonPHIIt() == &CPI,
4977 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4978
4979 visitEHPadPredecessors(CPI);
4980 visitFuncletPadInst(CPI);
4981}
4982
4983void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4984 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4985 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4986 CatchReturn.getOperand(0));
4987
4988 visitTerminator(CatchReturn);
4989}
4990
4991void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4992 BasicBlock *BB = CPI.getParent();
4993
4994 Function *F = BB->getParent();
4995 Check(F->hasPersonalityFn(),
4996 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4997
4998 // The cleanuppad instruction must be the first non-PHI instruction in the
4999 // block.
5000 Check(&*BB->getFirstNonPHIIt() == &CPI,
5001 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5002
5003 auto *ParentPad = CPI.getParentPad();
5004 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5005 "CleanupPadInst has an invalid parent.", &CPI);
5006
5007 visitEHPadPredecessors(CPI);
5008 visitFuncletPadInst(CPI);
5009}
5010
/// Verify the unwind-destination consistency rules for a funclet pad
/// (catchpad or cleanuppad): every unwind edge that exits \p FPI must agree
/// on a single unwind destination, which must also match the parent
/// catchswitch's unwind destination when there is one.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First user whose unwind edge exits FPI, and the pad that edge targets;
  // every later exiting edge must agree with these.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  // Worklist of FPI plus nested cleanup pads whose unwind destination can
  // only be discovered by scanning their own users.
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    // Set when an unwind edge found below resolves some ancestors of
    // CurrentPad; those ancestors can then be popped from the worklist.
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's unwind edges must additionally agree with its parent
    // catchswitch's unwind destination (token none when it unwinds to caller).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5170
5171void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5172 BasicBlock *BB = CatchSwitch.getParent();
5173
5174 Function *F = BB->getParent();
5175 Check(F->hasPersonalityFn(),
5176 "CatchSwitchInst needs to be in a function with a personality.",
5177 &CatchSwitch);
5178
5179 // The catchswitch instruction must be the first non-PHI instruction in the
5180 // block.
5181 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5182 "CatchSwitchInst not the first non-PHI instruction in the block.",
5183 &CatchSwitch);
5184
5185 auto *ParentPad = CatchSwitch.getParentPad();
5186 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5187 "CatchSwitchInst has an invalid parent.", ParentPad);
5188
5189 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5190 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5191 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5192 "CatchSwitchInst must unwind to an EH block which is not a "
5193 "landingpad.",
5194 &CatchSwitch);
5195
5196 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5197 if (getParentPad(&*I) == ParentPad)
5198 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5199 }
5200
5201 Check(CatchSwitch.getNumHandlers() != 0,
5202 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5203
5204 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5205 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5206 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5207 }
5208
5209 visitEHPadPredecessors(CatchSwitch);
5210 visitTerminator(CatchSwitch);
5211}
5212
5213void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5215 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5216 CRI.getOperand(0));
5217
5218 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5219 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5220 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5221 "CleanupReturnInst must unwind to an EH block which is not a "
5222 "landingpad.",
5223 &CRI);
5224 }
5225
5226 visitTerminator(CRI);
5227}
5228
5229void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5230 Instruction *Op = cast<Instruction>(I.getOperand(i));
5231 // If the we have an invalid invoke, don't try to compute the dominance.
5232 // We already reject it in the invoke specific checks and the dominance
5233 // computation doesn't handle multiple edges.
5234 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5235 if (II->getNormalDest() == II->getUnwindDest())
5236 return;
5237 }
5238
5239 // Quick check whether the def has already been encountered in the same block.
5240 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5241 // uses are defined to happen on the incoming edge, not at the instruction.
5242 //
5243 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5244 // wrapping an SSA value, assert that we've already encountered it. See
5245 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5246 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5247 return;
5248
5249 const Use &U = I.getOperandUse(i);
5250 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5251}
5252
5253void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5254 Check(I.getType()->isPointerTy(),
5255 "dereferenceable, dereferenceable_or_null "
5256 "apply only to pointer types",
5257 &I);
5259 "dereferenceable, dereferenceable_or_null apply only to load"
5260 " and inttoptr instructions, use attributes for calls or invokes",
5261 &I);
5262 Check(MD->getNumOperands() == 1,
5263 "dereferenceable, dereferenceable_or_null "
5264 "take one operand!",
5265 &I);
5266 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5267 Check(CI && CI->getType()->isIntegerTy(64),
5268 "dereferenceable, "
5269 "dereferenceable_or_null metadata value must be an i64!",
5270 &I);
5271}
5272
5273void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5274 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5275 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5276 &I);
5277 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5278}
5279
5280void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5281 auto GetBranchingTerminatorNumOperands = [&]() {
5282 unsigned ExpectedNumOperands = 0;
5283 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5284 ExpectedNumOperands = BI->getNumSuccessors();
5285 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5286 ExpectedNumOperands = SI->getNumSuccessors();
5287 else if (isa<CallInst>(&I))
5288 ExpectedNumOperands = 1;
5289 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5290 ExpectedNumOperands = IBI->getNumDestinations();
5291 else if (isa<SelectInst>(&I))
5292 ExpectedNumOperands = 2;
5293 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5294 ExpectedNumOperands = CI->getNumSuccessors();
5295 return ExpectedNumOperands;
5296 };
5297 Check(MD->getNumOperands() >= 1,
5298 "!prof annotations should have at least 1 operand", MD);
5299 // Check first operand.
5300 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5302 "expected string with name of the !prof annotation", MD);
5303 MDString *MDS = cast<MDString>(MD->getOperand(0));
5304 StringRef ProfName = MDS->getString();
5305
5307 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5308 "'unknown' !prof should only appear on instructions on which "
5309 "'branch_weights' would",
5310 MD);
5311 verifyUnknownProfileMetadata(MD);
5312 return;
5313 }
5314
5315 Check(MD->getNumOperands() >= 2,
5316 "!prof annotations should have no less than 2 operands", MD);
5317
5318 // Check consistency of !prof branch_weights metadata.
5319 if (ProfName == MDProfLabels::BranchWeights) {
5320 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5321 if (isa<InvokeInst>(&I)) {
5322 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5323 "Wrong number of InvokeInst branch_weights operands", MD);
5324 } else {
5325 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5326 if (ExpectedNumOperands == 0)
5327 CheckFailed("!prof branch_weights are not allowed for this instruction",
5328 MD);
5329
5330 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5331 MD);
5332 }
5333 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5334 ++i) {
5335 auto &MDO = MD->getOperand(i);
5336 Check(MDO, "second operand should not be null", MD);
5338 "!prof brunch_weights operand is not a const int");
5339 }
5340 } else if (ProfName == MDProfLabels::ValueProfile) {
5341 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5342 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5343 Check(KindInt, "VP !prof missing kind argument", MD);
5344
5345 auto Kind = KindInt->getZExtValue();
5346 Check(Kind >= InstrProfValueKind::IPVK_First &&
5347 Kind <= InstrProfValueKind::IPVK_Last,
5348 "Invalid VP !prof kind", MD);
5349 Check(MD->getNumOperands() % 2 == 1,
5350 "VP !prof should have an even number "
5351 "of arguments after 'VP'",
5352 MD);
5353 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5354 Kind == InstrProfValueKind::IPVK_MemOPSize)
5356 "VP !prof indirect call or memop size expected to be applied to "
5357 "CallBase instructions only",
5358 MD);
5359 } else {
5360 CheckFailed("expected either branch_weights or VP profile name", MD);
5361 }
5362}
5363
5364void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5365 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5366 // DIAssignID metadata must be attached to either an alloca or some form of
5367 // store/memory-writing instruction.
5368 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5369 // possible store intrinsics.
5370 bool ExpectedInstTy =
5372 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5373 I, MD);
5374 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5375 // only be found as DbgAssignIntrinsic operands.
5376 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5377 for (auto *User : AsValue->users()) {
5379 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5380 MD, User);
5381 // All of the dbg.assign intrinsics should be in the same function as I.
5382 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5383 CheckDI(DAI->getFunction() == I.getFunction(),
5384 "dbg.assign not in same function as inst", DAI, &I);
5385 }
5386 }
5387 for (DbgVariableRecord *DVR :
5388 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5389 CheckDI(DVR->isDbgAssign(),
5390 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5391 CheckDI(DVR->getFunction() == I.getFunction(),
5392 "DVRAssign not in same function as inst", DVR, &I);
5393 }
5394}
5395
5396void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5398 "!mmra metadata attached to unexpected instruction kind", I, MD);
5399
5400 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5401 // list of tags such as !2 in the following example:
5402 // !0 = !{!"a", !"b"}
5403 // !1 = !{!"c", !"d"}
5404 // !2 = !{!0, !1}
5405 if (MMRAMetadata::isTagMD(MD))
5406 return;
5407
5408 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5409 for (const MDOperand &MDOp : MD->operands())
5410 Check(MMRAMetadata::isTagMD(MDOp.get()),
5411 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5412}
5413
5414void Verifier::visitCallStackMetadata(MDNode *MD) {
5415 // Call stack metadata should consist of a list of at least 1 constant int
5416 // (representing a hash of the location).
5417 Check(MD->getNumOperands() >= 1,
5418 "call stack metadata should have at least 1 operand", MD);
5419
5420 for (const auto &Op : MD->operands())
5422 "call stack metadata operand should be constant integer", Op);
5423}
5424
5425void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5426 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5427 Check(MD->getNumOperands() >= 1,
5428 "!memprof annotations should have at least 1 metadata operand "
5429 "(MemInfoBlock)",
5430 MD);
5431
5432 // Check each MIB
5433 for (auto &MIBOp : MD->operands()) {
5434 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5435 // The first operand of an MIB should be the call stack metadata.
5436 // There rest of the operands should be MDString tags, and there should be
5437 // at least one.
5438 Check(MIB->getNumOperands() >= 2,
5439 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5440
5441 // Check call stack metadata (first operand).
5442 Check(MIB->getOperand(0) != nullptr,
5443 "!memprof MemInfoBlock first operand should not be null", MIB);
5444 Check(isa<MDNode>(MIB->getOperand(0)),
5445 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5446 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5447 visitCallStackMetadata(StackMD);
5448
5449 // The second MIB operand should be MDString.
5451 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5452
5453 // Any remaining should be MDNode that are pairs of integers
5454 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5455 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5456 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5457 MIB);
5458 Check(OpNode->getNumOperands() == 2,
5459 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5460 "operands",
5461 MIB);
5462 // Check that all of Op's operands are ConstantInt.
5463 Check(llvm::all_of(OpNode->operands(),
5464 [](const MDOperand &Op) {
5465 return mdconst::hasa<ConstantInt>(Op);
5466 }),
5467 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5468 "ConstantInt operands",
5469 MIB);
5470 }
5471 }
5472}
5473
5474void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5475 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5476 // Verify the partial callstack annotated from memprof profiles. This callsite
5477 // is a part of a profiled allocation callstack.
5478 visitCallStackMetadata(MD);
5479}
5480
5481static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5482 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5483 return isa<ConstantInt>(VAL->getValue());
5484 return false;
5485}
5486
5487void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5488 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5489 &I);
5490 for (Metadata *Op : MD->operands()) {
5492 "The callee_type metadata must be a list of type metadata nodes", Op);
5493 auto *TypeMD = cast<MDNode>(Op);
5494 Check(TypeMD->getNumOperands() == 2,
5495 "Well-formed generalized type metadata must contain exactly two "
5496 "operands",
5497 Op);
5498 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5499 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5500 "The first operand of type metadata for functions must be zero", Op);
5501 Check(TypeMD->hasGeneralizedMDString(),
5502 "Only generalized type metadata can be part of the callee_type "
5503 "metadata list",
5504 Op);
5505 }
5506}
5507
5508void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5509 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5510 Check(Annotation->getNumOperands() >= 1,
5511 "annotation must have at least one operand");
5512 for (const MDOperand &Op : Annotation->operands()) {
5513 bool TupleOfStrings =
5514 isa<MDTuple>(Op.get()) &&
5515 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5516 return isa<MDString>(Annotation.get());
5517 });
5518 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5519 "operands must be a string or a tuple of strings");
5520 }
5521}
5522
5523void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5524 unsigned NumOps = MD->getNumOperands();
5525 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5526 MD);
5527 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5528 "first scope operand must be self-referential or string", MD);
5529 if (NumOps == 3)
5531 "third scope operand must be string (if used)", MD);
5532
5533 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5534 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5535
5536 unsigned NumDomainOps = Domain->getNumOperands();
5537 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5538 "domain must have one or two operands", Domain);
5539 Check(Domain->getOperand(0).get() == Domain ||
5540 isa<MDString>(Domain->getOperand(0)),
5541 "first domain operand must be self-referential or string", Domain);
5542 if (NumDomainOps == 2)
5543 Check(isa<MDString>(Domain->getOperand(1)),
5544 "second domain operand must be string (if used)", Domain);
5545}
5546
5547void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5548 for (const MDOperand &Op : MD->operands()) {
5549 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5550 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5551 visitAliasScopeMetadata(OpMD);
5552 }
5553}
5554
5555void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5556 auto IsValidAccessScope = [](const MDNode *MD) {
5557 return MD->getNumOperands() == 0 && MD->isDistinct();
5558 };
5559
5560 // It must be either an access scope itself...
5561 if (IsValidAccessScope(MD))
5562 return;
5563
5564 // ...or a list of access scopes.
5565 for (const MDOperand &Op : MD->operands()) {
5566 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5567 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5568 Check(IsValidAccessScope(OpMD),
5569 "Access scope list contains invalid access scope", MD);
5570 }
5571}
5572
5573void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5574 static const char *ValidArgs[] = {"address_is_null", "address",
5575 "read_provenance", "provenance"};
5576
5577 auto *SI = dyn_cast<StoreInst>(&I);
5578 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5579 Check(SI->getValueOperand()->getType()->isPointerTy(),
5580 "!captures metadata can only be applied to store with value operand of "
5581 "pointer type",
5582 &I);
5583 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5584 &I);
5585
5586 for (Metadata *Op : Captures->operands()) {
5587 auto *Str = dyn_cast<MDString>(Op);
5588 Check(Str, "!captures metadata must be a list of strings", &I);
5589 Check(is_contained(ValidArgs, Str->getString()),
5590 "invalid entry in !captures metadata", &I, Str);
5591 }
5592}
5593
5594void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5595 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5596 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5597 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5599 "expected integer constant", MD);
5600}
5601
5602/// verifyInstruction - Verify that an instruction is well formed.
5603///
5604void Verifier::visitInstruction(Instruction &I) {
5605 BasicBlock *BB = I.getParent();
5606 Check(BB, "Instruction not embedded in basic block!", &I);
5607
5608 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5609 for (User *U : I.users()) {
5610 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5611 "Only PHI nodes may reference their own value!", &I);
5612 }
5613 }
5614
5615 // Check that void typed values don't have names
5616 Check(!I.getType()->isVoidTy() || !I.hasName(),
5617 "Instruction has a name, but provides a void value!", &I);
5618
5619 // Check that the return value of the instruction is either void or a legal
5620 // value type.
5621 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5622 "Instruction returns a non-scalar type!", &I);
5623
5624 // Check that the instruction doesn't produce metadata. Calls are already
5625 // checked against the callee type.
5626 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5627 "Invalid use of metadata!", &I);
5628
5629 // Check that all uses of the instruction, if they are instructions
5630 // themselves, actually have parent basic blocks. If the use is not an
5631 // instruction, it is an error!
5632 for (Use &U : I.uses()) {
5633 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5634 Check(Used->getParent() != nullptr,
5635 "Instruction referencing"
5636 " instruction not embedded in a basic block!",
5637 &I, Used);
5638 else {
5639 CheckFailed("Use of instruction is not an instruction!", U);
5640 return;
5641 }
5642 }
5643
5644 // Get a pointer to the call base of the instruction if it is some form of
5645 // call.
5646 const CallBase *CBI = dyn_cast<CallBase>(&I);
5647
5648 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5649 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5650
5651 // Check to make sure that only first-class-values are operands to
5652 // instructions.
5653 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5654 Check(false, "Instruction operands must be first-class values!", &I);
5655 }
5656
5657 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5658 // This code checks whether the function is used as the operand of a
5659 // clang_arc_attachedcall operand bundle.
5660 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5661 int Idx) {
5662 return CBI && CBI->isOperandBundleOfType(
5664 };
5665
5666 // Check to make sure that the "address of" an intrinsic function is never
5667 // taken. Ignore cases where the address of the intrinsic function is used
5668 // as the argument of operand bundle "clang.arc.attachedcall" as those
5669 // cases are handled in verifyAttachedCallBundle.
5670 Check((!F->isIntrinsic() ||
5671 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5672 IsAttachedCallOperand(F, CBI, i)),
5673 "Cannot take the address of an intrinsic!", &I);
5674 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5675 F->getIntrinsicID() == Intrinsic::donothing ||
5676 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5677 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5678 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5679 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5680 F->getIntrinsicID() == Intrinsic::coro_resume ||
5681 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5682 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5683 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5684 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5685 F->getIntrinsicID() ==
5686 Intrinsic::experimental_patchpoint_void ||
5687 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5688 F->getIntrinsicID() == Intrinsic::fake_use ||
5689 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5690 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5691 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5692 IsAttachedCallOperand(F, CBI, i),
5693 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5694 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5695 "wasm.(re)throw",
5696 &I);
5697 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5698 &M, F, F->getParent());
5699 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5700 Check(OpBB->getParent() == BB->getParent(),
5701 "Referring to a basic block in another function!", &I);
5702 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5703 Check(OpArg->getParent() == BB->getParent(),
5704 "Referring to an argument in another function!", &I);
5705 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5706 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5707 &M, GV, GV->getParent());
5708 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5709 Check(OpInst->getFunction() == BB->getParent(),
5710 "Referring to an instruction in another function!", &I);
5711 verifyDominatesUse(I, i);
5712 } else if (isa<InlineAsm>(I.getOperand(i))) {
5713 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5714 "Cannot take the address of an inline asm!", &I);
5715 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5716 visitConstantExprsRecursively(C);
5717 }
5718 }
5719
5720 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5721 Check(I.getType()->isFPOrFPVectorTy(),
5722 "fpmath requires a floating point result!", &I);
5723 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5724 if (ConstantFP *CFP0 =
5726 const APFloat &Accuracy = CFP0->getValueAPF();
5727 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5728 "fpmath accuracy must have float type", &I);
5729 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5730 "fpmath accuracy not a positive number!", &I);
5731 } else {
5732 Check(false, "invalid fpmath accuracy!", &I);
5733 }
5734 }
5735
5736 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5738 "Ranges are only for loads, calls and invokes!", &I);
5739 visitRangeMetadata(I, Range, I.getType());
5740 }
5741
5742 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
5743 Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
5744 visitNoFPClassMetadata(I, MD, I.getType());
5745 }
5746
5747 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5750 "noalias.addrspace are only for memory operations!", &I);
5751 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5752 }
5753
5754 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5756 "invariant.group metadata is only for loads and stores", &I);
5757 }
5758
5759 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5760 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5761 &I);
5763 "nonnull applies only to load instructions, use attributes"
5764 " for calls or invokes",
5765 &I);
5766 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5767 }
5768
5769 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5770 visitDereferenceableMetadata(I, MD);
5771
5772 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5773 visitDereferenceableMetadata(I, MD);
5774
5775 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5776 visitNofreeMetadata(I, MD);
5777
5778 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5779 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5780
5781 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5782 visitAliasScopeListMetadata(MD);
5783 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5784 visitAliasScopeListMetadata(MD);
5785
5786 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5787 visitAccessGroupMetadata(MD);
5788
5789 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5790 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5791 &I);
5793 "align applies only to load instructions, "
5794 "use attributes for calls or invokes",
5795 &I);
5796 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5797 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5798 Check(CI && CI->getType()->isIntegerTy(64),
5799 "align metadata value must be an i64!", &I);
5800 uint64_t Align = CI->getZExtValue();
5801 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5802 &I);
5803 Check(Align <= Value::MaximumAlignment,
5804 "alignment is larger that implementation defined limit", &I);
5805 }
5806
5807 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5808 visitProfMetadata(I, MD);
5809
5810 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5811 visitMemProfMetadata(I, MD);
5812
5813 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5814 visitCallsiteMetadata(I, MD);
5815
5816 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5817 visitCalleeTypeMetadata(I, MD);
5818
5819 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5820 visitDIAssignIDMetadata(I, MD);
5821
5822 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5823 visitMMRAMetadata(I, MMRA);
5824
5825 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5826 visitAnnotationMetadata(Annotation);
5827
5828 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5829 visitCapturesMetadata(I, Captures);
5830
5831 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5832 visitAllocTokenMetadata(I, MD);
5833
5834 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5835 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5836 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5837
5838 if (auto *DL = dyn_cast<DILocation>(N)) {
5839 if (DL->getAtomGroup()) {
5840 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5841 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5842 "Instructions enabled",
5843 DL, DL->getScope()->getSubprogram());
5844 }
5845 }
5846 }
5847
5849 I.getAllMetadata(MDs);
5850 for (auto Attachment : MDs) {
5851 unsigned Kind = Attachment.first;
5852 auto AllowLocs =
5853 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5854 ? AreDebugLocsAllowed::Yes
5855 : AreDebugLocsAllowed::No;
5856 visitMDNode(*Attachment.second, AllowLocs);
5857 }
5858
5859 InstsInThisBlock.insert(&I);
5860}
5861
5862/// Allow intrinsics to be verified in different ways.
5863void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5865 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5866 IF);
5867
5868 // Verify that the intrinsic prototype lines up with what the .td files
5869 // describe.
5870 FunctionType *IFTy = IF->getFunctionType();
5871 bool IsVarArg = IFTy->isVarArg();
5872
5876
5877 // Walk the descriptors to extract overloaded types.
5882 "Intrinsic has incorrect return type!", IF);
5884 "Intrinsic has incorrect argument type!", IF);
5885
5886 // Verify if the intrinsic call matches the vararg property.
5887 if (IsVarArg)
5889 "Intrinsic was not defined with variable arguments!", IF);
5890 else
5892 "Callsite was not defined with variable arguments!", IF);
5893
5894 // All descriptors should be absorbed by now.
5895 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5896
5897 // Now that we have the intrinsic ID and the actual argument types (and we
5898 // know they are legal for the intrinsic!) get the intrinsic name through the
5899 // usual means. This allows us to verify the mangling of argument types into
5900 // the name.
5901 const std::string ExpectedName =
5902 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5903 Check(ExpectedName == IF->getName(),
5904 "Intrinsic name not mangled correctly for type arguments! "
5905 "Should be: " +
5906 ExpectedName,
5907 IF);
5908
5909 // If the intrinsic takes MDNode arguments, verify that they are either global
5910 // or are local to *this* function.
5911 for (Value *V : Call.args()) {
5912 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5913 visitMetadataAsValue(*MD, Call.getCaller());
5914 if (auto *Const = dyn_cast<Constant>(V))
5915 Check(!Const->getType()->isX86_AMXTy(),
5916 "const x86_amx is not allowed in argument!");
5917 }
5918
5919 switch (ID) {
5920 default:
5921 break;
5922 case Intrinsic::assume: {
5923 if (Call.hasOperandBundles()) {
5925 Check(Cond && Cond->isOne(),
5926 "assume with operand bundles must have i1 true condition", Call);
5927 }
5928 for (auto &Elem : Call.bundle_op_infos()) {
5929 unsigned ArgCount = Elem.End - Elem.Begin;
5930 // Separate storage assumptions are special insofar as they're the only
5931 // operand bundles allowed on assumes that aren't parameter attributes.
5932 if (Elem.Tag->getKey() == "separate_storage") {
5933 Check(ArgCount == 2,
5934 "separate_storage assumptions should have 2 arguments", Call);
5935 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5936 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5937 "arguments to separate_storage assumptions should be pointers",
5938 Call);
5939 continue;
5940 }
5941 Check(Elem.Tag->getKey() == "ignore" ||
5942 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5943 "tags must be valid attribute names", Call);
5944 Attribute::AttrKind Kind =
5945 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5946 if (Kind == Attribute::Alignment) {
5947 Check(ArgCount <= 3 && ArgCount >= 2,
5948 "alignment assumptions should have 2 or 3 arguments", Call);
5949 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5950 "first argument should be a pointer", Call);
5951 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5952 "second argument should be an integer", Call);
5953 if (ArgCount == 3)
5954 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5955 "third argument should be an integer if present", Call);
5956 continue;
5957 }
5958 if (Kind == Attribute::Dereferenceable) {
5959 Check(ArgCount == 2,
5960 "dereferenceable assumptions should have 2 arguments", Call);
5961 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5962 "first argument should be a pointer", Call);
5963 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5964 "second argument should be an integer", Call);
5965 continue;
5966 }
5967 Check(ArgCount <= 2, "too many arguments", Call);
5968 if (Kind == Attribute::None)
5969 break;
5970 if (Attribute::isIntAttrKind(Kind)) {
5971 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5972 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5973 "the second argument should be a constant integral value", Call);
5974 } else if (Attribute::canUseAsParamAttr(Kind)) {
5975 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5976 } else if (Attribute::canUseAsFnAttr(Kind)) {
5977 Check((ArgCount) == 0, "this attribute has no argument", Call);
5978 }
5979 }
5980 break;
5981 }
5982 case Intrinsic::ucmp:
5983 case Intrinsic::scmp: {
5984 Type *SrcTy = Call.getOperand(0)->getType();
5985 Type *DestTy = Call.getType();
5986
5987 Check(DestTy->getScalarSizeInBits() >= 2,
5988 "result type must be at least 2 bits wide", Call);
5989
5990 bool IsDestTypeVector = DestTy->isVectorTy();
5991 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5992 "ucmp/scmp argument and result types must both be either vector or "
5993 "scalar types",
5994 Call);
5995 if (IsDestTypeVector) {
5996 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5997 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5998 Check(SrcVecLen == DestVecLen,
5999 "return type and arguments must have the same number of "
6000 "elements",
6001 Call);
6002 }
6003 break;
6004 }
6005 case Intrinsic::coro_id: {
6006 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
6007 if (isa<ConstantPointerNull>(InfoArg))
6008 break;
6009 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6010 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6011 "info argument of llvm.coro.id must refer to an initialized "
6012 "constant");
6013 Constant *Init = GV->getInitializer();
6015 "info argument of llvm.coro.id must refer to either a struct or "
6016 "an array");
6017 break;
6018 }
6019 case Intrinsic::is_fpclass: {
6020 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6021 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6022 "unsupported bits for llvm.is.fpclass test mask");
6023 break;
6024 }
6025 case Intrinsic::fptrunc_round: {
6026 // Check the rounding mode
6027 Metadata *MD = nullptr;
6029 if (MAV)
6030 MD = MAV->getMetadata();
6031
6032 Check(MD != nullptr, "missing rounding mode argument", Call);
6033
6034 Check(isa<MDString>(MD),
6035 ("invalid value for llvm.fptrunc.round metadata operand"
6036 " (the operand should be a string)"),
6037 MD);
6038
6039 std::optional<RoundingMode> RoundMode =
6040 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6041 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6042 "unsupported rounding mode argument", Call);
6043 break;
6044 }
6045 case Intrinsic::convert_to_arbitrary_fp: {
6046 // Check that vector element counts are consistent.
6047 Type *ValueTy = Call.getArgOperand(0)->getType();
6048 Type *IntTy = Call.getType();
6049
6050 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6051 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6052 Check(IntVecTy,
6053 "if floating-point operand is a vector, integer operand must also "
6054 "be a vector",
6055 Call);
6056 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6057 "floating-point and integer vector operands must have the same "
6058 "element count",
6059 Call);
6060 }
6061
6062 // Check interpretation metadata (argoperand 1).
6063 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6064 Check(InterpMAV, "missing interpretation metadata operand", Call);
6065 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6066 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6067 StringRef Interp = InterpStr->getString();
6068
6069 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6070 Call);
6071
6072 // Valid interpretation strings: mini-float format names.
6074 "unsupported interpretation metadata string", Call);
6075
6076 // Check rounding mode metadata (argoperand 2).
6077 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6078 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6079 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6080 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6081
6082 std::optional<RoundingMode> RM =
6083 convertStrToRoundingMode(RoundingStr->getString());
6084 Check(RM && *RM != RoundingMode::Dynamic,
6085 "unsupported rounding mode argument", Call);
6086 break;
6087 }
6088 case Intrinsic::convert_from_arbitrary_fp: {
6089 // Check that vector element counts are consistent.
6090 Type *IntTy = Call.getArgOperand(0)->getType();
6091 Type *ValueTy = Call.getType();
6092
6093 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6094 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6095 Check(IntVecTy,
6096 "if floating-point operand is a vector, integer operand must also "
6097 "be a vector",
6098 Call);
6099 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6100 "floating-point and integer vector operands must have the same "
6101 "element count",
6102 Call);
6103 }
6104
6105 // Check interpretation metadata (argoperand 1).
6106 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6107 Check(InterpMAV, "missing interpretation metadata operand", Call);
6108 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6109 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6110 StringRef Interp = InterpStr->getString();
6111
6112 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6113 Call);
6114
6115 // Valid interpretation strings: mini-float format names.
6117 "unsupported interpretation metadata string", Call);
6118 break;
6119 }
6120#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6121#include "llvm/IR/VPIntrinsics.def"
6122#undef BEGIN_REGISTER_VP_INTRINSIC
6123 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6124 break;
6125#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6126 case Intrinsic::INTRINSIC:
6127#include "llvm/IR/ConstrainedOps.def"
6128#undef INSTRUCTION
6129 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6130 break;
6131 case Intrinsic::dbg_declare: // llvm.dbg.declare
6132 case Intrinsic::dbg_value: // llvm.dbg.value
6133 case Intrinsic::dbg_assign: // llvm.dbg.assign
6134 case Intrinsic::dbg_label: // llvm.dbg.label
 6135    // We no longer interpret debug intrinsics (the old variable-location
 6136    // design). They're meaningless as far as LLVM is concerned; we could make
 6137    // it an error for them to appear, but it's possible we'll have users
 6138    // converting back to intrinsics for the foreseeable future (such as DXIL),
 6139    // so tolerate their existence.
6140 break;
6141 case Intrinsic::memcpy:
6142 case Intrinsic::memcpy_inline:
6143 case Intrinsic::memmove:
6144 case Intrinsic::memset:
6145 case Intrinsic::memset_inline:
6146 break;
6147 case Intrinsic::experimental_memset_pattern: {
6148 const auto Memset = cast<MemSetPatternInst>(&Call);
6149 Check(Memset->getValue()->getType()->isSized(),
6150 "unsized types cannot be used as memset patterns", Call);
6151 break;
6152 }
6153 case Intrinsic::memcpy_element_unordered_atomic:
6154 case Intrinsic::memmove_element_unordered_atomic:
6155 case Intrinsic::memset_element_unordered_atomic: {
6156 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6157
6158 ConstantInt *ElementSizeCI =
6159 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6160 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6161 Check(ElementSizeVal.isPowerOf2(),
6162 "element size of the element-wise atomic memory intrinsic "
6163 "must be a power of 2",
6164 Call);
6165
6166 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6167 return Alignment && ElementSizeVal.ule(Alignment->value());
6168 };
6169 Check(IsValidAlignment(AMI->getDestAlign()),
6170 "incorrect alignment of the destination argument", Call);
6171 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6172 Check(IsValidAlignment(AMT->getSourceAlign()),
6173 "incorrect alignment of the source argument", Call);
6174 }
6175 break;
6176 }
6177 case Intrinsic::call_preallocated_setup: {
6178 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6179 bool FoundCall = false;
6180 for (User *U : Call.users()) {
6181 auto *UseCall = dyn_cast<CallBase>(U);
6182 Check(UseCall != nullptr,
6183 "Uses of llvm.call.preallocated.setup must be calls");
6184 Intrinsic::ID IID = UseCall->getIntrinsicID();
6185 if (IID == Intrinsic::call_preallocated_arg) {
6186 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6187 Check(AllocArgIndex != nullptr,
6188 "llvm.call.preallocated.alloc arg index must be a constant");
6189 auto AllocArgIndexInt = AllocArgIndex->getValue();
6190 Check(AllocArgIndexInt.sge(0) &&
6191 AllocArgIndexInt.slt(NumArgs->getValue()),
6192 "llvm.call.preallocated.alloc arg index must be between 0 and "
6193 "corresponding "
6194 "llvm.call.preallocated.setup's argument count");
6195 } else if (IID == Intrinsic::call_preallocated_teardown) {
6196 // nothing to do
6197 } else {
6198 Check(!FoundCall, "Can have at most one call corresponding to a "
6199 "llvm.call.preallocated.setup");
6200 FoundCall = true;
6201 size_t NumPreallocatedArgs = 0;
6202 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6203 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6204 ++NumPreallocatedArgs;
6205 }
6206 }
6207 Check(NumPreallocatedArgs != 0,
6208 "cannot use preallocated intrinsics on a call without "
6209 "preallocated arguments");
6210 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6211 "llvm.call.preallocated.setup arg size must be equal to number "
6212 "of preallocated arguments "
6213 "at call site",
6214 Call, *UseCall);
 6215        // getOperandBundle() cannot be called if more than one such operand
 6216        // bundle exists. There is already a check elsewhere for this, so skip
 6217        // here if we see more than one.
6218 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6219 1) {
6220 return;
6221 }
6222 auto PreallocatedBundle =
6223 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6224 Check(PreallocatedBundle,
6225 "Use of llvm.call.preallocated.setup outside intrinsics "
6226 "must be in \"preallocated\" operand bundle");
6227 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6228 "preallocated bundle must have token from corresponding "
6229 "llvm.call.preallocated.setup");
6230 }
6231 }
6232 break;
6233 }
6234 case Intrinsic::call_preallocated_arg: {
6235 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6236 Check(Token &&
6237 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6238 "llvm.call.preallocated.arg token argument must be a "
6239 "llvm.call.preallocated.setup");
6240 Check(Call.hasFnAttr(Attribute::Preallocated),
6241 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6242 "call site attribute");
6243 break;
6244 }
6245 case Intrinsic::call_preallocated_teardown: {
6246 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6247 Check(Token &&
6248 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6249 "llvm.call.preallocated.teardown token argument must be a "
6250 "llvm.call.preallocated.setup");
6251 break;
6252 }
6253 case Intrinsic::gcroot:
6254 case Intrinsic::gcwrite:
6255 case Intrinsic::gcread:
6256 if (ID == Intrinsic::gcroot) {
6257 AllocaInst *AI =
6259 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6261 "llvm.gcroot parameter #2 must be a constant.", Call);
6262 if (!AI->getAllocatedType()->isPointerTy()) {
6264 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6265 "or argument #2 must be a non-null constant.",
6266 Call);
6267 }
6268 }
6269
6270 Check(Call.getParent()->getParent()->hasGC(),
6271 "Enclosing function does not use GC.", Call);
6272 break;
6273 case Intrinsic::init_trampoline:
6275 "llvm.init_trampoline parameter #2 must resolve to a function.",
6276 Call);
6277 break;
6278 case Intrinsic::prefetch:
6279 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6280 "rw argument to llvm.prefetch must be 0-1", Call);
6281 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6282 "locality argument to llvm.prefetch must be 0-3", Call);
6283 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6284 "cache type argument to llvm.prefetch must be 0-1", Call);
6285 break;
6286 case Intrinsic::reloc_none: {
6288 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6289 "llvm.reloc.none argument must be a metadata string", &Call);
6290 break;
6291 }
6292 case Intrinsic::stackprotector:
6294 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6295 break;
6296 case Intrinsic::localescape: {
6297 BasicBlock *BB = Call.getParent();
6298 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6299 Call);
6300 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6301 Call);
6302 for (Value *Arg : Call.args()) {
6303 if (isa<ConstantPointerNull>(Arg))
6304 continue; // Null values are allowed as placeholders.
6305 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6306 Check(AI && AI->isStaticAlloca(),
6307 "llvm.localescape only accepts static allocas", Call);
6308 }
6309 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6310 SawFrameEscape = true;
6311 break;
6312 }
6313 case Intrinsic::localrecover: {
6315 Function *Fn = dyn_cast<Function>(FnArg);
6316 Check(Fn && !Fn->isDeclaration(),
6317 "llvm.localrecover first "
6318 "argument must be function defined in this module",
6319 Call);
6320 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6321 auto &Entry = FrameEscapeInfo[Fn];
6322 Entry.second = unsigned(
6323 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6324 break;
6325 }
6326
6327 case Intrinsic::experimental_gc_statepoint:
6328 if (auto *CI = dyn_cast<CallInst>(&Call))
6329 Check(!CI->isInlineAsm(),
6330 "gc.statepoint support for inline assembly unimplemented", CI);
6331 Check(Call.getParent()->getParent()->hasGC(),
6332 "Enclosing function does not use GC.", Call);
6333
6334 verifyStatepoint(Call);
6335 break;
6336 case Intrinsic::experimental_gc_result: {
6337 Check(Call.getParent()->getParent()->hasGC(),
6338 "Enclosing function does not use GC.", Call);
6339
6340 auto *Statepoint = Call.getArgOperand(0);
6341 if (isa<UndefValue>(Statepoint))
6342 break;
6343
6344 // Are we tied to a statepoint properly?
6345 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6346 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6347 Intrinsic::experimental_gc_statepoint,
6348 "gc.result operand #1 must be from a statepoint", Call,
6349 Call.getArgOperand(0));
6350
6351 // Check that result type matches wrapped callee.
6352 auto *TargetFuncType =
6353 cast<FunctionType>(StatepointCall->getParamElementType(2));
6354 Check(Call.getType() == TargetFuncType->getReturnType(),
6355 "gc.result result type does not match wrapped callee", Call);
6356 break;
6357 }
6358 case Intrinsic::experimental_gc_relocate: {
6359 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6360
6362 "gc.relocate must return a pointer or a vector of pointers", Call);
6363
6364 // Check that this relocate is correctly tied to the statepoint
6365
6366 // This is case for relocate on the unwinding path of an invoke statepoint
6367 if (LandingPadInst *LandingPad =
6369
6370 const BasicBlock *InvokeBB =
6371 LandingPad->getParent()->getUniquePredecessor();
6372
6373 // Landingpad relocates should have only one predecessor with invoke
6374 // statepoint terminator
6375 Check(InvokeBB, "safepoints should have unique landingpads",
6376 LandingPad->getParent());
6377 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6378 InvokeBB);
6380 "gc relocate should be linked to a statepoint", InvokeBB);
6381 } else {
6382 // In all other cases relocate should be tied to the statepoint directly.
6383 // This covers relocates on a normal return path of invoke statepoint and
6384 // relocates of a call statepoint.
6385 auto *Token = Call.getArgOperand(0);
6387 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6388 }
6389
6390 // Verify rest of the relocate arguments.
6391 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6392
6393 // Both the base and derived must be piped through the safepoint.
6396 "gc.relocate operand #2 must be integer offset", Call);
6397
6398 Value *Derived = Call.getArgOperand(2);
6399 Check(isa<ConstantInt>(Derived),
6400 "gc.relocate operand #3 must be integer offset", Call);
6401
6402 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6403 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6404
6405 // Check the bounds
6406 if (isa<UndefValue>(StatepointCall))
6407 break;
6408 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6409 .getOperandBundle(LLVMContext::OB_gc_live)) {
6410 Check(BaseIndex < Opt->Inputs.size(),
6411 "gc.relocate: statepoint base index out of bounds", Call);
6412 Check(DerivedIndex < Opt->Inputs.size(),
6413 "gc.relocate: statepoint derived index out of bounds", Call);
6414 }
6415
6416 // Relocated value must be either a pointer type or vector-of-pointer type,
6417 // but gc_relocate does not need to return the same pointer type as the
6418 // relocated pointer. It can be casted to the correct type later if it's
6419 // desired. However, they must have the same address space and 'vectorness'
6420 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6421 auto *ResultType = Call.getType();
6422 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6423 auto *BaseType = Relocate.getBasePtr()->getType();
6424
6425 Check(BaseType->isPtrOrPtrVectorTy(),
6426 "gc.relocate: relocated value must be a pointer", Call);
6427 Check(DerivedType->isPtrOrPtrVectorTy(),
6428 "gc.relocate: relocated value must be a pointer", Call);
6429
6430 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6431 "gc.relocate: vector relocates to vector and pointer to pointer",
6432 Call);
6433 Check(
6434 ResultType->getPointerAddressSpace() ==
6435 DerivedType->getPointerAddressSpace(),
6436 "gc.relocate: relocating a pointer shouldn't change its address space",
6437 Call);
6438
6439 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6440 Check(GC, "gc.relocate: calling function must have GCStrategy",
6441 Call.getFunction());
6442 if (GC) {
6443 auto isGCPtr = [&GC](Type *PTy) {
6444 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6445 };
6446 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6447 Check(isGCPtr(BaseType),
6448 "gc.relocate: relocated value must be a gc pointer", Call);
6449 Check(isGCPtr(DerivedType),
6450 "gc.relocate: relocated value must be a gc pointer", Call);
6451 }
6452 break;
6453 }
6454 case Intrinsic::experimental_patchpoint: {
6455 if (Call.getCallingConv() == CallingConv::AnyReg) {
6457 "patchpoint: invalid return type used with anyregcc", Call);
6458 }
6459 break;
6460 }
6461 case Intrinsic::eh_exceptioncode:
6462 case Intrinsic::eh_exceptionpointer: {
6464 "eh.exceptionpointer argument must be a catchpad", Call);
6465 break;
6466 }
6467 case Intrinsic::get_active_lane_mask: {
6469 "get_active_lane_mask: must return a "
6470 "vector",
6471 Call);
6472 auto *ElemTy = Call.getType()->getScalarType();
6473 Check(ElemTy->isIntegerTy(1),
6474 "get_active_lane_mask: element type is not "
6475 "i1",
6476 Call);
6477 break;
6478 }
6479 case Intrinsic::experimental_get_vector_length: {
6480 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6481 Check(!VF->isNegative() && !VF->isZero(),
6482 "get_vector_length: VF must be positive", Call);
6483 break;
6484 }
6485 case Intrinsic::masked_load: {
6486 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6487 Call);
6488
6490 Value *PassThru = Call.getArgOperand(2);
6491 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6492 Call);
6493 Check(PassThru->getType() == Call.getType(),
6494 "masked_load: pass through and return type must match", Call);
6495 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6496 cast<VectorType>(Call.getType())->getElementCount(),
6497 "masked_load: vector mask must be same length as return", Call);
6498 break;
6499 }
6500 case Intrinsic::masked_store: {
6501 Value *Val = Call.getArgOperand(0);
6503 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6504 Call);
6505 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6506 cast<VectorType>(Val->getType())->getElementCount(),
6507 "masked_store: vector mask must be same length as value", Call);
6508 break;
6509 }
6510
6511 case Intrinsic::experimental_guard: {
6512 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6514 "experimental_guard must have exactly one "
6515 "\"deopt\" operand bundle");
6516 break;
6517 }
6518
6519 case Intrinsic::experimental_deoptimize: {
6520 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6521 Call);
6523 "experimental_deoptimize must have exactly one "
6524 "\"deopt\" operand bundle");
6526 "experimental_deoptimize return type must match caller return type");
6527
6528 if (isa<CallInst>(Call)) {
6530 Check(RI,
6531 "calls to experimental_deoptimize must be followed by a return");
6532
6533 if (!Call.getType()->isVoidTy() && RI)
6534 Check(RI->getReturnValue() == &Call,
6535 "calls to experimental_deoptimize must be followed by a return "
6536 "of the value computed by experimental_deoptimize");
6537 }
6538
6539 break;
6540 }
6541 case Intrinsic::vastart: {
6543 "va_start called in a non-varargs function");
6544 break;
6545 }
6546 case Intrinsic::get_dynamic_area_offset: {
6547 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6548 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6549 IntTy->getBitWidth(),
6550 "get_dynamic_area_offset result type must be scalar integer matching "
6551 "alloca address space width",
6552 Call);
6553 break;
6554 }
6555 case Intrinsic::vector_reduce_and:
6556 case Intrinsic::vector_reduce_or:
6557 case Intrinsic::vector_reduce_xor:
6558 case Intrinsic::vector_reduce_add:
6559 case Intrinsic::vector_reduce_mul:
6560 case Intrinsic::vector_reduce_smax:
6561 case Intrinsic::vector_reduce_smin:
6562 case Intrinsic::vector_reduce_umax:
6563 case Intrinsic::vector_reduce_umin: {
6564 Type *ArgTy = Call.getArgOperand(0)->getType();
6565 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6566 "Intrinsic has incorrect argument type!");
6567 break;
6568 }
6569 case Intrinsic::vector_reduce_fmax:
6570 case Intrinsic::vector_reduce_fmin: {
6571 Type *ArgTy = Call.getArgOperand(0)->getType();
6572 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6573 "Intrinsic has incorrect argument type!");
6574 break;
6575 }
6576 case Intrinsic::vector_reduce_fadd:
6577 case Intrinsic::vector_reduce_fmul: {
6578 // Unlike the other reductions, the first argument is a start value. The
6579 // second argument is the vector to be reduced.
6580 Type *ArgTy = Call.getArgOperand(1)->getType();
6581 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6582 "Intrinsic has incorrect argument type!");
6583 break;
6584 }
6585 case Intrinsic::smul_fix:
6586 case Intrinsic::smul_fix_sat:
6587 case Intrinsic::umul_fix:
6588 case Intrinsic::umul_fix_sat:
6589 case Intrinsic::sdiv_fix:
6590 case Intrinsic::sdiv_fix_sat:
6591 case Intrinsic::udiv_fix:
6592 case Intrinsic::udiv_fix_sat: {
6593 Value *Op1 = Call.getArgOperand(0);
6594 Value *Op2 = Call.getArgOperand(1);
6596 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6597 "vector of ints");
6599 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6600 "vector of ints");
6601
6602 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6603 Check(Op3->getType()->isIntegerTy(),
6604 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6605 Check(Op3->getBitWidth() <= 32,
6606 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6607
6608 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6609 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6610 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6611 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6612 "the operands");
6613 } else {
6614 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6615 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6616 "to the width of the operands");
6617 }
6618 break;
6619 }
6620 case Intrinsic::lrint:
6621 case Intrinsic::llrint:
6622 case Intrinsic::lround:
6623 case Intrinsic::llround: {
6624 Type *ValTy = Call.getArgOperand(0)->getType();
6625 Type *ResultTy = Call.getType();
6626 auto *VTy = dyn_cast<VectorType>(ValTy);
6627 auto *RTy = dyn_cast<VectorType>(ResultTy);
6628 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6629 ExpectedName + ": argument must be floating-point or vector "
6630 "of floating-points, and result must be integer or "
6631 "vector of integers",
6632 &Call);
6633 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6634 ExpectedName + ": argument and result disagree on vector use", &Call);
6635 if (VTy) {
6636 Check(VTy->getElementCount() == RTy->getElementCount(),
6637 ExpectedName + ": argument must be same length as result", &Call);
6638 }
6639 break;
6640 }
6641 case Intrinsic::bswap: {
6642 Type *Ty = Call.getType();
6643 unsigned Size = Ty->getScalarSizeInBits();
6644 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6645 break;
6646 }
6647 case Intrinsic::invariant_start: {
6648 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6649 Check(InvariantSize &&
6650 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6651 "invariant_start parameter must be -1, 0 or a positive number",
6652 &Call);
6653 break;
6654 }
6655 case Intrinsic::matrix_multiply:
6656 case Intrinsic::matrix_transpose:
6657 case Intrinsic::matrix_column_major_load:
6658 case Intrinsic::matrix_column_major_store: {
6660 ConstantInt *Stride = nullptr;
6661 ConstantInt *NumRows;
6662 ConstantInt *NumColumns;
6663 VectorType *ResultTy;
6664 Type *Op0ElemTy = nullptr;
6665 Type *Op1ElemTy = nullptr;
6666 switch (ID) {
6667 case Intrinsic::matrix_multiply: {
6668 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6669 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6670 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6672 ->getNumElements() ==
6673 NumRows->getZExtValue() * N->getZExtValue(),
6674 "First argument of a matrix operation does not match specified "
6675 "shape!");
6677 ->getNumElements() ==
6678 N->getZExtValue() * NumColumns->getZExtValue(),
6679 "Second argument of a matrix operation does not match specified "
6680 "shape!");
6681
6682 ResultTy = cast<VectorType>(Call.getType());
6683 Op0ElemTy =
6684 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6685 Op1ElemTy =
6686 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6687 break;
6688 }
6689 case Intrinsic::matrix_transpose:
6690 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6691 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6692 ResultTy = cast<VectorType>(Call.getType());
6693 Op0ElemTy =
6694 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6695 break;
6696 case Intrinsic::matrix_column_major_load: {
6698 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6699 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6700 ResultTy = cast<VectorType>(Call.getType());
6701 break;
6702 }
6703 case Intrinsic::matrix_column_major_store: {
6705 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6706 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6707 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6708 Op0ElemTy =
6709 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6710 break;
6711 }
6712 default:
6713 llvm_unreachable("unexpected intrinsic");
6714 }
6715
6716 Check(ResultTy->getElementType()->isIntegerTy() ||
6717 ResultTy->getElementType()->isFloatingPointTy(),
6718 "Result type must be an integer or floating-point type!", IF);
6719
6720 if (Op0ElemTy)
6721 Check(ResultTy->getElementType() == Op0ElemTy,
6722 "Vector element type mismatch of the result and first operand "
6723 "vector!",
6724 IF);
6725
6726 if (Op1ElemTy)
6727 Check(ResultTy->getElementType() == Op1ElemTy,
6728 "Vector element type mismatch of the result and second operand "
6729 "vector!",
6730 IF);
6731
6733 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6734 "Result of a matrix operation does not fit in the returned vector!");
6735
6736 if (Stride) {
6737 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6738 IF);
6739 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6740 "Stride must be greater or equal than the number of rows!", IF);
6741 }
6742
6743 break;
6744 }
6745 case Intrinsic::stepvector: {
6747 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6748 VecTy->getScalarSizeInBits() >= 8,
6749 "stepvector only supported for vectors of integers "
6750 "with a bitwidth of at least 8.",
6751 &Call);
6752 break;
6753 }
6754 case Intrinsic::experimental_vector_match: {
6755 Value *Op1 = Call.getArgOperand(0);
6756 Value *Op2 = Call.getArgOperand(1);
6758
6759 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6760 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6761 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6762
6763 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6765 "Second operand must be a fixed length vector.", &Call);
6766 Check(Op1Ty->getElementType()->isIntegerTy(),
6767 "First operand must be a vector of integers.", &Call);
6768 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6769 "First two operands must have the same element type.", &Call);
6770 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6771 "First operand and mask must have the same number of elements.",
6772 &Call);
6773 Check(MaskTy->getElementType()->isIntegerTy(1),
6774 "Mask must be a vector of i1's.", &Call);
6775 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6776 &Call);
6777 break;
6778 }
6779 case Intrinsic::vector_insert: {
6780 Value *Vec = Call.getArgOperand(0);
6781 Value *SubVec = Call.getArgOperand(1);
6782 Value *Idx = Call.getArgOperand(2);
6783 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6784
6785 VectorType *VecTy = cast<VectorType>(Vec->getType());
6786 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6787
6788 ElementCount VecEC = VecTy->getElementCount();
6789 ElementCount SubVecEC = SubVecTy->getElementCount();
6790 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6791 "vector_insert parameters must have the same element "
6792 "type.",
6793 &Call);
6794 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6795 "vector_insert index must be a constant multiple of "
6796 "the subvector's known minimum vector length.");
6797
6798 // If this insertion is not the 'mixed' case where a fixed vector is
6799 // inserted into a scalable vector, ensure that the insertion of the
6800 // subvector does not overrun the parent vector.
6801 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6802 Check(IdxN < VecEC.getKnownMinValue() &&
6803 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6804 "subvector operand of vector_insert would overrun the "
6805 "vector being inserted into.");
6806 }
6807 break;
6808 }
6809 case Intrinsic::vector_extract: {
6810 Value *Vec = Call.getArgOperand(0);
6811 Value *Idx = Call.getArgOperand(1);
6812 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6813
6814 VectorType *ResultTy = cast<VectorType>(Call.getType());
6815 VectorType *VecTy = cast<VectorType>(Vec->getType());
6816
6817 ElementCount VecEC = VecTy->getElementCount();
6818 ElementCount ResultEC = ResultTy->getElementCount();
6819
6820 Check(ResultTy->getElementType() == VecTy->getElementType(),
6821 "vector_extract result must have the same element "
6822 "type as the input vector.",
6823 &Call);
6824 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6825 "vector_extract index must be a constant multiple of "
6826 "the result type's known minimum vector length.");
6827
6828 // If this extraction is not the 'mixed' case where a fixed vector is
6829 // extracted from a scalable vector, ensure that the extraction does not
6830 // overrun the parent vector.
6831 if (VecEC.isScalable() == ResultEC.isScalable()) {
6832 Check(IdxN < VecEC.getKnownMinValue() &&
6833 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6834 "vector_extract would overrun.");
6835 }
6836 break;
6837 }
6838 case Intrinsic::vector_partial_reduce_fadd:
6839 case Intrinsic::vector_partial_reduce_add: {
6842
6843 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6844 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6845
6846 Check((VecWidth % AccWidth) == 0,
6847 "Invalid vector widths for partial "
6848 "reduction. The width of the input vector "
6849 "must be a positive integer multiple of "
6850 "the width of the accumulator vector.");
6851 break;
6852 }
6853 case Intrinsic::experimental_noalias_scope_decl: {
6854 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6855 break;
6856 }
6857 case Intrinsic::preserve_array_access_index:
6858 case Intrinsic::preserve_struct_access_index:
6859 case Intrinsic::aarch64_ldaxr:
6860 case Intrinsic::aarch64_ldxr:
6861 case Intrinsic::arm_ldaex:
6862 case Intrinsic::arm_ldrex: {
6863 Type *ElemTy = Call.getParamElementType(0);
6864 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6865 &Call);
6866 break;
6867 }
6868 case Intrinsic::aarch64_stlxr:
6869 case Intrinsic::aarch64_stxr:
6870 case Intrinsic::arm_stlex:
6871 case Intrinsic::arm_strex: {
6872 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6873 Check(ElemTy,
6874 "Intrinsic requires elementtype attribute on second argument.",
6875 &Call);
6876 break;
6877 }
6878 case Intrinsic::aarch64_prefetch: {
6879 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6880 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6881 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6882 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6883 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6884 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6885 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6886 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6887 break;
6888 }
6889 case Intrinsic::aarch64_range_prefetch: {
6890 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6891 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6892 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6893 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6894 Call);
6895 break;
6896 }
6897 case Intrinsic::aarch64_stshh_atomic_store: {
6898 uint64_t Order = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6899 Check(Order == static_cast<uint64_t>(AtomicOrderingCABI::relaxed) ||
6900 Order == static_cast<uint64_t>(AtomicOrderingCABI::release) ||
6901 Order == static_cast<uint64_t>(AtomicOrderingCABI::seq_cst),
6902 "order argument to llvm.aarch64.stshh.atomic.store must be 0, 3 or 5",
6903 Call);
6904
6905 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6906 "policy argument to llvm.aarch64.stshh.atomic.store must be 0 or 1",
6907 Call);
6908
6909 uint64_t Size = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6910 Check(Size == 8 || Size == 16 || Size == 32 || Size == 64,
6911 "size argument to llvm.aarch64.stshh.atomic.store must be 8, 16, "
6912 "32 or 64",
6913 Call);
6914 break;
6915 }
6916 case Intrinsic::callbr_landingpad: {
6917 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6918 Check(CBR, "intrinstic requires callbr operand", &Call);
6919 if (!CBR)
6920 break;
6921
6922 const BasicBlock *LandingPadBB = Call.getParent();
6923 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6924 if (!PredBB) {
6925 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6926 break;
6927 }
6928 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6929 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6930 &Call);
6931 break;
6932 }
6933 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6934 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6935 "block in indirect destination list",
6936 &Call);
6937 const Instruction &First = *LandingPadBB->begin();
6938 Check(&First == &Call, "No other instructions may proceed intrinsic",
6939 &Call);
6940 break;
6941 }
6942 case Intrinsic::structured_gep: {
6943 // Parser should refuse those 2 cases.
6944 assert(Call.arg_size() >= 1);
6946
6947 Check(Call.paramHasAttr(0, Attribute::ElementType),
6948 "Intrinsic first parameter is missing an ElementType attribute",
6949 &Call);
6950
6951 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6952 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6954 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6955 Check(Index->getType()->isIntegerTy(),
6956 "Index operand type must be an integer", &Call);
6957
6958 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
6959 T = AT->getElementType();
6960 } else if (StructType *ST = dyn_cast<StructType>(T)) {
6961 Check(CI, "Indexing into a struct requires a constant int", &Call);
6962 Check(CI->getZExtValue() < ST->getNumElements(),
6963 "Indexing in a struct should be inbounds", &Call);
6964 T = ST->getElementType(CI->getZExtValue());
6965 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
6966 T = VT->getElementType();
6967 } else {
6968 CheckFailed("Reached a non-composite type with more indices to process",
6969 &Call);
6970 }
6971 }
6972 break;
6973 }
6974 case Intrinsic::amdgcn_cs_chain: {
6975 auto CallerCC = Call.getCaller()->getCallingConv();
6976 switch (CallerCC) {
6977 case CallingConv::AMDGPU_CS:
6978 case CallingConv::AMDGPU_CS_Chain:
6979 case CallingConv::AMDGPU_CS_ChainPreserve:
6980 case CallingConv::AMDGPU_ES:
6981 case CallingConv::AMDGPU_GS:
6982 case CallingConv::AMDGPU_HS:
6983 case CallingConv::AMDGPU_LS:
6984 case CallingConv::AMDGPU_VS:
6985 break;
6986 default:
6987 CheckFailed("Intrinsic cannot be called from functions with this "
6988 "calling convention",
6989 &Call);
6990 break;
6991 }
6992
6993 Check(Call.paramHasAttr(2, Attribute::InReg),
6994 "SGPR arguments must have the `inreg` attribute", &Call);
6995 Check(!Call.paramHasAttr(3, Attribute::InReg),
6996 "VGPR arguments must not have the `inreg` attribute", &Call);
6997
6998 auto *Next = Call.getNextNode();
6999 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7000 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7001 Intrinsic::amdgcn_unreachable;
7002 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7003 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7004 break;
7005 }
7006 case Intrinsic::amdgcn_init_exec_from_input: {
7007 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7008 Check(Arg && Arg->hasInRegAttr(),
7009 "only inreg arguments to the parent function are valid as inputs to "
7010 "this intrinsic",
7011 &Call);
7012 break;
7013 }
7014 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7015 auto CallerCC = Call.getCaller()->getCallingConv();
7016 switch (CallerCC) {
7017 case CallingConv::AMDGPU_CS_Chain:
7018 case CallingConv::AMDGPU_CS_ChainPreserve:
7019 break;
7020 default:
7021 CheckFailed("Intrinsic can only be used from functions with the "
7022 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7023 "calling conventions",
7024 &Call);
7025 break;
7026 }
7027
7028 unsigned InactiveIdx = 1;
7029 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7030 "Value for inactive lanes must not have the `inreg` attribute",
7031 &Call);
7032 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7033 "Value for inactive lanes must be a function argument", &Call);
7034 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7035 "Value for inactive lanes must be a VGPR function argument", &Call);
7036 break;
7037 }
7038 case Intrinsic::amdgcn_call_whole_wave: {
7040 Check(F, "Indirect whole wave calls are not allowed", &Call);
7041
7042 CallingConv::ID CC = F->getCallingConv();
7043 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7044 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7045 &Call);
7046
7047 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7048
7049 Check(Call.arg_size() == F->arg_size(),
7050 "Call argument count must match callee argument count", &Call);
7051
7052 // The first argument of the call is the callee, and the first argument of
7053 // the callee is the active mask. The rest of the arguments must match.
7054 Check(F->arg_begin()->getType()->isIntegerTy(1),
7055 "Callee must have i1 as its first argument", &Call);
7056 for (auto [CallArg, FuncArg] :
7057 drop_begin(zip_equal(Call.args(), F->args()))) {
7058 Check(CallArg->getType() == FuncArg.getType(),
7059 "Argument types must match", &Call);
7060
7061 // Check that inreg attributes match between call site and function
7062 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7063 FuncArg.hasInRegAttr(),
7064 "Argument inreg attributes must match", &Call);
7065 }
7066 break;
7067 }
7068 case Intrinsic::amdgcn_s_prefetch_data: {
7069 Check(
7072 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7073 break;
7074 }
7075 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7076 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7077 Value *Src0 = Call.getArgOperand(0);
7078 Value *Src1 = Call.getArgOperand(1);
7079
7080 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7081 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7082 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7083 Call.getArgOperand(3));
7084 Check(BLGP <= 4, "invalid value for blgp format", Call,
7085 Call.getArgOperand(4));
7086
7087 // AMDGPU::MFMAScaleFormats values
7088 auto getFormatNumRegs = [](unsigned FormatVal) {
7089 switch (FormatVal) {
7090 case 0:
7091 case 1:
7092 return 8u;
7093 case 2:
7094 case 3:
7095 return 6u;
7096 case 4:
7097 return 4u;
7098 default:
7099 llvm_unreachable("invalid format value");
7100 }
7101 };
7102
7103 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7104 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7105 return false;
7106 unsigned NumElts = Ty->getNumElements();
7107 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7108 };
7109
7110 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7111 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7112 Check(isValidSrcASrcBVector(Src0Ty),
7113 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7114 Check(isValidSrcASrcBVector(Src1Ty),
7115 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7116
7117 // Permit excess registers for the format.
7118 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7119 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7120 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7121 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7122 break;
7123 }
7124 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7125 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7126 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7127 Value *Src0 = Call.getArgOperand(1);
7128 Value *Src1 = Call.getArgOperand(3);
7129
7130 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7131 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7132 Check(FmtA <= 4, "invalid value for matrix format", Call,
7133 Call.getArgOperand(0));
7134 Check(FmtB <= 4, "invalid value for matrix format", Call,
7135 Call.getArgOperand(2));
7136
7137 // AMDGPU::MatrixFMT values
7138 auto getFormatNumRegs = [](unsigned FormatVal) {
7139 switch (FormatVal) {
7140 case 0:
7141 case 1:
7142 return 16u;
7143 case 2:
7144 case 3:
7145 return 12u;
7146 case 4:
7147 return 8u;
7148 default:
7149 llvm_unreachable("invalid format value");
7150 }
7151 };
7152
7153 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7154 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7155 return false;
7156 unsigned NumElts = Ty->getNumElements();
7157 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7158 };
7159
7160 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7161 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7162 Check(isValidSrcASrcBVector(Src0Ty),
7163 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7164 Check(isValidSrcASrcBVector(Src1Ty),
7165 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7166
7167 // Permit excess registers for the format.
7168 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7169 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7170 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7171 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7172 break;
7173 }
7174 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7175 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7176 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7177 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7178 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7179 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7180 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7181 Value *PtrArg = Call.getArgOperand(0);
7182 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7184 "cooperative atomic intrinsics require a generic or global pointer",
7185 &Call, PtrArg);
7186
7187 // Last argument must be a MD string
7189 MDNode *MD = cast<MDNode>(Op->getMetadata());
7190 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7191 "cooperative atomic intrinsics require that the last argument is a "
7192 "metadata string",
7193 &Call, Op);
7194 break;
7195 }
7196 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7197 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7198 Value *V = Call.getArgOperand(0);
7199 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7200 Check(RegCount % 8 == 0,
7201 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7202 break;
7203 }
7204 case Intrinsic::experimental_convergence_entry:
7205 case Intrinsic::experimental_convergence_anchor:
7206 break;
7207 case Intrinsic::experimental_convergence_loop:
7208 break;
7209 case Intrinsic::ptrmask: {
7210 Type *Ty0 = Call.getArgOperand(0)->getType();
7211 Type *Ty1 = Call.getArgOperand(1)->getType();
7213 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7214 "of pointers",
7215 &Call);
7216 Check(
7217 Ty0->isVectorTy() == Ty1->isVectorTy(),
7218 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7219 &Call);
7220 if (Ty0->isVectorTy())
7221 Check(cast<VectorType>(Ty0)->getElementCount() ==
7222 cast<VectorType>(Ty1)->getElementCount(),
7223 "llvm.ptrmask intrinsic arguments must have the same number of "
7224 "elements",
7225 &Call);
7226 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7227 "llvm.ptrmask intrinsic second argument bitwidth must match "
7228 "pointer index type size of first argument",
7229 &Call);
7230 break;
7231 }
7232 case Intrinsic::thread_pointer: {
7234 DL.getDefaultGlobalsAddressSpace(),
7235 "llvm.thread.pointer intrinsic return type must be for the globals "
7236 "address space",
7237 &Call);
7238 break;
7239 }
7240 case Intrinsic::threadlocal_address: {
7241 const Value &Arg0 = *Call.getArgOperand(0);
7242 Check(isa<GlobalValue>(Arg0),
7243 "llvm.threadlocal.address first argument must be a GlobalValue");
7244 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7245 "llvm.threadlocal.address operand isThreadLocal() must be true");
7246 break;
7247 }
7248 case Intrinsic::lifetime_start:
7249 case Intrinsic::lifetime_end: {
7250 Value *Ptr = Call.getArgOperand(0);
7252 "llvm.lifetime.start/end can only be used on alloca or poison",
7253 &Call);
7254 break;
7255 }
7256 case Intrinsic::sponentry: {
7257 const unsigned StackAS = DL.getAllocaAddrSpace();
7258 const Type *RetTy = Call.getFunctionType()->getReturnType();
7259 Check(RetTy->getPointerAddressSpace() == StackAS,
7260 "llvm.sponentry must return a pointer to the stack", &Call);
7261 break;
7262 }
7263 };
7264
7265 // Verify that there aren't any unmediated control transfers between funclets.
7267 Function *F = Call.getParent()->getParent();
7268 if (F->hasPersonalityFn() &&
7269 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7270 // Run EH funclet coloring on-demand and cache results for other intrinsic
7271 // calls in this function
7272 if (BlockEHFuncletColors.empty())
7273 BlockEHFuncletColors = colorEHFunclets(*F);
7274
7275 // Check for catch-/cleanup-pad in first funclet block
7276 bool InEHFunclet = false;
7277 BasicBlock *CallBB = Call.getParent();
7278 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7279 assert(CV.size() > 0 && "Uncolored block");
7280 for (BasicBlock *ColorFirstBB : CV)
7281 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7282 It != ColorFirstBB->end())
7284 InEHFunclet = true;
7285
7286 // Check for funclet operand bundle
7287 bool HasToken = false;
7288 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7290 HasToken = true;
7291
7292 // This would cause silent code truncation in WinEHPrepare
7293 if (InEHFunclet)
7294 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7295 }
7296 }
7297}
7298
7299/// Carefully grab the subprogram from a local scope.
7300///
7301/// This carefully grabs the subprogram from a local scope, avoiding the
7302/// built-in assertions that would typically fire.
///
/// Resolution walks the raw scope chain: a null scope yields null, a
/// DISubprogram is returned directly, and a DILexicalBlockBase is peeled one
/// level via getRawScope() before recursing. Any other metadata yields null
/// so that the dedicated scope-chain verification elsewhere reports the
/// breakage instead of asserting here.
7304 if (!LocalScope)
7305 return nullptr;
7306
7307 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7308 return SP;
7309
7310 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7311 return getSubprogram(LB->getRawScope());
7312
7313 // Just return null; broken scope chains are checked elsewhere.
// The assert documents that every DILocalScope subclass is handled above;
// reaching here with a DILocalScope would mean a new subclass was added
// without updating this walker.
7314 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7315 return nullptr;
7316}
7317
7318void Verifier::visit(DbgLabelRecord &DLR) {
// Verify a #dbg_label record: its raw label operand, the presence and
// validity of its !dbg attachment, and that the label's scope and the !dbg
// location resolve to the same subprogram.
7320 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7321
7322 // Ignore broken !dbg attachments; they're checked elsewhere.
7323 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7324 if (!isa<DILocation>(N))
7325 return;
7326
7327 BasicBlock *BB = DLR.getParent();
7328 Function *F = BB ? BB->getParent() : nullptr;
7329
7330 // The scopes for variables and !dbg attachments must agree.
7331 DILabel *Label = DLR.getLabel();
7332 DILocation *Loc = DLR.getDebugLoc();
7333 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7334
7335 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7336 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
// Null subprograms mean a broken scope chain, which is diagnosed by the
// scope verification elsewhere; skip the cross-check in that case.
7337 if (!LabelSP || !LocSP)
7338 return;
7339
7340 CheckDI(LabelSP == LocSP,
7341 "mismatched subprogram between #dbg_label label and !dbg attachment",
7342 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7343 Loc->getScope()->getSubprogram());
7344}
7345
7346void Verifier::visit(DbgVariableRecord &DVR) {
// Verify a #dbg_value / #dbg_declare / #dbg_assign record: record kind,
// location operand shape, variable and expression metadata, assign-specific
// operands, and agreement between the variable's scope and the attached
// DILocation's scope.
7347 BasicBlock *BB = DVR.getParent();
7348 Function *F = BB->getParent();
7349
7350 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7351 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7352 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7353 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7354 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7355
7356 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7357 // DIArgList, or an empty MDNode (which is a legacy representation for an
7358 // "undef" location).
7359 auto *MD = DVR.getRawLocation();
7360 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7361 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7362 "invalid #dbg record address/value", &DVR, MD, BB, F);
7363 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7364 visitValueAsMetadata(*VAM, F);
7365 if (DVR.isDbgDeclare()) {
7366 // Allow integers here to support inttoptr salvage.
7367 Type *Ty = VAM->getValue()->getType();
7368 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7369 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7370 F);
7371 }
7372 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7373 visitDIArgList(*AL, F);
7374 }
7375
// Variable and expression operands are visited with debug locations
// disallowed, since they are metadata trees rather than code positions.
7377 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7378 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7379
7381 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7382 F);
7383 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7384
// #dbg_assign records carry three extra operands: a DIAssignID link, an
// address, and an address expression.
7385 if (DVR.isDbgAssign()) {
7387 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7388 F);
7389 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7390 AreDebugLocsAllowed::No);
7391
7392 const auto *RawAddr = DVR.getRawAddress();
7393 // Similarly to the location above, the address for an assign
7394 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7395 // represents an undef address.
7396 CheckDI(
7397 isa<ValueAsMetadata>(RawAddr) ||
7398 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7399 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7400 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7401 visitValueAsMetadata(*VAM, F);
7402
7404 "invalid #dbg_assign address expression", &DVR,
7405 DVR.getRawAddressExpression(), BB, F);
7406 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7407
7408 // All of the linked instructions should be in the same function as DVR.
7409 for (Instruction *I : at::getAssignmentInsts(&DVR))
7410 CheckDI(DVR.getFunction() == I->getFunction(),
7411 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7412 }
7413
7414 // This check is redundant with one in visitLocalVariable().
7415 DILocalVariable *Var = DVR.getVariable();
7416 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7417 BB, F);
7418
7419 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7420 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7421 &DVR, DLNode, BB, F);
7422 DILocation *Loc = DVR.getDebugLoc();
7423
7424 // The scopes for variables and !dbg attachments must agree.
7425 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7426 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7427 if (!VarSP || !LocSP)
7428 return; // Broken scope chains are checked elsewhere.
7429
7430 CheckDI(VarSP == LocSP,
7431 "mismatched subprogram between #dbg record variable and DILocation",
7432 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7433 Loc->getScope()->getSubprogram(), BB, F)7434
// Cross-check variable numbering against the subprogram's declared
// parameters (see verifyFnArgs for the rules enforced).
7435 verifyFnArgs(DVR);
7436}
7437
7438void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
// Verify vector-predication intrinsics. VP casts must preserve the vector
// element count and obey the per-opcode element-type / bit-width rules; the
// trailing switch handles the non-cast VP intrinsics with extra operand
// constraints (compare predicates, fpclass masks, splice indices).
7439 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7440 auto *RetTy = cast<VectorType>(VPCast->getType());
7441 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7442 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7443 "VP cast intrinsic first argument and result vector lengths must be "
7444 "equal",
7445 *VPCast);
7446
7447 switch (VPCast->getIntrinsicID()) {
7448 default:
7449 llvm_unreachable("Unknown VP cast intrinsic");
7450 case Intrinsic::vp_trunc:
7451 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7452 "llvm.vp.trunc intrinsic first argument and result element type "
7453 "must be integer",
7454 *VPCast);
7455 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7456 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7457 "larger than the bit size of the return type",
7458 *VPCast);
7459 break;
7460 case Intrinsic::vp_zext:
7461 case Intrinsic::vp_sext:
7462 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7463 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7464 "element type must be integer",
7465 *VPCast);
7466 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7467 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7468 "argument must be smaller than the bit size of the return type",
7469 *VPCast);
7470 break;
7471 case Intrinsic::vp_fptoui:
7472 case Intrinsic::vp_fptosi:
7473 case Intrinsic::vp_lrint:
7474 case Intrinsic::vp_llrint:
7475 Check(
7476 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
// Fixed diagnostic: the adjacent string literals previously joined as
// "llvm.vp.llrintintrinsic" (missing separator space).
7477 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint " "intrinsic first argument element "
7478 "type must be floating-point and result element type must be integer",
7479 *VPCast);
7480 break;
7481 case Intrinsic::vp_uitofp:
7482 case Intrinsic::vp_sitofp:
7483 Check(
7484 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7485 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7486 "type must be integer and result element type must be floating-point",
7487 *VPCast);
7488 break;
7489 case Intrinsic::vp_fptrunc:
7490 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7491 "llvm.vp.fptrunc intrinsic first argument and result element type "
7492 "must be floating-point",
7493 *VPCast);
7494 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7495 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7496 "larger than the bit size of the return type",
7497 *VPCast);
7498 break;
7499 case Intrinsic::vp_fpext:
7500 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7501 "llvm.vp.fpext intrinsic first argument and result element type "
7502 "must be floating-point",
7503 *VPCast);
7504 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7505 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7506 "smaller than the bit size of the return type",
7507 *VPCast);
7508 break;
7509 case Intrinsic::vp_ptrtoint:
7510 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7511 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7512 "pointer and result element type must be integer",
7513 *VPCast);
7514 break;
7515 case Intrinsic::vp_inttoptr:
7516 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7517 "llvm.vp.inttoptr intrinsic first argument element type must be "
7518 "integer and result element type must be pointer",
7519 *VPCast);
7520 break;
7521 }
7522 }
7523
// Non-cast VP intrinsics with additional operand constraints.
7524 switch (VPI.getIntrinsicID()) {
7525 case Intrinsic::vp_fcmp: {
7526 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7528 "invalid predicate for VP FP comparison intrinsic", &VPI);
7529 break;
7530 }
7531 case Intrinsic::vp_icmp: {
7532 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7534 "invalid predicate for VP integer comparison intrinsic", &VPI);
7535 break;
7536 }
7537 case Intrinsic::vp_is_fpclass: {
// The test mask must only use bits defined by FPClassTest (fcAllFlags).
7538 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7539 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7540 "unsupported bits for llvm.vp.is.fpclass test mask");
7541 break;
7542 }
7543 case Intrinsic::experimental_vp_splice: {
7544 VectorType *VecTy = cast<VectorType>(VPI.getType());
7545 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7546 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
// For scalable vectors, scale the known-minimum element count by the
// function's vscale_range minimum when that attribute is present.
7547 if (VPI.getParent() && VPI.getParent()->getParent()) {
7548 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7549 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7550 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7551 }
7552 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7553 (Idx >= 0 && Idx < KnownMinNumElements),
7554 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7555 "known minimum number of elements in the vector. For scalable "
7556 "vectors the minimum number of elements is determined from "
7557 "vscale_range.",
7558 &VPI);
7559 break;
7560 }
7561 }
7562}
7563
7564void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7565 unsigned NumOperands = FPI.getNonMetadataArgCount();
7566 bool HasRoundingMD =
7568
7569 // Add the expected number of metadata operands.
7570 NumOperands += (1 + HasRoundingMD);
7571
7572 // Compare intrinsics carry an extra predicate metadata operand.
7574 NumOperands += 1;
7575 Check((FPI.arg_size() == NumOperands),
7576 "invalid arguments for constrained FP intrinsic", &FPI);
7577
7578 switch (FPI.getIntrinsicID()) {
7579 case Intrinsic::experimental_constrained_lrint:
7580 case Intrinsic::experimental_constrained_llrint: {
7581 Type *ValTy = FPI.getArgOperand(0)->getType();
7582 Type *ResultTy = FPI.getType();
7583 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7584 "Intrinsic does not support vectors", &FPI);
7585 break;
7586 }
7587
7588 case Intrinsic::experimental_constrained_lround:
7589 case Intrinsic::experimental_constrained_llround: {
7590 Type *ValTy = FPI.getArgOperand(0)->getType();
7591 Type *ResultTy = FPI.getType();
7592 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7593 "Intrinsic does not support vectors", &FPI);
7594 break;
7595 }
7596
7597 case Intrinsic::experimental_constrained_fcmp:
7598 case Intrinsic::experimental_constrained_fcmps: {
7599 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7601 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7602 break;
7603 }
7604
7605 case Intrinsic::experimental_constrained_fptosi:
7606 case Intrinsic::experimental_constrained_fptoui: {
7607 Value *Operand = FPI.getArgOperand(0);
7608 ElementCount SrcEC;
7609 Check(Operand->getType()->isFPOrFPVectorTy(),
7610 "Intrinsic first argument must be floating point", &FPI);
7611 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7612 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7613 }
7614
7615 Operand = &FPI;
7616 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7617 "Intrinsic first argument and result disagree on vector use", &FPI);
7618 Check(Operand->getType()->isIntOrIntVectorTy(),
7619 "Intrinsic result must be an integer", &FPI);
7620 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7621 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7622 "Intrinsic first argument and result vector lengths must be equal",
7623 &FPI);
7624 }
7625 break;
7626 }
7627
7628 case Intrinsic::experimental_constrained_sitofp:
7629 case Intrinsic::experimental_constrained_uitofp: {
7630 Value *Operand = FPI.getArgOperand(0);
7631 ElementCount SrcEC;
7632 Check(Operand->getType()->isIntOrIntVectorTy(),
7633 "Intrinsic first argument must be integer", &FPI);
7634 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7635 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7636 }
7637
7638 Operand = &FPI;
7639 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7640 "Intrinsic first argument and result disagree on vector use", &FPI);
7641 Check(Operand->getType()->isFPOrFPVectorTy(),
7642 "Intrinsic result must be a floating point", &FPI);
7643 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7644 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7645 "Intrinsic first argument and result vector lengths must be equal",
7646 &FPI);
7647 }
7648 break;
7649 }
7650
7651 case Intrinsic::experimental_constrained_fptrunc:
7652 case Intrinsic::experimental_constrained_fpext: {
7653 Value *Operand = FPI.getArgOperand(0);
7654 Type *OperandTy = Operand->getType();
7655 Value *Result = &FPI;
7656 Type *ResultTy = Result->getType();
7657 Check(OperandTy->isFPOrFPVectorTy(),
7658 "Intrinsic first argument must be FP or FP vector", &FPI);
7659 Check(ResultTy->isFPOrFPVectorTy(),
7660 "Intrinsic result must be FP or FP vector", &FPI);
7661 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7662 "Intrinsic first argument and result disagree on vector use", &FPI);
7663 if (OperandTy->isVectorTy()) {
7664 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7665 cast<VectorType>(ResultTy)->getElementCount(),
7666 "Intrinsic first argument and result vector lengths must be equal",
7667 &FPI);
7668 }
7669 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7670 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7671 "Intrinsic first argument's type must be larger than result type",
7672 &FPI);
7673 } else {
7674 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7675 "Intrinsic first argument's type must be smaller than result type",
7676 &FPI);
7677 }
7678 break;
7679 }
7680
7681 default:
7682 break;
7683 }
7684
7685 // If a non-metadata argument is passed in a metadata slot then the
7686 // error will be caught earlier when the incorrect argument doesn't
7687 // match the specification in the intrinsic call table. Thus, no
7688 // argument type check is needed here.
7689
7690 Check(FPI.getExceptionBehavior().has_value(),
7691 "invalid exception behavior argument", &FPI);
7692 if (HasRoundingMD) {
7693 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7694 &FPI);
7695 }
7696}
7697
7698void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7699 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7700 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7701
7702 // We don't know whether this intrinsic verified correctly.
7703 if (!V || !E || !E->isValid())
7704 return;
7705
7706 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7707 auto Fragment = E->getFragmentInfo();
7708 if (!Fragment)
7709 return;
7710
7711 // The frontend helps out GDB by emitting the members of local anonymous
7712 // unions as artificial local variables with shared storage. When SROA splits
7713 // the storage for artificial local variables that are smaller than the entire
7714 // union, the overhang piece will be outside of the allotted space for the
7715 // variable and this check fails.
7716 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7717 if (V->isArtificial())
7718 return;
7719
7720 verifyFragmentExpression(*V, *Fragment, &DVR);
7721}
7722
7723template <typename ValueOrMetadata>
7724void Verifier::verifyFragmentExpression(const DIVariable &V,
7726 ValueOrMetadata *Desc) {
7727 // If there's no size, the type is broken, but that should be checked
7728 // elsewhere.
7729 auto VarSize = V.getSizeInBits();
7730 if (!VarSize)
7731 return;
7732
7733 unsigned FragSize = Fragment.SizeInBits;
7734 unsigned FragOffset = Fragment.OffsetInBits;
7735 CheckDI(FragSize + FragOffset <= *VarSize,
7736 "fragment is larger than or outside of variable", Desc, &V);
7737 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7738}
7739
7740void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7741 // This function does not take the scope of noninlined function arguments into
7742 // account. Don't run it if current function is nodebug, because it may
7743 // contain inlined debug intrinsics.
7744 if (!HasDebugInfo)
7745 return;
7746
7747 // For performance reasons only check non-inlined ones.
7748 if (DVR.getDebugLoc()->getInlinedAt())
7749 return;
7750
7751 DILocalVariable *Var = DVR.getVariable();
7752 CheckDI(Var, "#dbg record without variable");
7753
7754 unsigned ArgNo = Var->getArg();
7755 if (!ArgNo)
7756 return;
7757
7758 // Verify there are no duplicate function argument debug info entries.
7759 // These will cause hard-to-debug assertions in the DWARF backend.
7760 if (DebugFnArgs.size() < ArgNo)
7761 DebugFnArgs.resize(ArgNo, nullptr);
7762
7763 auto *Prev = DebugFnArgs[ArgNo - 1];
7764 DebugFnArgs[ArgNo - 1] = Var;
7765 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7766 Prev, Var);
7767}
7768
7769void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7770 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7771
7772 // We don't know whether this intrinsic verified correctly.
7773 if (!E || !E->isValid())
7774 return;
7775
7777 Value *VarValue = DVR.getVariableLocationOp(0);
7778 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7779 return;
7780 // We allow EntryValues for swift async arguments, as they have an
7781 // ABI-guarantee to be turned into a specific register.
7782 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7783 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7784 return;
7785 }
7786
7787 CheckDI(!E->isEntryValue(),
7788 "Entry values are only allowed in MIR unless they target a "
7789 "swiftasync Argument",
7790 &DVR);
7791}
7792
7793void Verifier::verifyCompileUnits() {
7794 // When more than one Module is imported into the same context, such as during
7795 // an LTO build before linking the modules, ODR type uniquing may cause types
7796 // to point to a different CU. This check does not make sense in this case.
7797 if (M.getContext().isODRUniquingDebugTypes())
7798 return;
7799 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7800 SmallPtrSet<const Metadata *, 2> Listed;
7801 if (CUs)
7802 Listed.insert_range(CUs->operands());
7803 for (const auto *CU : CUVisited)
7804 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7805 CUVisited.clear();
7806}
7807
7808void Verifier::verifyDeoptimizeCallingConvs() {
7809 if (DeoptimizeDeclarations.empty())
7810 return;
7811
7812 const Function *First = DeoptimizeDeclarations[0];
7813 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7814 Check(First->getCallingConv() == F->getCallingConv(),
7815 "All llvm.experimental.deoptimize declarations must have the same "
7816 "calling convention",
7817 First, F);
7818 }
7819}
7820
7821void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7822 const OperandBundleUse &BU) {
7823 FunctionType *FTy = Call.getFunctionType();
7824
7825 Check((FTy->getReturnType()->isPointerTy() ||
7826 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7827 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7828 "function returning a pointer or a non-returning function that has a "
7829 "void return type",
7830 Call);
7831
7832 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7833 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7834 "an argument",
7835 Call);
7836
7837 auto *Fn = cast<Function>(BU.Inputs.front());
7838 Intrinsic::ID IID = Fn->getIntrinsicID();
7839
7840 if (IID) {
7841 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7842 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7843 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7844 "invalid function argument", Call);
7845 } else {
7846 StringRef FnName = Fn->getName();
7847 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7848 FnName == "objc_claimAutoreleasedReturnValue" ||
7849 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7850 "invalid function argument", Call);
7851 }
7852}
7853
7854void Verifier::verifyNoAliasScopeDecl() {
7855 if (NoAliasScopeDecls.empty())
7856 return;
7857
7858 // only a single scope must be declared at a time.
7859 for (auto *II : NoAliasScopeDecls) {
7860 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7861 "Not a llvm.experimental.noalias.scope.decl ?");
7862 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7864 Check(ScopeListMV != nullptr,
7865 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7866 "argument",
7867 II);
7868
7869 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7870 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7871 Check(ScopeListMD->getNumOperands() == 1,
7872 "!id.scope.list must point to a list with a single scope", II);
7873 visitAliasScopeListMetadata(ScopeListMD);
7874 }
7875
7876 // Only check the domination rule when requested. Once all passes have been
7877 // adapted this option can go away.
7879 return;
7880
7881 // Now sort the intrinsics based on the scope MDNode so that declarations of
7882 // the same scopes are next to each other.
7883 auto GetScope = [](IntrinsicInst *II) {
7884 const auto *ScopeListMV = cast<MetadataAsValue>(
7886 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7887 };
7888
7889 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7890 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7891 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7892 return GetScope(Lhs) < GetScope(Rhs);
7893 };
7894
7895 llvm::sort(NoAliasScopeDecls, Compare);
7896
7897 // Go over the intrinsics and check that for the same scope, they are not
7898 // dominating each other.
7899 auto ItCurrent = NoAliasScopeDecls.begin();
7900 while (ItCurrent != NoAliasScopeDecls.end()) {
7901 auto CurScope = GetScope(*ItCurrent);
7902 auto ItNext = ItCurrent;
7903 do {
7904 ++ItNext;
7905 } while (ItNext != NoAliasScopeDecls.end() &&
7906 GetScope(*ItNext) == CurScope);
7907
7908 // [ItCurrent, ItNext) represents the declarations for the same scope.
7909 // Ensure they are not dominating each other.. but only if it is not too
7910 // expensive.
7911 if (ItNext - ItCurrent < 32)
7912 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7913 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7914 if (I != J)
7915 Check(!DT.dominates(I, J),
7916 "llvm.experimental.noalias.scope.decl dominates another one "
7917 "with the same scope",
7918 I);
7919 ItCurrent = ItNext;
7920 }
7921}
7922
7923//===----------------------------------------------------------------------===//
7924// Implement the public interfaces to this file...
7925//===----------------------------------------------------------------------===//
7926
7928 Function &F = const_cast<Function &>(f);
7929
7930 // Don't use a raw_null_ostream. Printing IR is expensive.
7931 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7932
7933 // Note that this function's return value is inverted from what you would
7934 // expect of a function called "verify".
7935 return !V.verify(F);
7936}
7937
7939 bool *BrokenDebugInfo) {
7940 // Don't use a raw_null_ostream. Printing IR is expensive.
7941 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7942
7943 bool Broken = false;
7944 for (const Function &F : M)
7945 Broken |= !V.verify(F);
7946
7947 Broken |= !V.verify();
7948 if (BrokenDebugInfo)
7949 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7950 // Note that this function's return value is inverted from what you would
7951 // expect of a function called "verify".
7952 return Broken;
7953}
7954
namespace {

/// Legacy pass-manager wrapper around the Verifier.
///
/// Function bodies are verified one at a time in runOnFunction; declarations
/// and module-level checks are deferred to doFinalization once the whole
/// module has been seen.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  std::unique_ptr<Verifier> V;
  // When true, any broken function/module aborts compilation via
  // report_fatal_error; otherwise failures are only printed.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {}
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID), FatalErrors(FatalErrors) {}

  bool doInitialization(Module &M) override {
    // Broken debug info is not treated as an error here; it is handled
    // separately in doFinalization via hasBrokenDebugInfo().
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = false;
    // Bodies were already verified in runOnFunction; only declarations remain.
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
7999
8000/// Helper to issue failure from the TBAA verification
8001template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8002 if (Diagnostic)
8003 return Diagnostic->CheckFailed(Args...);
8004}
8005
// Evaluate condition C; on failure, report the remaining arguments via
// CheckFailed and bail out of the enclosing function returning false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8013
8014/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8015/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8016/// struct-type node describing an aggregate data structure (like a struct).
8017TBAAVerifier::TBAABaseNodeSummary
8018TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8019 bool IsNewFormat) {
8020 if (BaseNode->getNumOperands() < 2) {
8021 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8022 return {true, ~0u};
8023 }
8024
8025 auto Itr = TBAABaseNodes.find(BaseNode);
8026 if (Itr != TBAABaseNodes.end())
8027 return Itr->second;
8028
8029 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8030 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8031 (void)InsertResult;
8032 assert(InsertResult.second && "We just checked!");
8033 return Result;
8034}
8035
8036TBAAVerifier::TBAABaseNodeSummary
8037TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
8038 const MDNode *BaseNode, bool IsNewFormat) {
8039 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
8040
8041 if (BaseNode->getNumOperands() == 2) {
8042 // Scalar nodes can only be accessed at offset 0.
8043 return isValidScalarTBAANode(BaseNode)
8044 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
8045 : InvalidNode;
8046 }
8047
8048 if (IsNewFormat) {
8049 if (BaseNode->getNumOperands() % 3 != 0) {
8050 CheckFailed("Access tag nodes must have the number of operands that is a "
8051 "multiple of 3!", BaseNode);
8052 return InvalidNode;
8053 }
8054 } else {
8055 if (BaseNode->getNumOperands() % 2 != 1) {
8056 CheckFailed("Struct tag nodes must have an odd number of operands!",
8057 BaseNode);
8058 return InvalidNode;
8059 }
8060 }
8061
8062 // Check the type size field.
8063 if (IsNewFormat) {
8064 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8065 BaseNode->getOperand(1));
8066 if (!TypeSizeNode) {
8067 CheckFailed("Type size nodes must be constants!", I, BaseNode);
8068 return InvalidNode;
8069 }
8070 }
8071
8072 // Check the type name field. In the new format it can be anything.
8073 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
8074 CheckFailed("Struct tag nodes have a string as their first operand",
8075 BaseNode);
8076 return InvalidNode;
8077 }
8078
8079 bool Failed = false;
8080
8081 std::optional<APInt> PrevOffset;
8082 unsigned BitWidth = ~0u;
8083
8084 // We've already checked that BaseNode is not a degenerate root node with one
8085 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8086 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8087 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8088 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8089 Idx += NumOpsPerField) {
8090 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8091 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8092 if (!isa<MDNode>(FieldTy)) {
8093 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8094 Failed = true;
8095 continue;
8096 }
8097
8098 auto *OffsetEntryCI =
8100 if (!OffsetEntryCI) {
8101 CheckFailed("Offset entries must be constants!", I, BaseNode);
8102 Failed = true;
8103 continue;
8104 }
8105
8106 if (BitWidth == ~0u)
8107 BitWidth = OffsetEntryCI->getBitWidth();
8108
8109 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8110 CheckFailed(
8111 "Bitwidth between the offsets and struct type entries must match", I,
8112 BaseNode);
8113 Failed = true;
8114 continue;
8115 }
8116
8117 // NB! As far as I can tell, we generate a non-strictly increasing offset
8118 // sequence only from structs that have zero size bit fields. When
8119 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8120 // pick the field lexically the latest in struct type metadata node. This
8121 // mirrors the actual behavior of the alias analysis implementation.
8122 bool IsAscending =
8123 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8124
8125 if (!IsAscending) {
8126 CheckFailed("Offsets must be increasing!", I, BaseNode);
8127 Failed = true;
8128 }
8129
8130 PrevOffset = OffsetEntryCI->getValue();
8131
8132 if (IsNewFormat) {
8133 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8134 BaseNode->getOperand(Idx + 2));
8135 if (!MemberSizeNode) {
8136 CheckFailed("Member size entries must be constants!", I, BaseNode);
8137 Failed = true;
8138 continue;
8139 }
8140 }
8141 }
8142
8143 return Failed ? InvalidNode
8144 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8145}
8146
8147static bool IsRootTBAANode(const MDNode *MD) {
8148 return MD->getNumOperands() < 2;
8149}
8150
8151static bool IsScalarTBAANodeImpl(const MDNode *MD,
8153 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8154 return false;
8155
8156 if (!isa<MDString>(MD->getOperand(0)))
8157 return false;
8158
8159 if (MD->getNumOperands() == 3) {
8161 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8162 return false;
8163 }
8164
8165 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8166 return Parent && Visited.insert(Parent).second &&
8167 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8168}
8169
8170bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8171 auto ResultIt = TBAAScalarNodes.find(MD);
8172 if (ResultIt != TBAAScalarNodes.end())
8173 return ResultIt->second;
8174
8175 SmallPtrSet<const MDNode *, 4> Visited;
8176 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8177 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8178 (void)InsertResult;
8179 assert(InsertResult.second && "Just checked!");
8180
8181 return Result;
8182}
8183
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields start at operand 1 (old format) or 3 (new format), grouped as
  // (type, offset) pairs or (type, offset, size) triples respectively.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    // Find the first field starting strictly past Offset; the field before it
    // (if any) is the one containing Offset.
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Even the first field starts beyond Offset: no containing field.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // Rebase Offset to be relative to the start of the containing field.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset lies at or beyond the start of the last field.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8227
8229 if (!Type || Type->getNumOperands() < 3)
8230 return false;
8231
8232 // In the new format type nodes shall have a reference to the parent type as
8233 // its first operand.
8234 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8235}
8236
8238 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8239 MD);
8240
8241 if (I)
8245 "This instruction shall not have a TBAA access tag!", I);
8246
8247 bool IsStructPathTBAA =
8248 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8249
8250 CheckTBAA(IsStructPathTBAA,
8251 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8252 I);
8253
8254 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8255 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8256
8257 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8258
8259 if (IsNewFormat) {
8260 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8261 "Access tag metadata must have either 4 or 5 operands", I, MD);
8262 } else {
8263 CheckTBAA(MD->getNumOperands() < 5,
8264 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8265 }
8266
8267 // Check the access size field.
8268 if (IsNewFormat) {
8269 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8270 MD->getOperand(3));
8271 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8272 }
8273
8274 // Check the immutability flag.
8275 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8276 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8277 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8278 MD->getOperand(ImmutabilityFlagOpNo));
8279 CheckTBAA(IsImmutableCI,
8280 "Immutability tag on struct tag metadata must be a constant", I,
8281 MD);
8282 CheckTBAA(
8283 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8284 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8285 MD);
8286 }
8287
8288 CheckTBAA(BaseNode && AccessType,
8289 "Malformed struct tag metadata: base and access-type "
8290 "should be non-null and point to Metadata nodes",
8291 I, MD, BaseNode, AccessType);
8292
8293 if (!IsNewFormat) {
8294 CheckTBAA(isValidScalarTBAANode(AccessType),
8295 "Access type node must be a valid scalar type", I, MD,
8296 AccessType);
8297 }
8298
8300 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8301
8302 APInt Offset = OffsetCI->getValue();
8303 bool SeenAccessTypeInPath = false;
8304
8305 SmallPtrSet<MDNode *, 4> StructPath;
8306
8307 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8308 BaseNode =
8309 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8310 if (!StructPath.insert(BaseNode).second) {
8311 CheckFailed("Cycle detected in struct path", I, MD);
8312 return false;
8313 }
8314
8315 bool Invalid;
8316 unsigned BaseNodeBitWidth;
8317 std::tie(Invalid, BaseNodeBitWidth) =
8318 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8319
8320 // If the base node is invalid in itself, then we've already printed all the
8321 // errors we wanted to print.
8322 if (Invalid)
8323 return false;
8324
8325 SeenAccessTypeInPath |= BaseNode == AccessType;
8326
8327 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8328 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8329 MD, &Offset);
8330
8331 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8332 (BaseNodeBitWidth == 0 && Offset == 0) ||
8333 (IsNewFormat && BaseNodeBitWidth == ~0u),
8334 "Access bit-width not the same as description bit-width", I, MD,
8335 BaseNodeBitWidth, Offset.getBitWidth());
8336
8337 if (IsNewFormat && SeenAccessTypeInPath)
8338 break;
8339 }
8340
8341 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8342 MD);
8343 return true;
8344}
8345
8346char VerifierLegacyPass::ID = 0;
8347INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8348
8350 return new VerifierLegacyPass(FatalErrors);
8351}
8352
// Unique key identifying VerifierAnalysis to the new pass manager.
AnalysisKey VerifierAnalysis::Key;
8360
8365
8367 auto Res = AM.getResult<VerifierAnalysis>(M);
8368 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8369 report_fatal_error("Broken module found, compilation aborted!");
8370
8371 return PreservedAnalyses::all();
8372}
8373
8375 auto res = AM.getResult<VerifierAnalysis>(F);
8376 if (res.IRBroken && FatalErrors)
8377 report_fatal_error("Broken function found, compilation aborted!");
8378
8379 return PreservedAnalyses::all();
8380}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:687
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:728
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1526
bool isNegative() const
Definition APFloat.h:1516
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:449
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:518
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:472
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
Return the number of 'handlers' in this catchswitch instruction, except the default handler.
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
Iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:680
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:577
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:329
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:108
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:561
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
Return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast from unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:421
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:717
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:824
iterator_range< user_iterator > users()
Definition Value.h:427
bool materialized_use_empty() const
Definition Value.h:352
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:258
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:259
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:307
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:300
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:289
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:316
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144