// Doxygen page header (scrape residue): LLVM 23.0.0git — Verifier.cpp —
// "Go to the documentation of this file."
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
85#include "llvm/IR/GlobalAlias.h"
86#include "llvm/IR/GlobalValue.h"
88#include "llvm/IR/InlineAsm.h"
89#include "llvm/IR/InstVisitor.h"
90#include "llvm/IR/InstrTypes.h"
91#include "llvm/IR/Instruction.h"
94#include "llvm/IR/Intrinsics.h"
95#include "llvm/IR/IntrinsicsAArch64.h"
96#include "llvm/IR/IntrinsicsAMDGPU.h"
97#include "llvm/IR/IntrinsicsARM.h"
98#include "llvm/IR/IntrinsicsNVPTX.h"
99#include "llvm/IR/IntrinsicsWebAssembly.h"
100#include "llvm/IR/LLVMContext.h"
102#include "llvm/IR/Metadata.h"
103#include "llvm/IR/Module.h"
105#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
118#include "llvm/Support/Casting.h"
122#include "llvm/Support/ModRef.h"
125#include <algorithm>
126#include <cassert>
127#include <cstdint>
128#include <memory>
129#include <optional>
130#include <string>
131#include <utility>
132
133using namespace llvm;
134
136 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
137 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
138 "scopes are not dominating"));
139
  // NOTE(review): this fragment of `struct VerifierSupport` is truncated by
  // the scrape — the struct header and several member declarations are
  // missing (the constructor's init list below references OS, MST, DL and
  // Context, so a raw_ostream pointer, a ModuleSlotTracker, a DataLayout
  // reference and an LLVMContext reference must be declared here; the
  // TreatBrokenDebugInfoAsError flag is assigned in the Verifier constructor
  // further down). Restore from upstream Verifier.cpp before building.
 142 const Module &M;
 144 const Triple &TT;
 147
 148 /// Track the brokenness of the module while recursively visiting.
 149 bool Broken = false;
 150 /// Broken debug info can be "recovered" from by stripping the debug info.
 151 bool BrokenDebugInfo = false;
 152 /// Whether to treat broken debug info as an error.
 154
  // Constructor init list (signature line missing): binds every reference
  // member to the module being verified.
 156 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
 157 Context(M.getContext()) {}
 158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
187 switch (Type) {
189 *OS << "value";
190 break;
192 *OS << "declare";
193 break;
195 *OS << "declare_value";
196 break;
198 *OS << "assign";
199 break;
201 *OS << "end";
202 break;
204 *OS << "any";
205 break;
206 };
207 }
208
209 void Write(const Metadata *MD) {
210 if (!MD)
211 return;
212 MD->print(*OS, MST, &M);
213 *OS << '\n';
214 }
215
216 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
217 Write(MD.get());
218 }
219
220 void Write(const NamedMDNode *NMD) {
221 if (!NMD)
222 return;
223 NMD->print(*OS, MST);
224 *OS << '\n';
225 }
226
227 void Write(Type *T) {
228 if (!T)
229 return;
230 *OS << ' ' << *T;
231 }
232
233 void Write(const Comdat *C) {
234 if (!C)
235 return;
236 *OS << *C;
237 }
238
239 void Write(const APInt *AI) {
240 if (!AI)
241 return;
242 *OS << *AI << '\n';
243 }
244
245 void Write(const unsigned i) { *OS << i << '\n'; }
246
247 // NOLINTNEXTLINE(readability-identifier-naming)
248 void Write(const Attribute *A) {
249 if (!A)
250 return;
251 *OS << A->getAsString() << '\n';
252 }
253
254 // NOLINTNEXTLINE(readability-identifier-naming)
255 void Write(const AttributeSet *AS) {
256 if (!AS)
257 return;
258 *OS << AS->getAsString() << '\n';
259 }
260
261 // NOLINTNEXTLINE(readability-identifier-naming)
262 void Write(const AttributeList *AL) {
263 if (!AL)
264 return;
265 AL->print(*OS);
266 }
267
268 void Write(Printable P) { *OS << P << '\n'; }
269
270 template <typename T> void Write(ArrayRef<T> Vs) {
271 for (const T &V : Vs)
272 Write(V);
273 }
274
275 template <typename T1, typename... Ts>
276 void WriteTs(const T1 &V1, const Ts &... Vs) {
277 Write(V1);
278 WriteTs(Vs...);
279 }
280
281 template <typename... Ts> void WriteTs() {}
282
283public:
284 /// A check failed, so printout out the condition and the message.
285 ///
286 /// This provides a nice place to put a breakpoint if you want to see why
287 /// something is not correct.
288 void CheckFailed(const Twine &Message) {
289 if (OS)
290 *OS << Message << '\n';
291 Broken = true;
292 }
293
294 /// A check failed (with values to print).
295 ///
296 /// This calls the Message-only version so that the above is easier to set a
297 /// breakpoint on.
298 template <typename T1, typename... Ts>
299 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
300 CheckFailed(Message);
301 if (OS)
302 WriteTs(V1, Vs...);
303 }
304
305 /// A debug info check failed.
306 void DebugInfoCheckFailed(const Twine &Message) {
307 if (OS)
308 *OS << Message << '\n';
310 BrokenDebugInfo = true;
311 }
312
313 /// A debug info check failed (with values to print).
314 template <typename T1, typename... Ts>
315 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
316 const Ts &... Vs) {
317 DebugInfoCheckFailed(Message);
318 if (OS)
319 WriteTs(V1, Vs...);
320 }
321};
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
338
339 /// Keep track which DISubprogram is attached to which function.
341
342 /// Track all DICompileUnits visited.
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
393 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
394 const Module &M)
395 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
396 SawFrameEscape(false), TBAAVerifyHelper(this) {
397 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
398 }
399
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
402 bool verify(const Function &F) {
403 llvm::TimeTraceScope timeScope("Verifier");
404 assert(F.getParent() == &M &&
405 "An instance of this class only works with a specific module!");
406
407 // First ensure the function is well-enough formed to compute dominance
408 // information, and directly compute a dominance tree. We don't rely on the
409 // pass manager to provide this as it isolates us from a potentially
410 // out-of-date dominator tree and makes it significantly more complex to run
411 // this code outside of a pass manager.
412 // FIXME: It's really gross that we have to cast away constness here.
413 if (!F.empty())
414 DT.recalculate(const_cast<Function &>(F));
415
416 for (const BasicBlock &BB : F) {
417 if (!BB.empty() && BB.back().isTerminator())
418 continue;
419
420 if (OS) {
421 *OS << "Basic Block in function '" << F.getName()
422 << "' does not have terminator!\n";
423 BB.printAsOperand(*OS, true, MST);
424 *OS << "\n";
425 }
426 return false;
427 }
428
429 auto FailureCB = [this](const Twine &Message) {
430 this->CheckFailed(Message);
431 };
432 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
433
434 Broken = false;
435 // FIXME: We strip const here because the inst visitor strips const.
436 visit(const_cast<Function &>(F));
437 verifySiblingFuncletUnwinds();
438
439 if (ConvergenceVerifyHelper.sawTokens())
440 ConvergenceVerifyHelper.verify(DT);
441
442 InstsInThisBlock.clear();
443 DebugFnArgs.clear();
444 LandingPadResultTy = nullptr;
445 SawFrameEscape = false;
446 SiblingFuncletInfo.clear();
447 verifyNoAliasScopeDecl();
448 NoAliasScopeDecls.clear();
449
450 return !Broken;
451 }
452
453 /// Verify the module that this instance of \c Verifier was initialized with.
454 bool verify() {
455 Broken = false;
456
457 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
458 for (const Function &F : M)
459 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
460 DeoptimizeDeclarations.push_back(&F);
461
462 // Now that we've visited every function, verify that we never asked to
463 // recover a frame index that wasn't escaped.
464 verifyFrameRecoverIndices();
465 for (const GlobalVariable &GV : M.globals())
466 visitGlobalVariable(GV);
467
468 for (const GlobalAlias &GA : M.aliases())
469 visitGlobalAlias(GA);
470
471 for (const GlobalIFunc &GI : M.ifuncs())
472 visitGlobalIFunc(GI);
473
474 for (const NamedMDNode &NMD : M.named_metadata())
475 visitNamedMDNode(NMD);
476
477 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
478 visitComdat(SMEC.getValue());
479
480 visitModuleFlags();
481 visitModuleIdents();
482 visitModuleCommandLines();
483 visitModuleErrnoTBAA();
484
485 verifyCompileUnits();
486
487 verifyDeoptimizeCallingConvs();
488 DISubprogramAttachments.clear();
489 return !Broken;
490 }
491
492private:
493 /// Whether a metadata node is allowed to be, or contain, a DILocation.
494 enum class AreDebugLocsAllowed { No, Yes };
495
496 /// Metadata that should be treated as a range, with slightly different
497 /// requirements.
498 enum class RangeLikeMetadataKind {
499 Range, // MD_range
500 AbsoluteSymbol, // MD_absolute_symbol
501 NoaliasAddrspace // MD_noalias_addrspace
502 };
503
504 // Verification methods...
505 void visitGlobalValue(const GlobalValue &GV);
506 void visitGlobalVariable(const GlobalVariable &GV);
507 void visitGlobalAlias(const GlobalAlias &GA);
508 void visitGlobalIFunc(const GlobalIFunc &GI);
509 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
510 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
511 const GlobalAlias &A, const Constant &C);
512 void visitNamedMDNode(const NamedMDNode &NMD);
513 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
514 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
515 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
516 void visitDIArgList(const DIArgList &AL, Function *F);
517 void visitComdat(const Comdat &C);
518 void visitModuleIdents();
519 void visitModuleCommandLines();
520 void visitModuleErrnoTBAA();
521 void visitModuleFlags();
522 void visitModuleFlag(const MDNode *Op,
523 DenseMap<const MDString *, const MDNode *> &SeenIDs,
524 SmallVectorImpl<const MDNode *> &Requirements);
525 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
526 void visitFunction(const Function &F);
527 void visitBasicBlock(BasicBlock &BB);
528 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
529 RangeLikeMetadataKind Kind);
530 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
532 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
533 void visitNofreeMetadata(Instruction &I, MDNode *MD);
534 void visitProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallStackMetadata(MDNode *MD);
536 void visitMemProfMetadata(Instruction &I, MDNode *MD);
537 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
538 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
539 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
540 void visitMMRAMetadata(Instruction &I, MDNode *MD);
541 void visitAnnotationMetadata(MDNode *Annotation);
542 void visitAliasScopeMetadata(const MDNode *MD);
543 void visitAliasScopeListMetadata(const MDNode *MD);
544 void visitAccessGroupMetadata(const MDNode *MD);
545 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
546 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
547
548 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
549#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
550#include "llvm/IR/Metadata.def"
551 void visitDIScope(const DIScope &N);
552 void visitDIVariable(const DIVariable &N);
553 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
554 void visitDITemplateParameter(const DITemplateParameter &N);
555
556 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
557
558 void visit(DbgLabelRecord &DLR);
559 void visit(DbgVariableRecord &DVR);
560 // InstVisitor overrides...
561 using InstVisitor<Verifier>::visit;
562 void visitDbgRecords(Instruction &I);
563 void visit(Instruction &I);
564
565 void visitTruncInst(TruncInst &I);
566 void visitZExtInst(ZExtInst &I);
567 void visitSExtInst(SExtInst &I);
568 void visitFPTruncInst(FPTruncInst &I);
569 void visitFPExtInst(FPExtInst &I);
570 void visitFPToUIInst(FPToUIInst &I);
571 void visitFPToSIInst(FPToSIInst &I);
572 void visitUIToFPInst(UIToFPInst &I);
573 void visitSIToFPInst(SIToFPInst &I);
574 void visitIntToPtrInst(IntToPtrInst &I);
575 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
576 void visitPtrToAddrInst(PtrToAddrInst &I);
577 void visitPtrToIntInst(PtrToIntInst &I);
578 void visitBitCastInst(BitCastInst &I);
579 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
580 void visitPHINode(PHINode &PN);
581 void visitCallBase(CallBase &Call);
582 void visitUnaryOperator(UnaryOperator &U);
583 void visitBinaryOperator(BinaryOperator &B);
584 void visitICmpInst(ICmpInst &IC);
585 void visitFCmpInst(FCmpInst &FC);
586 void visitExtractElementInst(ExtractElementInst &EI);
587 void visitInsertElementInst(InsertElementInst &EI);
588 void visitShuffleVectorInst(ShuffleVectorInst &EI);
589 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
590 void visitCallInst(CallInst &CI);
591 void visitInvokeInst(InvokeInst &II);
592 void visitGetElementPtrInst(GetElementPtrInst &GEP);
593 void visitLoadInst(LoadInst &LI);
594 void visitStoreInst(StoreInst &SI);
595 void verifyDominatesUse(Instruction &I, unsigned i);
596 void visitInstruction(Instruction &I);
597 void visitTerminator(Instruction &I);
598 void visitBranchInst(BranchInst &BI);
599 void visitReturnInst(ReturnInst &RI);
600 void visitSwitchInst(SwitchInst &SI);
601 void visitIndirectBrInst(IndirectBrInst &BI);
602 void visitCallBrInst(CallBrInst &CBI);
603 void visitSelectInst(SelectInst &SI);
604 void visitUserOp1(Instruction &I);
605 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
606 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
607 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
608 void visitVPIntrinsic(VPIntrinsic &VPI);
609 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
610 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
611 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
612 void visitFenceInst(FenceInst &FI);
613 void visitAllocaInst(AllocaInst &AI);
614 void visitExtractValueInst(ExtractValueInst &EVI);
615 void visitInsertValueInst(InsertValueInst &IVI);
616 void visitEHPadPredecessors(Instruction &I);
617 void visitLandingPadInst(LandingPadInst &LPI);
618 void visitResumeInst(ResumeInst &RI);
619 void visitCatchPadInst(CatchPadInst &CPI);
620 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
621 void visitCleanupPadInst(CleanupPadInst &CPI);
622 void visitFuncletPadInst(FuncletPadInst &FPI);
623 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
624 void visitCleanupReturnInst(CleanupReturnInst &CRI);
625
626 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
627 void verifySwiftErrorValue(const Value *SwiftErrorVal);
628 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
629 void verifyMustTailCall(CallInst &CI);
630 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
631 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
632 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
633 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
634 const Value *V);
635 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
636 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
637 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
638 void verifyUnknownProfileMetadata(MDNode *MD);
639 void visitConstantExprsRecursively(const Constant *EntryC);
640 void visitConstantExpr(const ConstantExpr *CE);
641 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
642 void verifyInlineAsmCall(const CallBase &Call);
643 void verifyStatepoint(const CallBase &Call);
644 void verifyFrameRecoverIndices();
645 void verifySiblingFuncletUnwinds();
646
647 void verifyFragmentExpression(const DbgVariableRecord &I);
648 template <typename ValueOrMetadata>
649 void verifyFragmentExpression(const DIVariable &V,
651 ValueOrMetadata *Desc);
652 void verifyFnArgs(const DbgVariableRecord &DVR);
653 void verifyNotEntryValue(const DbgVariableRecord &I);
654
655 /// Module-level debug info verification...
656 void verifyCompileUnits();
657
658 /// Module-level verification that all @llvm.experimental.deoptimize
659 /// declarations share the same calling convention.
660 void verifyDeoptimizeCallingConvs();
661
662 void verifyAttachedCallBundle(const CallBase &Call,
663 const OperandBundleUse &BU);
664
665 /// Verify the llvm.experimental.noalias.scope.decl declarations
666 void verifyNoAliasScopeDecl();
667};
668
669} // end anonymous namespace
670
/// We know that cond should be true, if not print an error message.
///
/// On failure this records the error via CheckFailed and then `return`s from
/// the *enclosing visitor function*, so code after a failed Check never runs.
/// The do/while(false) wrapper makes the macro behave as a single statement.
/// (The scraped source had doxygen line numbers fused onto these lines; the
/// macros are re-emitted clean with identical semantics.)
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

/// We know that a debug info condition should be true, if not print
/// an error message. Same early-return shape as Check, but routed through
/// DebugInfoCheckFailed so the failure can be recovered from by stripping
/// debug info instead of rejecting the module outright.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
689
690void Verifier::visitDbgRecords(Instruction &I) {
691 if (!I.DebugMarker)
692 return;
693 CheckDI(I.DebugMarker->MarkedInstr == &I,
694 "Instruction has invalid DebugMarker", &I);
695 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
696 "PHI Node must not have any attached DbgRecords", &I);
697 for (DbgRecord &DR : I.getDbgRecordRange()) {
698 CheckDI(DR.getMarker() == I.DebugMarker,
699 "DbgRecord had invalid DebugMarker", &I, &DR);
700 if (auto *Loc =
702 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
703 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
704 visit(*DVR);
705 // These have to appear after `visit` for consistency with existing
706 // intrinsic behaviour.
707 verifyFragmentExpression(*DVR);
708 verifyNotEntryValue(*DVR);
709 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
710 visit(*DLR);
711 }
712 }
713}
714
715void Verifier::visit(Instruction &I) {
716 visitDbgRecords(I);
717 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
718 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
720}
721
722// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
723static void forEachUser(const Value *User,
725 llvm::function_ref<bool(const Value *)> Callback) {
726 if (!Visited.insert(User).second)
727 return;
728
730 while (!WorkList.empty()) {
731 const Value *Cur = WorkList.pop_back_val();
732 if (!Visited.insert(Cur).second)
733 continue;
734 if (Callback(Cur))
735 append_range(WorkList, Cur->materialized_users());
736 }
737}
738
739void Verifier::visitGlobalValue(const GlobalValue &GV) {
741 "Global is external, but doesn't have external or weak linkage!", &GV);
742
743 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
744 if (const MDNode *Associated =
745 GO->getMetadata(LLVMContext::MD_associated)) {
746 Check(Associated->getNumOperands() == 1,
747 "associated metadata must have one operand", &GV, Associated);
748 const Metadata *Op = Associated->getOperand(0).get();
749 Check(Op, "associated metadata must have a global value", GO, Associated);
750
751 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
752 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
753 if (VM) {
754 Check(isa<PointerType>(VM->getValue()->getType()),
755 "associated value must be pointer typed", GV, Associated);
756
757 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
758 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
759 "associated metadata must point to a GlobalObject", GO, Stripped);
760 Check(Stripped != GO,
761 "global values should not associate to themselves", GO,
762 Associated);
763 }
764 }
765
766 // FIXME: Why is getMetadata on GlobalValue protected?
767 if (const MDNode *AbsoluteSymbol =
768 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
769 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
770 DL.getIntPtrType(GO->getType()),
771 RangeLikeMetadataKind::AbsoluteSymbol);
772 }
773
774 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
775 Check(!GO->isDeclaration(),
776 "ref metadata must not be placed on a declaration", GO);
777
779 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
780 for (const MDNode *MD : MDs) {
781 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
782 &GV, MD);
783 const Metadata *Op = MD->getOperand(0).get();
784 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
785 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
786 if (VM) {
787 Check(isa<PointerType>(VM->getValue()->getType()),
788 "ref value must be pointer typed", GV, MD);
789
790 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
791 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
792 "ref metadata must point to a GlobalObject", GO, Stripped);
793 Check(Stripped != GO, "values should not reference themselves", GO,
794 MD);
795 }
796 }
797 }
798 }
799
801 "Only global variables can have appending linkage!", &GV);
802
803 if (GV.hasAppendingLinkage()) {
804 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
805 Check(GVar && GVar->getValueType()->isArrayTy(),
806 "Only global arrays can have appending linkage!", GVar);
807 }
808
809 if (GV.isDeclarationForLinker())
810 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
811
812 if (GV.hasDLLExportStorageClass()) {
814 "dllexport GlobalValue must have default or protected visibility",
815 &GV);
816 }
817 if (GV.hasDLLImportStorageClass()) {
819 "dllimport GlobalValue must have default visibility", &GV);
820 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
821 &GV);
822
823 Check((GV.isDeclaration() &&
826 "Global is marked as dllimport, but not external", &GV);
827 }
828
829 if (GV.isImplicitDSOLocal())
830 Check(GV.isDSOLocal(),
831 "GlobalValue with local linkage or non-default "
832 "visibility must be dso_local!",
833 &GV);
834
835 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
836 if (const Instruction *I = dyn_cast<Instruction>(V)) {
837 if (!I->getParent() || !I->getParent()->getParent())
838 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
839 I);
840 else if (I->getParent()->getParent()->getParent() != &M)
841 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
842 I->getParent()->getParent(),
843 I->getParent()->getParent()->getParent());
844 return false;
845 } else if (const Function *F = dyn_cast<Function>(V)) {
846 if (F->getParent() != &M)
847 CheckFailed("Global is used by function in a different module", &GV, &M,
848 F, F->getParent());
849 return false;
850 }
851 return true;
852 });
853}
854
855void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
856 Type *GVType = GV.getValueType();
857
858 if (MaybeAlign A = GV.getAlign()) {
859 Check(A->value() <= Value::MaximumAlignment,
860 "huge alignment values are unsupported", &GV);
861 }
862
863 if (GV.hasInitializer()) {
864 Check(GV.getInitializer()->getType() == GVType,
865 "Global variable initializer type does not match global "
866 "variable type!",
867 &GV);
869 "Global variable initializer must be sized", &GV);
870 visitConstantExprsRecursively(GV.getInitializer());
871 // If the global has common linkage, it must have a zero initializer and
872 // cannot be constant.
873 if (GV.hasCommonLinkage()) {
875 "'common' global must have a zero initializer!", &GV);
876 Check(!GV.isConstant(), "'common' global may not be marked constant!",
877 &GV);
878 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
883 GV.getName() == "llvm.global_dtors")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 // Don't worry about emitting an error for it not being an array,
890 // visitGlobalValue will complain on appending non-array.
891 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
892 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
893 PointerType *FuncPtrTy =
894 PointerType::get(Context, DL.getProgramAddressSpace());
895 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
896 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
897 STy->getTypeAtIndex(1) == FuncPtrTy,
898 "wrong type for intrinsic global variable", &GV);
899 Check(STy->getNumElements() == 3,
900 "the third field of the element type is mandatory, "
901 "specify ptr null to migrate from the obsoleted 2-field form");
902 Type *ETy = STy->getTypeAtIndex(2);
903 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
904 &GV);
905 }
906 }
907
908 if (GV.hasName() && (GV.getName() == "llvm.used" ||
909 GV.getName() == "llvm.compiler.used")) {
911 "invalid linkage for intrinsic global variable", &GV);
913 "invalid uses of intrinsic global variable", &GV);
914
915 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
916 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
917 Check(PTy, "wrong type for intrinsic global variable", &GV);
918 if (GV.hasInitializer()) {
919 const Constant *Init = GV.getInitializer();
920 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
921 Check(InitArray, "wrong initializer for intrinsic global variable",
922 Init);
923 for (Value *Op : InitArray->operands()) {
924 Value *V = Op->stripPointerCasts();
927 Twine("invalid ") + GV.getName() + " member", V);
928 Check(V->hasName(),
929 Twine("members of ") + GV.getName() + " must be named", V);
930 }
931 }
932 }
933 }
934
935 // Visit any debug info attachments.
937 GV.getMetadata(LLVMContext::MD_dbg, MDs);
938 for (auto *MD : MDs) {
939 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
940 visitDIGlobalVariableExpression(*GVE);
941 else
942 CheckDI(false, "!dbg attachment of global variable must be a "
943 "DIGlobalVariableExpression");
944 }
945
946 // Scalable vectors cannot be global variables, since we don't know
947 // the runtime size.
948 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
949
950 // Check if it is or contains a target extension type that disallows being
951 // used as a global.
953 "Global @" + GV.getName() + " has illegal target extension type",
954 GVType);
955
956 if (!GV.hasInitializer()) {
957 visitGlobalValue(GV);
958 return;
959 }
960
961 // Walk any aggregate initializers looking for bitcasts between address spaces
962 visitConstantExprsRecursively(GV.getInitializer());
963
964 visitGlobalValue(GV);
965}
966
967void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
968 SmallPtrSet<const GlobalAlias*, 4> Visited;
969 Visited.insert(&GA);
970 visitAliaseeSubExpr(Visited, GA, C);
971}
972
/// Recursively verify the aliasee expression of \p GA.
///
/// \p Visited carries every alias already seen on the current chain so that
/// alias cycles can be diagnosed.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
        // An available_externally alias may only point at a global value that
        // is itself available_externally.
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // The target must be something the linker sees as a definition.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Insertion fails exactly when this alias was already on the chain,
      // i.e. the aliases form a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk all operands, recursing through aliases (via their aliasee) and
  // any other constant subexpressions.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1011
// Verify a GlobalAlias: linkage restrictions, a non-null aliasee of matching
// type, and the aliasee expression itself.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
      "Alias should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, external, or available_externally linkage!",
      &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  // An aliasee is either a global value directly or a constant expression
  // over one.
  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively verify the aliasee expression (cycle and interposability
  // checks live there).
  visitAliaseeSubExpr(GA, *Aliasee);

  // The alias is itself a GlobalValue; run those checks too.
  visitGlobalValue(GA);
}
1029
// Verify a GlobalIFunc: metadata restrictions, linkage, and that the
// resolver is a defined Function of the correct pointer type.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // Walk all metadata attachments: !dbg and !prof are not allowed on ifuncs.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver operand must be a pointer in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1064
1065void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1066 // There used to be various other llvm.dbg.* nodes, but we don't support
1067 // upgrading them and we want to reserve the namespace for future uses.
1068 if (NMD.getName().starts_with("llvm.dbg."))
1069 CheckDI(NMD.getName() == "llvm.dbg.cu",
1070 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1071 for (const MDNode *MD : NMD.operands()) {
1072 if (NMD.getName() == "llvm.dbg.cu")
1073 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1074
1075 if (!MD)
1076 continue;
1077
1078 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1079 }
1080}
1081
// Verify an MDNode and, recursively, its operands. \p AllowLocs controls
// whether DILocation operands are legal in this position.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each concrete MDNode subclass;
  // plain MDTuples need no subclass-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
  case Metadata::CLASS##Kind: \
    visit##CLASS(cast<CLASS>(MD)); \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata is never valid inside a global metadata graph.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1136
1137void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1138 Check(MD.getValue(), "Expected valid value", &MD);
1139 Check(!MD.getValue()->getType()->isMetadataTy(),
1140 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1141
1142 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1143 if (!L)
1144 return;
1145
1146 Check(F, "function-local metadata used outside a function", L);
1147
1148 // If this was an instruction, bb, or argument, verify that it is in the
1149 // function that we expect.
1150 Function *ActualF = nullptr;
1151 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1152 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1153 ActualF = I->getParent()->getParent();
1154 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1155 ActualF = BB->getParent();
1156 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1157 ActualF = A->getParent();
1158 assert(ActualF && "Unimplemented function local metadata case!");
1159
1160 Check(ActualF == F, "function-local metadata used in wrong function", L);
1161}
1162
1163void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1164 for (const ValueAsMetadata *VAM : AL.getArgs())
1165 visitValueAsMetadata(*VAM, F);
1166}
1167
1168void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1169 Metadata *MD = MDV.getMetadata();
1170 if (auto *N = dyn_cast<MDNode>(MD)) {
1171 visitMDNode(*N, AreDebugLocsAllowed::No);
1172 return;
1173 }
1174
1175 // Only visit each node once. Metadata can be mutually recursive, so this
1176 // avoids infinite recursion here, as well as being an optimization.
1177 if (!MDNodes.insert(MD).second)
1178 return;
1179
1180 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1181 visitValueAsMetadata(*V, F);
1182
1183 if (auto *AL = dyn_cast<DIArgList>(MD))
1184 visitDIArgList(*AL, F);
1185}
1186
// Predicate helpers for debug-info operand checks. A null operand is
// accepted by all of them (these fields are frequently optional); a non-null
// operand must be the named metadata subclass.
static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1191
1192void Verifier::visitDILocation(const DILocation &N) {
1193 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1194 "location requires a valid scope", &N, N.getRawScope());
1195 if (auto *IA = N.getRawInlinedAt())
1196 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1197 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1198 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1199}
1200
1201void Verifier::visitGenericDINode(const GenericDINode &N) {
1202 CheckDI(N.getTag(), "invalid tag", &N);
1203}
1204
1205void Verifier::visitDIScope(const DIScope &N) {
1206 if (auto *F = N.getRawFile())
1207 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1208}
1209
// Verify a DISubrangeType: tag, base type, and that each optional
// bound/stride/bias field has an allowed metadata kind.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1241
// Verify a DISubrange (one array dimension): count and upperBound are
// mutually exclusive, and each field must have an allowed metadata kind.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // A constant count below -1 can never describe a valid dimension.
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1269
1270void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1272 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1273 "GenericSubrange can have any one of count or upperBound", &N);
1274 auto *CBound = N.getRawCountNode();
1275 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1276 "Count must be signed constant or DIVariable or DIExpression", &N);
1277 auto *LBound = N.getRawLowerBound();
1278 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1279 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1280 "LowerBound must be signed constant or DIVariable or DIExpression",
1281 &N);
1282 auto *UBound = N.getRawUpperBound();
1283 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1284 "UpperBound must be signed constant or DIVariable or DIExpression",
1285 &N);
1286 auto *Stride = N.getRawStride();
1287 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1288 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1289 "Stride must be signed constant or DIVariable or DIExpression", &N);
1290}
1291
1292void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1293 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1294}
1295
// Verify a DIBasicType: one of the allowed tags, plus a constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1306
// Verify a DIFixedPointType: a base type restricted to the signed/unsigned
// fixed-point encodings, with kind-dependent factor/numerator/denominator
// constraints.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types are basic types as well; run those checks first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1325
1326void Verifier::visitDIStringType(const DIStringType &N) {
1327 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1328 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1329 &N);
1330}
1331
// Verify a DIDerivedType (typedefs, pointers, members, CV qualifiers, set
// types, template aliases, ...).
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The allowed kind of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      // An MDTuple is only accepted as a wrapper for exactly one constant.
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // Set types may only be built over enumerations, subranges, or
  // integer-like basic types.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // A DWARF address space only makes sense on pointer/reference-like types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1417
1418/// Detect mutually exclusive flags.
1419static bool hasConflictingReferenceFlags(unsigned Flags) {
1420 return ((Flags & DINode::FlagLValueReference) &&
1421 (Flags & DINode::FlagRValueReference)) ||
1422 ((Flags & DINode::FlagTypePassByValue) &&
1423 (Flags & DINode::FlagTypePassByReference));
1424}
1425
1426void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1427 auto *Params = dyn_cast<MDTuple>(&RawParams);
1428 CheckDI(Params, "invalid template params", &N, &RawParams);
1429 for (Metadata *Op : Params->operands()) {
1430 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1431 &N, Params, Op);
1432 }
1433}
1434
// Verify a DICompositeType (arrays, structs, unions, enums, classes,
// variants, namelists).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Bit 4 used to be DIBlockByRefStruct; it is rejected explicitly.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): this diagnostic names DISubprogram but fires for a
  // DICompositeType — looks copy-pasted; confirm and reword upstream.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vector types must have exactly one subrange dimension.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1509
// Verify a DISubroutineType: the type array must be an MDTuple whose
// operands are all type refs.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1521
1522void Verifier::visitDIFile(const DIFile &N) {
1523 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1524 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1525 if (Checksum) {
1526 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1527 "invalid checksum kind", &N);
1528 size_t Size;
1529 switch (Checksum->Kind) {
1530 case DIFile::CSK_MD5:
1531 Size = 32;
1532 break;
1533 case DIFile::CSK_SHA1:
1534 Size = 40;
1535 break;
1536 case DIFile::CSK_SHA256:
1537 Size = 64;
1538 break;
1539 }
1540 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1541 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1542 "invalid checksum", &N);
1543 }
1544}
1545
// Verify a DICompileUnit and each of its list fields (enums, retained
// types, global variables, imported entities, macros).
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Subprogram declarations (not definitions) may also be retained.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this CU for later cross-checks.
  CUVisited.insert(&N);
}
1599
// Verify a DISubprogram: scope/file/type fields, the retained-nodes list,
// the declaration-vs-definition split, and thrown types.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration field must reference a subprogram that is itself only a
  // declaration.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept the three valid node kinds (per the message below); reject
      // everything else.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect =
          DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);

      // Retained nodes must be local to this very subprogram.
      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);
      CheckDI(
          RetainedNodeScope->getSubprogram() == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1680
1681void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1682 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1683 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1684 "invalid local scope", &N, N.getRawScope());
1685 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1686 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1687}
1688
1689void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1690 visitDILexicalBlockBase(N);
1691
1692 CheckDI(N.getLine() || !N.getColumn(),
1693 "cannot have column info without line info", &N);
1694}
1695
// A DILexicalBlockFile has no extra constraints beyond the shared
// lexical-block checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1699
1700void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1701 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1702 if (auto *S = N.getRawScope())
1703 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1704 if (auto *S = N.getRawDecl())
1705 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1706}
1707
1708void Verifier::visitDINamespace(const DINamespace &N) {
1709 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1710 if (auto *S = N.getRawScope())
1711 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1712}
1713
1714void Verifier::visitDIMacro(const DIMacro &N) {
1715 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1716 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1717 "invalid macinfo type", &N);
1718 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1719 if (!N.getValue().empty()) {
1720 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1721 }
1722}
1723
1724void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1725 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1726 "invalid macinfo type", &N);
1727 if (auto *F = N.getRawFile())
1728 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1729
1730 if (auto *Array = N.getRawElements()) {
1731 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1732 for (Metadata *Op : N.getElements()->operands()) {
1733 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1734 }
1735 }
1736}
1737
1738void Verifier::visitDIModule(const DIModule &N) {
1739 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1740 CheckDI(!N.getName().empty(), "anonymous module", &N);
1741}
1742
1743void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1744 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1745}
1746
1747void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1748 visitDITemplateParameter(N);
1749
1750 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1751 &N);
1752}
1753
1754void Verifier::visitDITemplateValueParameter(
1755 const DITemplateValueParameter &N) {
1756 visitDITemplateParameter(N);
1757
1758 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1759 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1760 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1761 "invalid tag", &N);
1762}
1763
1764void Verifier::visitDIVariable(const DIVariable &N) {
1765 if (auto *S = N.getRawScope())
1766 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1767 if (auto *F = N.getRawFile())
1768 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1769}
1770
// Verify a DIGlobalVariable: tag, type ref, and (for definitions) a
// mandatory type; the static-member declaration link is also checked.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1785
1786void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1787 // Checks common to all variables.
1788 visitDIVariable(N);
1789
1790 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1791 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1792 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1793 "local variable requires a valid scope", &N, N.getRawScope());
1794 if (auto Ty = N.getType())
1795 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1796}
1797
1798void Verifier::visitDIAssignID(const DIAssignID &N) {
1799 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1800 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1801}
1802
1803void Verifier::visitDILabel(const DILabel &N) {
1804 if (auto *S = N.getRawScope())
1805 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1806 if (auto *F = N.getRawFile())
1807 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1808
1809 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1810 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1811 "label requires a valid scope", &N, N.getRawScope());
1812}
1813
// A DIExpression is validated entirely by its own isValid() predicate.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1817
1818void Verifier::visitDIGlobalVariableExpression(
1819 const DIGlobalVariableExpression &GVE) {
1820 CheckDI(GVE.getVariable(), "missing variable");
1821 if (auto *Var = GVE.getVariable())
1822 visitDIGlobalVariable(*Var);
1823 if (auto *Expr = GVE.getExpression()) {
1824 visitDIExpression(*Expr);
1825 if (auto Fragment = Expr->getFragmentInfo())
1826 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1827 }
1828}
1829
1830void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1831 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1832 if (auto *T = N.getRawType())
1833 CheckDI(isType(T), "invalid type ref", &N, T);
1834 if (auto *F = N.getRawFile())
1835 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1836}
1837
1838void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1839 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1840 N.getTag() == dwarf::DW_TAG_imported_declaration,
1841 "invalid tag", &N);
1842 if (auto *S = N.getRawScope())
1843 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1844 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1845 N.getRawEntity());
1846}
1847
1848void Verifier::visitComdat(const Comdat &C) {
1849 // In COFF the Module is invalid if the GlobalValue has private linkage.
1850 // Entities with private linkage don't have entries in the symbol table.
1851 if (TT.isOSBinFormatCOFF())
1852 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1853 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1854 GV);
1855}
1856
1857void Verifier::visitModuleIdents() {
1858 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1859 if (!Idents)
1860 return;
1861
1862 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1863 // Scan each llvm.ident entry and make sure that this requirement is met.
1864 for (const MDNode *N : Idents->operands()) {
1865 Check(N->getNumOperands() == 1,
1866 "incorrect number of operands in llvm.ident metadata", N);
1867 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1868 ("invalid value for llvm.ident metadata entry operand"
1869 "(the operand should be a string)"),
1870 N->getOperand(0));
1871 }
1872}
1873
1874void Verifier::visitModuleCommandLines() {
1875 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1876 if (!CommandLines)
1877 return;
1878
1879 // llvm.commandline takes a list of metadata entry. Each entry has only one
1880 // string. Scan each llvm.commandline entry and make sure that this
1881 // requirement is met.
1882 for (const MDNode *N : CommandLines->operands()) {
1883 Check(N->getNumOperands() == 1,
1884 "incorrect number of operands in llvm.commandline metadata", N);
1885 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1886 ("invalid value for llvm.commandline metadata entry operand"
1887 "(the operand should be a string)"),
1888 N->getOperand(0));
1889 }
1890}
1891
1892void Verifier::visitModuleErrnoTBAA() {
1893 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1894 if (!ErrnoTBAA)
1895 return;
1896
1897 Check(ErrnoTBAA->getNumOperands() >= 1,
1898 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1899
1900 for (const MDNode *N : ErrnoTBAA->operands())
1901 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1902}
1903
1904void Verifier::visitModuleFlags() {
1905 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1906 if (!Flags) return;
1907
1908 // Scan each flag, and track the flags and requirements.
1909 DenseMap<const MDString*, const MDNode*> SeenIDs;
1910 SmallVector<const MDNode*, 16> Requirements;
1911 uint64_t PAuthABIPlatform = -1;
1912 uint64_t PAuthABIVersion = -1;
1913 for (const MDNode *MDN : Flags->operands()) {
1914 visitModuleFlag(MDN, SeenIDs, Requirements);
1915 if (MDN->getNumOperands() != 3)
1916 continue;
1917 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1918 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1919 if (const auto *PAP =
1921 PAuthABIPlatform = PAP->getZExtValue();
1922 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1923 if (const auto *PAV =
1925 PAuthABIVersion = PAV->getZExtValue();
1926 }
1927 }
1928 }
1929
1930 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1931 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1932 "'aarch64-elf-pauthabi-version' module flags must be present");
1933
1934 // Validate that the requirements in the module are valid.
1935 for (const MDNode *Requirement : Requirements) {
1936 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1937 const Metadata *ReqValue = Requirement->getOperand(1);
1938
1939 const MDNode *Op = SeenIDs.lookup(Flag);
1940 if (!Op) {
1941 CheckFailed("invalid requirement on flag, flag is not present in module",
1942 Flag);
1943 continue;
1944 }
1945
1946 if (Op->getOperand(2) != ReqValue) {
1947 CheckFailed(("invalid requirement on flag, "
1948 "flag does not have the required value"),
1949 Flag);
1950 continue;
1951 }
1952 }
1953}
1954
1955void
1956Verifier::visitModuleFlag(const MDNode *Op,
1957 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1958 SmallVectorImpl<const MDNode *> &Requirements) {
1959 // Each module flag should have three arguments, the merge behavior (a
1960 // constant int), the flag ID (an MDString), and the value.
1961 Check(Op->getNumOperands() == 3,
1962 "incorrect number of operands in module flag", Op);
1963 Module::ModFlagBehavior MFB;
1964 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1966 "invalid behavior operand in module flag (expected constant integer)",
1967 Op->getOperand(0));
1968 Check(false,
1969 "invalid behavior operand in module flag (unexpected constant)",
1970 Op->getOperand(0));
1971 }
1972 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1973 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1974 Op->getOperand(1));
1975
1976 // Check the values for behaviors with additional requirements.
1977 switch (MFB) {
1978 case Module::Error:
1979 case Module::Warning:
1980 case Module::Override:
1981 // These behavior types accept any value.
1982 break;
1983
1984 case Module::Min: {
1985 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1986 Check(V && V->getValue().isNonNegative(),
1987 "invalid value for 'min' module flag (expected constant non-negative "
1988 "integer)",
1989 Op->getOperand(2));
1990 break;
1991 }
1992
1993 case Module::Max: {
1995 "invalid value for 'max' module flag (expected constant integer)",
1996 Op->getOperand(2));
1997 break;
1998 }
1999
2000 case Module::Require: {
2001 // The value should itself be an MDNode with two operands, a flag ID (an
2002 // MDString), and a value.
2003 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2004 Check(Value && Value->getNumOperands() == 2,
2005 "invalid value for 'require' module flag (expected metadata pair)",
2006 Op->getOperand(2));
2007 Check(isa<MDString>(Value->getOperand(0)),
2008 ("invalid value for 'require' module flag "
2009 "(first value operand should be a string)"),
2010 Value->getOperand(0));
2011
2012 // Append it to the list of requirements, to check once all module flags are
2013 // scanned.
2014 Requirements.push_back(Value);
2015 break;
2016 }
2017
2018 case Module::Append:
2019 case Module::AppendUnique: {
2020 // These behavior types require the operand be an MDNode.
2021 Check(isa<MDNode>(Op->getOperand(2)),
2022 "invalid value for 'append'-type module flag "
2023 "(expected a metadata node)",
2024 Op->getOperand(2));
2025 break;
2026 }
2027 }
2028
2029 // Unless this is a "requires" flag, check the ID is unique.
2030 if (MFB != Module::Require) {
2031 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2032 Check(Inserted,
2033 "module flag identifiers must be unique (or of 'require' type)", ID);
2034 }
2035
2036 if (ID->getString() == "wchar_size") {
2037 ConstantInt *Value
2039 Check(Value, "wchar_size metadata requires constant integer argument");
2040 }
2041
2042 if (ID->getString() == "Linker Options") {
2043 // If the llvm.linker.options named metadata exists, we assume that the
2044 // bitcode reader has upgraded the module flag. Otherwise the flag might
2045 // have been created by a client directly.
2046 Check(M.getNamedMetadata("llvm.linker.options"),
2047 "'Linker Options' named metadata no longer supported");
2048 }
2049
2050 if (ID->getString() == "SemanticInterposition") {
2051 ConstantInt *Value =
2053 Check(Value,
2054 "SemanticInterposition metadata requires constant integer argument");
2055 }
2056
2057 if (ID->getString() == "CG Profile") {
2058 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2059 visitModuleFlagCGProfileEntry(MDO);
2060 }
2061}
2062
2063void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2064 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2065 if (!FuncMDO)
2066 return;
2067 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2068 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2069 "expected a Function or null", FuncMDO);
2070 };
2071 auto Node = dyn_cast_or_null<MDNode>(MDO);
2072 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2073 CheckFunction(Node->getOperand(0));
2074 CheckFunction(Node->getOperand(1));
2075 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2076 Check(Count && Count->getType()->isIntegerTy(),
2077 "expected an integer constant", Node->getOperand(2));
2078}
2079
// Verify that every attribute in Attrs is well-formed for its kind: string
// attributes declared as "str-bool" must hold "", "true", or "false", and
// int-kind attributes must carry an argument while enum-kind attributes must
// not. V is used only for error reporting.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// Expand a value check for every known string-boolean attribute name from
// the generated Attributes.inc table. Note the macro's local 'V' (the
// attribute's string value) deliberately shadows the function parameter 'V'.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // An int attribute without an argument (or an enum attribute with one)
    // indicates a malformed attribute; stop after the first mismatch.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2105
2106// VerifyParameterAttrs - Check the given attributes for an argument or return
2107// value of the specified type. The value V is printed in error messages.
2108void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2109 const Value *V) {
2110 if (!Attrs.hasAttributes())
2111 return;
2112
2113 verifyAttributeTypes(Attrs, V);
2114
2115 for (Attribute Attr : Attrs)
2116 Check(Attr.isStringAttribute() ||
2117 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2118 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2119 V);
2120
2121 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2122 unsigned AttrCount =
2123 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2124 Check(AttrCount == 1,
2125 "Attribute 'immarg' is incompatible with other attributes except the "
2126 "'range' attribute",
2127 V);
2128 }
2129
2130 // Check for mutually incompatible attributes. Only inreg is compatible with
2131 // sret.
2132 unsigned AttrCount = 0;
2133 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2134 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2135 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2136 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2137 Attrs.hasAttribute(Attribute::InReg);
2138 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2139 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2140 Check(AttrCount <= 1,
2141 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2142 "'byref', and 'sret' are incompatible!",
2143 V);
2144
2145 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2146 Attrs.hasAttribute(Attribute::ReadOnly)),
2147 "Attributes "
2148 "'inalloca and readonly' are incompatible!",
2149 V);
2150
2151 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2152 Attrs.hasAttribute(Attribute::Returned)),
2153 "Attributes "
2154 "'sret and returned' are incompatible!",
2155 V);
2156
2157 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2158 Attrs.hasAttribute(Attribute::SExt)),
2159 "Attributes "
2160 "'zeroext and signext' are incompatible!",
2161 V);
2162
2163 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2164 Attrs.hasAttribute(Attribute::ReadOnly)),
2165 "Attributes "
2166 "'readnone and readonly' are incompatible!",
2167 V);
2168
2169 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2170 Attrs.hasAttribute(Attribute::WriteOnly)),
2171 "Attributes "
2172 "'readnone and writeonly' are incompatible!",
2173 V);
2174
2175 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2176 Attrs.hasAttribute(Attribute::WriteOnly)),
2177 "Attributes "
2178 "'readonly and writeonly' are incompatible!",
2179 V);
2180
2181 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2182 Attrs.hasAttribute(Attribute::AlwaysInline)),
2183 "Attributes "
2184 "'noinline and alwaysinline' are incompatible!",
2185 V);
2186
2187 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2188 Attrs.hasAttribute(Attribute::ReadNone)),
2189 "Attributes writable and readnone are incompatible!", V);
2190
2191 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2192 Attrs.hasAttribute(Attribute::ReadOnly)),
2193 "Attributes writable and readonly are incompatible!", V);
2194
2195 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2196 for (Attribute Attr : Attrs) {
2197 if (!Attr.isStringAttribute() &&
2198 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2199 CheckFailed("Attribute '" + Attr.getAsString() +
2200 "' applied to incompatible type!", V);
2201 return;
2202 }
2203 }
2204
2205 if (isa<PointerType>(Ty)) {
2206 if (Attrs.hasAttribute(Attribute::Alignment)) {
2207 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2208 Check(AttrAlign.value() <= Value::MaximumAlignment,
2209 "huge alignment values are unsupported", V);
2210 }
2211 if (Attrs.hasAttribute(Attribute::ByVal)) {
2212 Type *ByValTy = Attrs.getByValType();
2213 SmallPtrSet<Type *, 4> Visited;
2214 Check(ByValTy->isSized(&Visited),
2215 "Attribute 'byval' does not support unsized types!", V);
2216 // Check if it is or contains a target extension type that disallows being
2217 // used on the stack.
2219 "'byval' argument has illegal target extension type", V);
2220 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2221 "huge 'byval' arguments are unsupported", V);
2222 }
2223 if (Attrs.hasAttribute(Attribute::ByRef)) {
2224 SmallPtrSet<Type *, 4> Visited;
2225 Check(Attrs.getByRefType()->isSized(&Visited),
2226 "Attribute 'byref' does not support unsized types!", V);
2227 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2228 (1ULL << 32),
2229 "huge 'byref' arguments are unsupported", V);
2230 }
2231 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2232 SmallPtrSet<Type *, 4> Visited;
2233 Check(Attrs.getInAllocaType()->isSized(&Visited),
2234 "Attribute 'inalloca' does not support unsized types!", V);
2235 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2236 (1ULL << 32),
2237 "huge 'inalloca' arguments are unsupported", V);
2238 }
2239 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2240 SmallPtrSet<Type *, 4> Visited;
2241 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2242 "Attribute 'preallocated' does not support unsized types!", V);
2243 Check(
2244 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2245 (1ULL << 32),
2246 "huge 'preallocated' arguments are unsupported", V);
2247 }
2248 }
2249
2250 if (Attrs.hasAttribute(Attribute::Initializes)) {
2251 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2252 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2253 V);
2255 "Attribute 'initializes' does not support unordered ranges", V);
2256 }
2257
2258 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2259 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2260 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2261 V);
2262 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2263 "Invalid value for 'nofpclass' test mask", V);
2264 }
2265 if (Attrs.hasAttribute(Attribute::Range)) {
2266 const ConstantRange &CR =
2267 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2269 "Range bit width must match type bit width!", V);
2270 }
2271}
2272
2273void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2274 const Value *V) {
2275 if (Attrs.hasFnAttr(Attr)) {
2276 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2277 unsigned N;
2278 if (S.getAsInteger(10, N))
2279 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2280 }
2281}
2282
2283// Check parameter attributes against a function type.
2284// The value V is printed in error messages.
2285void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2286 const Value *V, bool IsIntrinsic,
2287 bool IsInlineAsm) {
2288 if (Attrs.isEmpty())
2289 return;
2290
2291 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2292 Check(Attrs.hasParentContext(Context),
2293 "Attribute list does not match Module context!", &Attrs, V);
2294 for (const auto &AttrSet : Attrs) {
2295 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2296 "Attribute set does not match Module context!", &AttrSet, V);
2297 for (const auto &A : AttrSet) {
2298 Check(A.hasParentContext(Context),
2299 "Attribute does not match Module context!", &A, V);
2300 }
2301 }
2302 }
2303
2304 bool SawNest = false;
2305 bool SawReturned = false;
2306 bool SawSRet = false;
2307 bool SawSwiftSelf = false;
2308 bool SawSwiftAsync = false;
2309 bool SawSwiftError = false;
2310
2311 // Verify return value attributes.
2312 AttributeSet RetAttrs = Attrs.getRetAttrs();
2313 for (Attribute RetAttr : RetAttrs)
2314 Check(RetAttr.isStringAttribute() ||
2315 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2316 "Attribute '" + RetAttr.getAsString() +
2317 "' does not apply to function return values",
2318 V);
2319
2320 unsigned MaxParameterWidth = 0;
2321 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2322 if (Ty->isVectorTy()) {
2323 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2324 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2325 if (Size > MaxParameterWidth)
2326 MaxParameterWidth = Size;
2327 }
2328 }
2329 };
2330 GetMaxParameterWidth(FT->getReturnType());
2331 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2332
2333 // Verify parameter attributes.
2334 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2335 Type *Ty = FT->getParamType(i);
2336 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2337
2338 if (!IsIntrinsic) {
2339 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2340 "immarg attribute only applies to intrinsics", V);
2341 if (!IsInlineAsm)
2342 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2343 "Attribute 'elementtype' can only be applied to intrinsics"
2344 " and inline asm.",
2345 V);
2346 }
2347
2348 verifyParameterAttrs(ArgAttrs, Ty, V);
2349 GetMaxParameterWidth(Ty);
2350
2351 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2352 Check(!SawNest, "More than one parameter has attribute nest!", V);
2353 SawNest = true;
2354 }
2355
2356 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2357 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2358 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2359 "Incompatible argument and return types for 'returned' attribute",
2360 V);
2361 SawReturned = true;
2362 }
2363
2364 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2365 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2366 Check(i == 0 || i == 1,
2367 "Attribute 'sret' is not on first or second parameter!", V);
2368 SawSRet = true;
2369 }
2370
2371 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2372 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2373 SawSwiftSelf = true;
2374 }
2375
2376 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2377 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2378 SawSwiftAsync = true;
2379 }
2380
2381 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2382 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2383 SawSwiftError = true;
2384 }
2385
2386 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2387 Check(i == FT->getNumParams() - 1,
2388 "inalloca isn't on the last parameter!", V);
2389 }
2390 }
2391
2392 if (!Attrs.hasFnAttrs())
2393 return;
2394
2395 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2396 for (Attribute FnAttr : Attrs.getFnAttrs())
2397 Check(FnAttr.isStringAttribute() ||
2398 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2399 "Attribute '" + FnAttr.getAsString() +
2400 "' does not apply to functions!",
2401 V);
2402
2403 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2404 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2405 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2406
2407 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2408 Check(Attrs.hasFnAttr(Attribute::NoInline),
2409 "Attribute 'optnone' requires 'noinline'!", V);
2410
2411 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2412 "Attributes 'optsize and optnone' are incompatible!", V);
2413
2414 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2415 "Attributes 'minsize and optnone' are incompatible!", V);
2416
2417 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2418 "Attributes 'optdebug and optnone' are incompatible!", V);
2419 }
2420
2421 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2422 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2423 "Attributes "
2424 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2425 V);
2426
2427 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2428 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2429 "Attributes 'optsize and optdebug' are incompatible!", V);
2430
2431 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2432 "Attributes 'minsize and optdebug' are incompatible!", V);
2433 }
2434
2435 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2436 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2437 "Attribute writable and memory without argmem: write are incompatible!",
2438 V);
2439
2440 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2441 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2442 "Attributes 'aarch64_pstate_sm_enabled and "
2443 "aarch64_pstate_sm_compatible' are incompatible!",
2444 V);
2445 }
2446
2447 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2448 Attrs.hasFnAttr("aarch64_inout_za") +
2449 Attrs.hasFnAttr("aarch64_out_za") +
2450 Attrs.hasFnAttr("aarch64_preserves_za") +
2451 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2452 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2453 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2454 "'aarch64_za_state_agnostic' are mutually exclusive",
2455 V);
2456
2457 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2458 Attrs.hasFnAttr("aarch64_in_zt0") +
2459 Attrs.hasFnAttr("aarch64_inout_zt0") +
2460 Attrs.hasFnAttr("aarch64_out_zt0") +
2461 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2462 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2463 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2464 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2465 "'aarch64_za_state_agnostic' are mutually exclusive",
2466 V);
2467
2468 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2469 const GlobalValue *GV = cast<GlobalValue>(V);
2471 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2472 }
2473
2474 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2475 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2476 if (ParamNo >= FT->getNumParams()) {
2477 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2478 return false;
2479 }
2480
2481 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2482 CheckFailed("'allocsize' " + Name +
2483 " argument must refer to an integer parameter",
2484 V);
2485 return false;
2486 }
2487
2488 return true;
2489 };
2490
2491 if (!CheckParam("element size", Args->first))
2492 return;
2493
2494 if (Args->second && !CheckParam("number of elements", *Args->second))
2495 return;
2496 }
2497
2498 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2499 AllocFnKind K = Attrs.getAllocKind();
2501 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2502 if (!is_contained(
2503 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2504 Type))
2505 CheckFailed(
2506 "'allockind()' requires exactly one of alloc, realloc, and free");
2507 if ((Type == AllocFnKind::Free) &&
2508 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2509 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2510 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2511 "or aligned modifiers.");
2512 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2513 if ((K & ZeroedUninit) == ZeroedUninit)
2514 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2515 }
2516
2517 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2518 StringRef S = A.getValueAsString();
2519 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2520 Function *Variant = M.getFunction(S);
2521 if (Variant) {
2522 Attribute Family = Attrs.getFnAttr("alloc-family");
2523 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2524 if (Family.isValid())
2525 Check(VariantFamily.isValid() &&
2526 VariantFamily.getValueAsString() == Family.getValueAsString(),
2527 "'alloc-variant-zeroed' must name a function belonging to the "
2528 "same 'alloc-family'");
2529
2530 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2531 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2532 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2533 "'alloc-variant-zeroed' must name a function with "
2534 "'allockind(\"zeroed\")'");
2535
2536 Check(FT == Variant->getFunctionType(),
2537 "'alloc-variant-zeroed' must name a function with the same "
2538 "signature");
2539 }
2540 }
2541
2542 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2543 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2544 if (VScaleMin == 0)
2545 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2546 else if (!isPowerOf2_32(VScaleMin))
2547 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2548 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2549 if (VScaleMax && VScaleMin > VScaleMax)
2550 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2551 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2552 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2553 }
2554
2555 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2556 StringRef FP = FPAttr.getValueAsString();
2557 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2558 FP != "non-leaf-no-reserve")
2559 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2560 }
2561
2562 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2563 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2564 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2565 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2566 .getValueAsString()
2567 .empty(),
2568 "\"patchable-function-entry-section\" must not be empty");
2569 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2570
2571 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2572 StringRef S = A.getValueAsString();
2573 if (S != "none" && S != "all" && S != "non-leaf")
2574 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2575 }
2576
2577 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2578 StringRef S = A.getValueAsString();
2579 if (S != "a_key" && S != "b_key")
2580 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2581 V);
2582 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2583 CheckFailed(
2584 "'sign-return-address-key' present without `sign-return-address`");
2585 }
2586 }
2587
2588 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2589 StringRef S = A.getValueAsString();
2590 if (S != "" && S != "true" && S != "false")
2591 CheckFailed(
2592 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2593 }
2594
2595 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2596 StringRef S = A.getValueAsString();
2597 if (S != "" && S != "true" && S != "false")
2598 CheckFailed(
2599 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2600 }
2601
2602 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2603 StringRef S = A.getValueAsString();
2604 if (S != "" && S != "true" && S != "false")
2605 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2606 V);
2607 }
2608
2609 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2610 StringRef S = A.getValueAsString();
2611 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2612 if (!Info)
2613 CheckFailed("invalid name for a VFABI variant: " + S, V);
2614 }
2615
2616 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2617 StringRef S = A.getValueAsString();
2619 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2620 }
2621
2622 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2623 StringRef S = A.getValueAsString();
2625 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2626 V);
2627 }
2628
2629 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2630 StringRef S = A.getValueAsString();
2632 S.split(Args, ',');
2633 Check(Args.size() >= 5,
2634 "modular-format attribute requires at least 5 arguments", V);
2635 unsigned FirstArgIdx;
2636 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2637 "modular-format attribute first arg index is not an integer", V);
2638 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2639 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2640 "modular-format attribute first arg index is out of bounds", V);
2641 }
2642
2643 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2644 StringRef S = A.getValueAsString();
2645 if (!S.empty()) {
2646 for (auto FeatureFlag : split(S, ',')) {
2647 if (FeatureFlag.empty())
2648 CheckFailed(
2649 "target-features attribute should not contain an empty string");
2650 else
2651 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2652 "target feature '" + FeatureFlag +
2653 "' must start with a '+' or '-'",
2654 V);
2655 }
2656 }
2657 }
2658}
2659void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2660 Check(MD->getNumOperands() == 2,
2661 "'unknown' !prof should have a single additional operand", MD);
2662 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2663 Check(PassName != nullptr,
2664 "'unknown' !prof should have an additional operand of type "
2665 "string");
2666 Check(!PassName->getString().empty(),
2667 "the 'unknown' !prof operand should not be an empty string");
2668}
2669
/// Verify function-level metadata attachments. Only !prof and !kcfi_type
/// nodes are inspected here; other kinds are checked elsewhere.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand: must be a string naming the annotation kind.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand: the count payload.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // !kcfi_type carries exactly one 32-bit constant integer type hash.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2719
/// Walk a constant and all of its transitively-reachable constant operands,
/// applying the constant-expression / ptrauth checks to each one. Uses an
/// explicit worklist rather than recursion, and memoizes already-seen
/// constants in ConstantExprVisited so shared subtrees are checked once.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have nothing to visit.
  if (EntryC->getNumOperands() == 0)
    return;

  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      // Do not descend into a global's initializer from here.
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2759
2760void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2761 if (CE->getOpcode() == Instruction::BitCast)
2762 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2763 CE->getType()),
2764 "Invalid bitcast", CE);
2765 else if (CE->getOpcode() == Instruction::PtrToAddr)
2766 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2767}
2768
/// Verify the structural invariants of a ptrauth signed-pointer constant:
/// pointer-typed base of the same type as the constant, i32 key, i64
/// discriminator, and pointer-typed address discriminator / deactivation
/// symbol operands.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2793
2794bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2795 // There shouldn't be more attribute sets than there are parameters plus the
2796 // function and return value.
2797 return Attrs.getNumAttrSets() <= Params + 2;
2798}
2799
/// Verify inline-asm specific rules on a call site: indirect constraints
/// require a pointer argument (with elementtype), direct constraints must
/// not carry elementtype, and label constraints must line up with a callbr's
/// indirect destinations.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    // Label constraints consume no call argument; just count them so they
    // can be matched against callbr destinations below.
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    // Only callbr may carry "goto" label constraints.
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2841
/// Verify that statepoint intrinsic is well formed.
/// Checks the fixed operand layout (id, num-patch-bytes, callee, num call
/// args, flags, call args...), the wrapped callee's elementtype function
/// signature, and that every user of the statepoint token is a gc.result or
/// gc.relocate tied back to this statepoint.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  // Operand 1: number of patchable bytes; must be a non-negative i32.
  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // Operand 2: the wrapped callee; its signature comes from elementtype.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  // Operand 3: count of arguments forwarded to the wrapped callee.
  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  // Operand 4: statepoint flags; only bits inside MaskAll are defined.
  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  // Transition and deopt operand lists must be present but empty; the
  // inline forms are deprecated in favor of operand bundles.
  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  // 5 fixed operands + NumCallArgs + the two (empty) length operands.
  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(Call)) {
      // NOTE(review): this condition tests the statepoint itself (Call),
      // which can never be a GCRelocateInst, so this branch looks dead and
      // the gc.relocate linkage check is skipped. Presumably it was meant
      // to be isa<GCRelocateInst>(UserCall) -- confirm against upstream.
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times. It's non-optimal, but it is legal. It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer. This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about. See example statepoint.ll in the verifier subdirectory
}
2965
2966void Verifier::verifyFrameRecoverIndices() {
2967 for (auto &Counts : FrameEscapeInfo) {
2968 Function *F = Counts.first;
2969 unsigned EscapedObjectCount = Counts.second.first;
2970 unsigned MaxRecoveredIndex = Counts.second.second;
2971 Check(MaxRecoveredIndex <= EscapedObjectCount,
2972 "all indices passed to llvm.localrecover must be less than the "
2973 "number of arguments passed to llvm.localescape in the parent "
2974 "function",
2975 F);
2976 }
2977}
2978
2979static Instruction *getSuccPad(Instruction *Terminator) {
2980 BasicBlock *UnwindDest;
2981 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2982 UnwindDest = II->getUnwindDest();
2983 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2984 UnwindDest = CSI->getUnwindDest();
2985 else
2986 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2987 return &*UnwindDest->getFirstNonPHIIt();
2988}
2989
// Verify that the unwind-successor chains recorded in SiblingFuncletInfo
// never loop back on themselves: sibling EH pads may not handle each
// other's exceptions.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Pads whose unwind chains were fully checked on an earlier walk.
  SmallPtrSet<Instruction *, 8> Visited;
  // Pads on the walk currently in progress; reaching one of these again
  // means the unwind edges form a cycle.
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error. Re-walk the cycle so the diagnostic
        // can list every pad (and distinct terminator) involved.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3032
// visitFunction - Verify that a function is ok.
//
// Checks, in order: signature/type agreement, attributes, calling-convention
// restrictions, per-argument types, metadata attachments (split by
// materializable / declaration / definition), intrinsic address-taking and
// signatures, and finally that every !dbg attachment resolves back to this
// function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable stack frame, so their arguments may not
  // be referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    // Unlisted conventions carry no extra rules here.
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata, token-like, and x86_amx types may only flow through
      // intrinsic signatures.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  // Debug-info consistency: remember whether this function has a subprogram
  // so later per-instruction checks know whether !dbg locations are expected.
  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Stop early once broken debug info has been diagnosed.
      if (BrokenDebugInfo)
        return;
    }
}
3370
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Ensures the block ends in a terminator, that its PHI nodes have exactly
// one (consistent) entry per predecessor, that every instruction's parent
// pointer points back at this block, and that no DbgRecords trail the
// terminator.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      // Sorting both lists lets the two checks below compare them pairwise.
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3430
3431void Verifier::visitTerminator(Instruction &I) {
3432 // Ensure that terminators only exist at the end of the basic block.
3433 Check(&I == I.getParent()->getTerminator(),
3434 "Terminator found in the middle of a basic block!", I.getParent());
3435 visitInstruction(I);
3436}
3437
/// Verify a branch: a conditional branch's condition must be i1-typed.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional()) {
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3445
3446void Verifier::visitReturnInst(ReturnInst &RI) {
3447 Function *F = RI.getParent()->getParent();
3448 unsigned N = RI.getNumOperands();
3449 if (F->getReturnType()->isVoidTy())
3450 Check(N == 0,
3451 "Found return instr that returns non-void in Function of void "
3452 "return type!",
3453 &RI, F->getReturnType());
3454 else
3455 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3456 "Function return type does not match operand "
3457 "type of return inst!",
3458 &RI, F->getReturnType());
3459
3460 // Check to make sure that the return value has necessary properties for
3461 // terminators...
3462 visitTerminator(RI);
3463}
3464
3465void Verifier::visitSwitchInst(SwitchInst &SI) {
3466 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3467 // Check to make sure that all of the constants in the switch instruction
3468 // have the same type as the switched-on value.
3469 Type *SwitchTy = SI.getCondition()->getType();
3470 SmallPtrSet<ConstantInt*, 32> Constants;
3471 for (auto &Case : SI.cases()) {
3472 Check(isa<ConstantInt>(Case.getCaseValue()),
3473 "Case value is not a constant integer.", &SI);
3474 Check(Case.getCaseValue()->getType() == SwitchTy,
3475 "Switch constants must all be same type as switch value!", &SI);
3476 Check(Constants.insert(Case.getCaseValue()).second,
3477 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3478 }
3479
3480 visitTerminator(SI);
3481}
3482
/// Verify an indirectbr: the address operand and every destination must be
/// pointer-typed.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3492
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  // callbr is only valid for inline asm (asm-goto) and a small allowlist of
  // intrinsics; each form carries its own structural constraints.
  if (!CBI.isInlineAsm()) {
    // Intrinsic callbr: no operand bundles are supported.
    // NOTE(review): the head of the first Check(...) appears truncated in
    // this excerpt — confirm against upstream Verifier.cpp.
        "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The single indirect destination must start with either an
      // 'unreachable' or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm callbr: unwinding out of asm-goto is not allowed.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3524
3525void Verifier::visitSelectInst(SelectInst &SI) {
3526 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3527 SI.getOperand(2)),
3528 "Invalid operands for select instruction!", &SI);
3529
3530 Check(SI.getTrueValue()->getType() == SI.getType(),
3531 "Select values must have same type as select instruction!", &SI);
3532 visitInstruction(SI);
3533}
3534
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1/UserOp2 are pass-private placeholder opcodes; any that survive
  // into verified IR indicate a pass failed to clean up after itself.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3541
3542void Verifier::visitTruncInst(TruncInst &I) {
3543 // Get the source and destination types
3544 Type *SrcTy = I.getOperand(0)->getType();
3545 Type *DestTy = I.getType();
3546
3547 // Get the size of the types in bits, we'll need this later
3548 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3549 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3550
3551 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3552 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3553 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3554 "trunc source and destination must both be a vector or neither", &I);
3555 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3556
3557 visitInstruction(I);
3558}
3559
3560void Verifier::visitZExtInst(ZExtInst &I) {
3561 // Get the source and destination types
3562 Type *SrcTy = I.getOperand(0)->getType();
3563 Type *DestTy = I.getType();
3564
3565 // Get the size of the types in bits, we'll need this later
3566 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3567 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3568 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3569 "zext source and destination must both be a vector or neither", &I);
3570 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3571 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3572
3573 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3574
3575 visitInstruction(I);
3576}
3577
3578void Verifier::visitSExtInst(SExtInst &I) {
3579 // Get the source and destination types
3580 Type *SrcTy = I.getOperand(0)->getType();
3581 Type *DestTy = I.getType();
3582
3583 // Get the size of the types in bits, we'll need this later
3584 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3585 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3586
3587 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3588 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3589 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3590 "sext source and destination must both be a vector or neither", &I);
3591 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3592
3593 visitInstruction(I);
3594}
3595
3596void Verifier::visitFPTruncInst(FPTruncInst &I) {
3597 // Get the source and destination types
3598 Type *SrcTy = I.getOperand(0)->getType();
3599 Type *DestTy = I.getType();
3600 // Get the size of the types in bits, we'll need this later
3601 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3602 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3603
3604 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3605 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3606 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3607 "fptrunc source and destination must both be a vector or neither", &I);
3608 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3609
3610 visitInstruction(I);
3611}
3612
3613void Verifier::visitFPExtInst(FPExtInst &I) {
3614 // Get the source and destination types
3615 Type *SrcTy = I.getOperand(0)->getType();
3616 Type *DestTy = I.getType();
3617
3618 // Get the size of the types in bits, we'll need this later
3619 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3620 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3621
3622 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3623 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3624 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3625 "fpext source and destination must both be a vector or neither", &I);
3626 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3627
3628 visitInstruction(I);
3629}
3630
3631void Verifier::visitUIToFPInst(UIToFPInst &I) {
3632 // Get the source and destination types
3633 Type *SrcTy = I.getOperand(0)->getType();
3634 Type *DestTy = I.getType();
3635
3636 bool SrcVec = SrcTy->isVectorTy();
3637 bool DstVec = DestTy->isVectorTy();
3638
3639 Check(SrcVec == DstVec,
3640 "UIToFP source and dest must both be vector or scalar", &I);
3641 Check(SrcTy->isIntOrIntVectorTy(),
3642 "UIToFP source must be integer or integer vector", &I);
3643 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3644 &I);
3645
3646 if (SrcVec && DstVec)
3647 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3648 cast<VectorType>(DestTy)->getElementCount(),
3649 "UIToFP source and dest vector length mismatch", &I);
3650
3651 visitInstruction(I);
3652}
3653
3654void Verifier::visitSIToFPInst(SIToFPInst &I) {
3655 // Get the source and destination types
3656 Type *SrcTy = I.getOperand(0)->getType();
3657 Type *DestTy = I.getType();
3658
3659 bool SrcVec = SrcTy->isVectorTy();
3660 bool DstVec = DestTy->isVectorTy();
3661
3662 Check(SrcVec == DstVec,
3663 "SIToFP source and dest must both be vector or scalar", &I);
3664 Check(SrcTy->isIntOrIntVectorTy(),
3665 "SIToFP source must be integer or integer vector", &I);
3666 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3667 &I);
3668
3669 if (SrcVec && DstVec)
3670 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3671 cast<VectorType>(DestTy)->getElementCount(),
3672 "SIToFP source and dest vector length mismatch", &I);
3673
3674 visitInstruction(I);
3675}
3676
3677void Verifier::visitFPToUIInst(FPToUIInst &I) {
3678 // Get the source and destination types
3679 Type *SrcTy = I.getOperand(0)->getType();
3680 Type *DestTy = I.getType();
3681
3682 bool SrcVec = SrcTy->isVectorTy();
3683 bool DstVec = DestTy->isVectorTy();
3684
3685 Check(SrcVec == DstVec,
3686 "FPToUI source and dest must both be vector or scalar", &I);
3687 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3688 Check(DestTy->isIntOrIntVectorTy(),
3689 "FPToUI result must be integer or integer vector", &I);
3690
3691 if (SrcVec && DstVec)
3692 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3693 cast<VectorType>(DestTy)->getElementCount(),
3694 "FPToUI source and dest vector length mismatch", &I);
3695
3696 visitInstruction(I);
3697}
3698
3699void Verifier::visitFPToSIInst(FPToSIInst &I) {
3700 // Get the source and destination types
3701 Type *SrcTy = I.getOperand(0)->getType();
3702 Type *DestTy = I.getType();
3703
3704 bool SrcVec = SrcTy->isVectorTy();
3705 bool DstVec = DestTy->isVectorTy();
3706
3707 Check(SrcVec == DstVec,
3708 "FPToSI source and dest must both be vector or scalar", &I);
3709 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3710 Check(DestTy->isIntOrIntVectorTy(),
3711 "FPToSI result must be integer or integer vector", &I);
3712
3713 if (SrcVec && DstVec)
3714 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3715 cast<VectorType>(DestTy)->getElementCount(),
3716 "FPToSI source and dest vector length mismatch", &I);
3717
3718 visitInstruction(I);
3719}
3720
3721void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3722 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3723 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3724 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3725 V);
3726
3727 if (SrcTy->isVectorTy()) {
3728 auto *VSrc = cast<VectorType>(SrcTy);
3729 auto *VDest = cast<VectorType>(DestTy);
3730 Check(VSrc->getElementCount() == VDest->getElementCount(),
3731 "PtrToAddr vector length mismatch", V);
3732 }
3733
3734 Type *AddrTy = DL.getAddressType(SrcTy);
3735 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3736}
3737
void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
  // Delegate the ptrtoaddr type rules to checkPtrToAddr (shared with the
  // constant-expression path), then run the generic per-instruction checks.
  checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
  visitInstruction(I);
}
3742
3743void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3744 // Get the source and destination types
3745 Type *SrcTy = I.getOperand(0)->getType();
3746 Type *DestTy = I.getType();
3747
3748 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3749
3750 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3751 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3752 &I);
3753
3754 if (SrcTy->isVectorTy()) {
3755 auto *VSrc = cast<VectorType>(SrcTy);
3756 auto *VDest = cast<VectorType>(DestTy);
3757 Check(VSrc->getElementCount() == VDest->getElementCount(),
3758 "PtrToInt Vector length mismatch", &I);
3759 }
3760
3761 visitInstruction(I);
3762}
3763
3764void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3765 // Get the source and destination types
3766 Type *SrcTy = I.getOperand(0)->getType();
3767 Type *DestTy = I.getType();
3768
3769 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3770 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3771
3772 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3773 &I);
3774 if (SrcTy->isVectorTy()) {
3775 auto *VSrc = cast<VectorType>(SrcTy);
3776 auto *VDest = cast<VectorType>(DestTy);
3777 Check(VSrc->getElementCount() == VDest->getElementCount(),
3778 "IntToPtr Vector length mismatch", &I);
3779 }
3780 visitInstruction(I);
3781}
3782
void Verifier::visitBitCastInst(BitCastInst &I) {
  // CastInst::castIsValid centralizes the bitcast legality rules (first-class
  // non-aggregate types, identical bit widths, pointer constraints), so no
  // per-field checks are repeated here.
  Check(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}
3789
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  // addrspacecast: pointer (or pointer vector) to a pointer in a different
  // address space; vector element counts must match.
  // NOTE(review): the head of the Check verifying the address spaces differ
  // appears truncated in this excerpt — confirm against upstream Verifier.cpp.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3806
/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  // NOTE(review): the second disjunct of this Check appears truncated in this
  // excerpt — confirm against upstream Verifier.cpp.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints (e.g. one entry per predecessor) are
  // checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3832
void Verifier::visitCallBase(CallBase &Call) {
  // Shared verification for all call-like instructions (call, invoke, callbr):
  // callee/argument typing, attribute legality, operand-bundle uniqueness and
  // shape, and debug-location requirements for inlinable callsites.
  // NOTE(review): several continuation lines appear truncated in this excerpt
  // (the head of the first Check, the Callee initializer, the calling-conv
  // Check head, the attachedcall else-if head, the intrinsic-ID guard, the
  // hasOB initializer, and the debug-location Check head) — confirm against
  // upstream Verifier.cpp before relying on the exact conditions.
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // Resolve a direct callee, if any; intrinsics get a stricter signature
  // check since their prototypes are fixed by the intrinsic tables.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    // First scan the fixed parameters so duplicates spanning the fixed and
    // vararg sections are caught below.
    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4138
4139void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4140 StringRef Context) {
4141 Check(!Attrs.contains(Attribute::InAlloca),
4142 Twine("inalloca attribute not allowed in ") + Context);
4143 Check(!Attrs.contains(Attribute::InReg),
4144 Twine("inreg attribute not allowed in ") + Context);
4145 Check(!Attrs.contains(Attribute::SwiftError),
4146 Twine("swifterror attribute not allowed in ") + Context);
4147 Check(!Attrs.contains(Attribute::Preallocated),
4148 Twine("preallocated attribute not allowed in ") + Context);
4149 Check(!Attrs.contains(Attribute::ByRef),
4150 Twine("byref attribute not allowed in ") + Context);
4151}
4152
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  // NOTE(review): the dyn_cast<PointerType> lines initializing PL/PR appear
  // truncated in this excerpt — confirm against upstream Verifier.cpp.
  if (!PL || !PR)
    return false;
  // Pointers are interchangeable for tail-call purposes when they share an
  // address space, regardless of pointee type.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4164
4165static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4166 static const Attribute::AttrKind ABIAttrs[] = {
4167 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4168 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4169 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4170 Attribute::ByRef};
4171 AttrBuilder Copy(C);
4172 for (auto AK : ABIAttrs) {
4173 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4174 if (Attr.isValid())
4175 Copy.addAttribute(Attr);
4176 }
4177
4178 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4179 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4180 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4181 Attrs.hasParamAttr(I, Attribute::ByRef)))
4182 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4183 return Copy;
4184}
4185
void Verifier::verifyMustTailCall(CallInst &CI) {
  // Enforce the requirements that make a 'musttail' marker lowerable as a
  // guaranteed tail call: matching varargs-ness, congruent return types,
  // matching calling conventions, the call(+bitcast)/ret structural pattern,
  // and matching ABI-affecting parameter attributes.
  // NOTE(review): the initializer of 'Next' and one disjunct of the
  // returned-value Check appear truncated in this excerpt — confirm against
  // upstream Verifier.cpp.
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4272
void Verifier::visitCallInst(CallInst &CI) {
  // Run the checks shared by all call-like instructions first.
  visitCallBase(CI);

  // 'musttail' imposes additional ABI-compatibility constraints.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4279
4280void Verifier::visitInvokeInst(InvokeInst &II) {
4281 visitCallBase(II);
4282
4283 // Verify that the first non-PHI instruction of the unwind destination is an
4284 // exception handling instruction.
4285 Check(
4286 II.getUnwindDest()->isEHPad(),
4287 "The unwind destination does not have an exception handling instruction!",
4288 &II);
4289
4290 visitTerminator(II);
4291}
4292
4293/// visitUnaryOperator - Check the argument to the unary operator.
4294///
4295void Verifier::visitUnaryOperator(UnaryOperator &U) {
4296 Check(U.getType() == U.getOperand(0)->getType(),
4297 "Unary operators must have same type for"
4298 "operands and result!",
4299 &U);
4300
4301 switch (U.getOpcode()) {
4302 // Check that floating-point arithmetic operators are only used with
4303 // floating-point operands.
4304 case Instruction::FNeg:
4305 Check(U.getType()->isFPOrFPVectorTy(),
4306 "FNeg operator only works with float types!", &U);
4307 break;
4308 default:
4309 llvm_unreachable("Unknown UnaryOperator opcode!");
4310 }
4311
4312 visitInstruction(U);
4313}
4314
/// visitBinaryOperator - Check that both arguments to the binary operator are
/// of the same type!
///
/// Each opcode class additionally requires the operand/result type to be
/// integer (arithmetic, logical, shifts) or floating point (f* opcodes),
/// and ties the result type to the first operand's type.
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  // Operand types must agree before any per-opcode checking.
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  // Shifts likewise operate on integers only, result type == operand type.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
4378
4379void Verifier::visitICmpInst(ICmpInst &IC) {
4380 // Check that the operands are the same type
4381 Type *Op0Ty = IC.getOperand(0)->getType();
4382 Type *Op1Ty = IC.getOperand(1)->getType();
4383 Check(Op0Ty == Op1Ty,
4384 "Both operands to ICmp instruction are not of the same type!", &IC);
4385 // Check that the operands are the right type
4386 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4387 "Invalid operand types for ICmp instruction", &IC);
4388 // Check that the predicate is valid.
4389 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4390
4391 visitInstruction(IC);
4392}
4393
4394void Verifier::visitFCmpInst(FCmpInst &FC) {
4395 // Check that the operands are the same type
4396 Type *Op0Ty = FC.getOperand(0)->getType();
4397 Type *Op1Ty = FC.getOperand(1)->getType();
4398 Check(Op0Ty == Op1Ty,
4399 "Both operands to FCmp instruction are not of the same type!", &FC);
4400 // Check that the operands are the right type
4401 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4402 &FC);
4403 // Check that the predicate is valid.
4404 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4405
4406 visitInstruction(FC);
4407}
4408
/// Verify an extractelement instruction; operand validity appears to be
/// delegated to the instruction class's own predicate.
/// NOTE(review): the opening line of the Check call is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
        "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
4414
4415void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4416 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4417 IE.getOperand(2)),
4418 "Invalid insertelement operands!", &IE);
4419 visitInstruction(IE);
4420}
4421
/// Verify a shufflevector instruction; operand and mask validity appears to
/// be delegated to the instruction class's own predicate.
/// NOTE(review): the opening line of the Check call is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
                                        SV.getShuffleMask()),
        "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
4428
4429void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4430 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4431
4432 Check(isa<PointerType>(TargetTy),
4433 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4434 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4435
4436 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4437 Check(!STy->isScalableTy(),
4438 "getelementptr cannot target structure that contains scalable vector"
4439 "type",
4440 &GEP);
4441 }
4442
4443 SmallVector<Value *, 16> Idxs(GEP.indices());
4444 Check(
4445 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4446 "GEP indexes must be integers", &GEP);
4447 Type *ElTy =
4448 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4449 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4450
4451 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4452
4453 Check(PtrTy && GEP.getResultElementType() == ElTy,
4454 "GEP is not of right type for indices!", &GEP, ElTy);
4455
4456 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4457 // Additional checks for vector GEPs.
4458 ElementCount GEPWidth = GEPVTy->getElementCount();
4459 if (GEP.getPointerOperandType()->isVectorTy())
4460 Check(
4461 GEPWidth ==
4462 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4463 "Vector GEP result width doesn't match operand's", &GEP);
4464 for (Value *Idx : Idxs) {
4465 Type *IndexTy = Idx->getType();
4466 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4467 ElementCount IndexWidth = IndexVTy->getElementCount();
4468 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4469 }
4470 Check(IndexTy->isIntOrIntVectorTy(),
4471 "All GEP indices should be of integer type");
4472 }
4473 }
4474
4475 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4476 "GEP address space doesn't match type", &GEP);
4477
4478 visitInstruction(GEP);
4479}
4480
4481static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4482 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4483}
4484
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata node must hold an even, non-zero number of operands forming
/// [low, high) constant-integer pairs; pairs must be non-empty, sorted,
/// non-overlapping, and non-contiguous.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Both endpoints must be ConstantInts of matching type.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // !noalias.addrspace ranges are always i32; other kinds must match the
    // instruction's scalar type.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // A pair must denote a non-empty range; the full set is only allowed
    // for !absolute_symbol.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Successive pairs must be disjoint, ascending, and not mergeable
      // into a single contiguous range.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With three or more pairs, also check the first pair against the last
  // one for overlap/contiguity.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4549
/// Verify an instruction's !range metadata node via the shared range-like
/// metadata checker.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  // Callers must pass the instruction's own !range node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4555
/// Verify an instruction's !noalias.addrspace metadata node via the shared
/// range-like metadata checker (which enforces i32 endpoints for this kind).
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  // Callers must pass the instruction's own !noalias.addrspace node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4563
4564void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4565 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4566 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4567 Check(!(Size & (Size - 1)),
4568 "atomic memory access' operand must have a power-of-two size", Ty, I);
4569}
4570
/// Verify a load: pointer operand, supported alignment, sized result type,
/// and — for atomic loads — legal ordering and element type.
/// NOTE(review): several lines of this function (e.g. the definition of PTy
/// and parts of two Check conditions) are not visible in this excerpt —
/// confirm against the upstream source.
void Verifier::visitLoadInst(LoadInst &LI) {
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  // Alignment, when present, is capped at the IR-wide maximum.
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Loads never have release semantics.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
          "atomic load operand must have integer, pointer, floating point, "
          "or vector type!",
          ElTy, &LI);

    // Size must be byte-sized and a power of two.
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4598
/// Verify a store: pointer destination, supported alignment, sized value
/// type, and — for atomic stores — legal ordering and element type.
/// NOTE(review): part of one Check condition (the fp/vector alternatives) is
/// not visible in this excerpt — confirm against the upstream source.
void Verifier::visitStoreInst(StoreInst &SI) {
  // Operand 1 is the destination pointer; operand 0 is the stored value.
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Check(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = SI.getOperand(0)->getType();
  // Alignment, when present, is capped at the IR-wide maximum.
  if (MaybeAlign A = SI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
  }
  Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Stores never have acquire semantics.
    Check(SI.getOrdering() != AtomicOrdering::Acquire &&
              SI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Store cannot have Acquire ordering", &SI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
          "atomic store operand must have integer, pointer, floating point, "
          "or vector type!",
          ElTy, &SI);
    // Size must be byte-sized and a power of two.
    checkAtomicMemAccessSize(ElTy, &SI);
  } else {
    // Only atomic accesses may name a synchronization scope.
    Check(SI.getSyncScopeID() == SyncScope::System,
          "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
4624
4625/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4626void Verifier::verifySwiftErrorCall(CallBase &Call,
4627 const Value *SwiftErrorVal) {
4628 for (const auto &I : llvm::enumerate(Call.args())) {
4629 if (I.value() == SwiftErrorVal) {
4630 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4631 "swifterror value when used in a callsite should be marked "
4632 "with swifterror attribute",
4633 SwiftErrorVal, Call);
4634 }
4635 }
4636}
4637
/// Verify every use of a swifterror value: it may only be loaded, stored
/// (as the destination), or passed as a swifterror call argument.
/// NOTE(review): the opening line of the first Check's condition is not
/// visible in this excerpt — confirm against the upstream source.
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
          isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites must mark the matching parameter swifterror.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4657
/// Verify an alloca: sized allocated type, supported alignment, swifterror
/// constraints, and AMDGPU address-space rules.
/// NOTE(review): several Check openings (target extension type, array size
/// type, array allocation, AMDGPU addrspace) are not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  // isSized uses the visited-set overload to handle recursive types.
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  // Alignment, when present, is capped at the IR-wide maximum.
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // swifterror slots hold a single pointer.
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
        "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4687
4688void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4689 Type *ElTy = CXI.getOperand(1)->getType();
4690 Check(ElTy->isIntOrPtrTy(),
4691 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4692 checkAtomicMemAccessSize(ElTy, &CXI);
4693 visitInstruction(CXI);
4694}
4695
/// Verify an atomicrmw: ordering, operand type per operation kind, and
/// access size.
/// NOTE(review): the fp-operation Check's opening and the binop-range Check's
/// opening are not visible in this excerpt — confirm against the upstream
/// source.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  // Unordered is reserved for plain loads/stores.
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  // Operand 1's type is the value type of the operation.
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    // xchg additionally permits floating point and pointers.
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
        "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
            " operand must have floating-point or fixed vector of floating-point "
            "type!",
        &RMWI, ElTy);
  } else {
    // All remaining operations are integer-only.
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4724
4725void Verifier::visitFenceInst(FenceInst &FI) {
4726 const AtomicOrdering Ordering = FI.getOrdering();
4727 Check(Ordering == AtomicOrdering::Acquire ||
4728 Ordering == AtomicOrdering::Release ||
4729 Ordering == AtomicOrdering::AcquireRelease ||
4730 Ordering == AtomicOrdering::SequentiallyConsistent,
4731 "fence instructions may only have acquire, release, acq_rel, or "
4732 "seq_cst ordering.",
4733 &FI);
4734 visitInstruction(FI);
4735}
4736
/// Verify extractvalue: the type computed from the aggregate operand and
/// the index list must equal the instruction's result type.
/// NOTE(review): the opening line of the Check call is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
                                EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4744
/// Verify insertvalue: the type computed from the aggregate operand and the
/// index list must equal the inserted value's type (operand 1).
/// NOTE(review): the opening line of the Check call is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
                                IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4753
4754static Value *getParentPad(Value *EHPad) {
4755 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4756 return FPI->getParentPad();
4757
4758 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4759}
4760
/// Verify that every predecessor of an EH pad's block reaches it through a
/// legal unwind edge: landing pads only via invoke unwind edges, catchpads
/// only via their catchswitch, and funclet pads via unwind edges whose
/// source pad chain legally exits into this pad's parent.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  // The entry block has an implicit predecessor (function entry) and so can
  // never be an EH pad.
  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad is entered only from its owning catchswitch's block, and a
    // catchswitch may not unwind to one of its own handlers.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Non-throwing intrinsic invokes contribute no unwind edge worth
      // validating further.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      // The invoke's source pad is its funclet bundle operand, or "none"
      // when it has no funclet bundle.
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4843
/// Verify a landingpad: it must have clauses or be a cleanup, share a single
/// result type across the function, require a personality function, lead its
/// block, and have well-typed clauses.
/// NOTE(review): the opening of the filter-clause type Check is not visible
/// in this excerpt — confirm against the upstream source.
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) in a function must agree on one type;
  // LandingPadResultTy records the first one seen.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      // Catch clauses name a typeinfo via pointer.
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4883
/// Verify a resume: requires a personality function and must agree with the
/// function-wide landingpad result type.
/// NOTE(review): the opening of the personality Check is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitResumeInst(ResumeInst &RI) {
        "ResumeInst needs to be in a function with a personality.", &RI);

  // Resume operands share the single landingpad result type recorded in
  // LandingPadResultTy.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4898
/// Verify a catchpad: requires a personality function, a catchswitch parent,
/// and must lead its block.
/// NOTE(review): the opening of the parent-pad Check is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4918
4919void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4920 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4921 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4922 CatchReturn.getOperand(0));
4923
4924 visitTerminator(CatchReturn);
4925}
4926
4927void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4928 BasicBlock *BB = CPI.getParent();
4929
4930 Function *F = BB->getParent();
4931 Check(F->hasPersonalityFn(),
4932 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4933
4934 // The cleanuppad instruction must be the first non-PHI instruction in the
4935 // block.
4936 Check(&*BB->getFirstNonPHIIt() == &CPI,
4937 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4938
4939 auto *ParentPad = CPI.getParentPad();
4940 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4941 "CleanupPadInst has an invalid parent.", &CPI);
4942
4943 visitEHPadPredecessors(CPI);
4944 visitFuncletPadInst(CPI);
4945}
4946
/// Verify unwind-edge consistency for a funclet pad (catchpad/cleanuppad):
/// every unwind edge that exits FPI must agree on a single unwind
/// destination, and for a catchpad that destination must match the parent
/// catchswitch's. Nested cleanuppads are explored via a worklist because
/// their unwind destination is only discoverable through their uses.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found to exit FPI, and the pad it targets; all later
  // exiting edges must match it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  // Guards against self-nesting cycles in the pad graph.
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    // First ancestor of CurrentPad whose unwind dest is still unknown after
    // processing this pad's uses.
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's unwind dest must agree with its parent catchswitch's.
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5106
5107void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5108 BasicBlock *BB = CatchSwitch.getParent();
5109
5110 Function *F = BB->getParent();
5111 Check(F->hasPersonalityFn(),
5112 "CatchSwitchInst needs to be in a function with a personality.",
5113 &CatchSwitch);
5114
5115 // The catchswitch instruction must be the first non-PHI instruction in the
5116 // block.
5117 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5118 "CatchSwitchInst not the first non-PHI instruction in the block.",
5119 &CatchSwitch);
5120
5121 auto *ParentPad = CatchSwitch.getParentPad();
5122 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5123 "CatchSwitchInst has an invalid parent.", ParentPad);
5124
5125 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5126 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5127 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5128 "CatchSwitchInst must unwind to an EH block which is not a "
5129 "landingpad.",
5130 &CatchSwitch);
5131
5132 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5133 if (getParentPad(&*I) == ParentPad)
5134 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5135 }
5136
5137 Check(CatchSwitch.getNumHandlers() != 0,
5138 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5139
5140 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5141 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5142 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5143 }
5144
5145 visitEHPadPredecessors(CatchSwitch);
5146 visitTerminator(CatchSwitch);
5147}
5148
/// Verify a cleanupret: it must name a cleanuppad and, if it unwinds, the
/// destination must start with a non-landingpad EH pad.
/// NOTE(review): the opening of the first Check is not visible in this
/// excerpt — confirm against the upstream source.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    // The unwind successor must start with an EH pad other than landingpad.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5164
5165void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5166 Instruction *Op = cast<Instruction>(I.getOperand(i));
5167 // If the we have an invalid invoke, don't try to compute the dominance.
5168 // We already reject it in the invoke specific checks and the dominance
5169 // computation doesn't handle multiple edges.
5170 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5171 if (II->getNormalDest() == II->getUnwindDest())
5172 return;
5173 }
5174
5175 // Quick check whether the def has already been encountered in the same block.
5176 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5177 // uses are defined to happen on the incoming edge, not at the instruction.
5178 //
5179 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5180 // wrapping an SSA value, assert that we've already encountered it. See
5181 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5182 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5183 return;
5184
5185 const Use &U = I.getOperandUse(i);
5186 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5187}
5188
// Verify !dereferenceable / !dereferenceable_or_null attachments: pointer
// result, allowed instruction kinds, and a single i64 byte-count operand.
// NOTE(review): source line 5194 (the Check(...) call restricting the
// instruction kind) is elided in this rendering; the message below belongs
// to it.
5189 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5190 Check(I.getType()->isPointerTy(),
5191 "dereferenceable, dereferenceable_or_null "
5192 "apply only to pointer types",
5193 &I);
5195 "dereferenceable, dereferenceable_or_null apply only to load"
5196 " and inttoptr instructions, use attributes for calls or invokes",
5197 &I);
5198 Check(MD->getNumOperands() == 1,
5199 "dereferenceable, dereferenceable_or_null "
5200 "take one operand!",
5201 &I);
// The sole operand is the dereferenceable byte count and must be an i64.
5202 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5203 Check(CI && CI->getType()->isIntegerTy(64),
5204 "dereferenceable, "
5205 "dereferenceable_or_null metadata value must be an i64!",
5206 &I);
5207 }
5208
5209void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5210 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5211 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5212 &I);
5213 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5214}
5215
// Verify a !prof attachment: the profile-name string operand, then the
// per-name payload rules (branch_weights operand counts, VP kind/shape).
// NOTE(review): source lines 5237, 5242, 5273 and 5291 (the Check(...) /
// if(...) calls opening the diagnostics below them) are elided in this
// rendering.
5216 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
// Returns how many branch-weight operands this instruction kind would need,
// or 0 if branch_weights are not allowed on it at all.
5217 auto GetBranchingTerminatorNumOperands = [&]() {
5218 unsigned ExpectedNumOperands = 0;
5219 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5220 ExpectedNumOperands = BI->getNumSuccessors();
5221 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5222 ExpectedNumOperands = SI->getNumSuccessors();
5223 else if (isa<CallInst>(&I))
5224 ExpectedNumOperands = 1;
5225 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5226 ExpectedNumOperands = IBI->getNumDestinations();
5227 else if (isa<SelectInst>(&I))
5228 ExpectedNumOperands = 2;
5229 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5230 ExpectedNumOperands = CI->getNumSuccessors();
5231 return ExpectedNumOperands;
5232 };
5233 Check(MD->getNumOperands() >= 1,
5234 "!prof annotations should have at least 1 operand", MD);
5235 // Check first operand: it names the kind of profile data that follows.
5236 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5238 "expected string with name of the !prof annotation", MD);
5239 MDString *MDS = cast<MDString>(MD->getOperand(0));
5240 StringRef ProfName = MDS->getString();
5241 
// (Elided line 5242 opens the branch handling the "unknown" profile marker.)
5243 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5244 "'unknown' !prof should only appear on instructions on which "
5245 "'branch_weights' would",
5246 MD);
5247 verifyUnknownProfileMetadata(MD);
5248 return;
5249 }
5250 
5251 Check(MD->getNumOperands() >= 2,
5252 "!prof annotations should have no less than 2 operands", MD);
5253 
5254 // Check consistency of !prof branch_weights metadata.
5255 if (ProfName == MDProfLabels::BranchWeights) {
5256 unsigned NumBranchWeights = getNumBranchWeights(*MD);
// Invokes are special: they may carry 1 (taken only) or 2 (taken/unwind)
// weights regardless of successor count.
5257 if (isa<InvokeInst>(&I)) {
5258 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5259 "Wrong number of InvokeInst branch_weights operands", MD);
5260 } else {
5261 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5262 if (ExpectedNumOperands == 0)
5263 CheckFailed("!prof branch_weights are not allowed for this instruction",
5264 MD);
5265 
5266 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5267 MD);
5268 }
// Every weight operand past the header must be a constant integer.
5269 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5270 ++i) {
5271 auto &MDO = MD->getOperand(i);
5272 Check(MDO, "second operand should not be null", MD);
5274 "!prof brunch_weights operand is not a const int");
5275 }
5276 } else if (ProfName == MDProfLabels::ValueProfile) {
5277 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5278 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5279 Check(KindInt, "VP !prof missing kind argument", MD);
5280 
5281 auto Kind = KindInt->getZExtValue();
5282 Check(Kind >= InstrProfValueKind::IPVK_First &&
5283 Kind <= InstrProfValueKind::IPVK_Last,
5284 "Invalid VP !prof kind", MD);
// After "VP" + kind, the payload is (value, count) pairs, so the total
// operand count must be odd.
5285 Check(MD->getNumOperands() % 2 == 1,
5286 "VP !prof should have an even number "
5287 "of arguments after 'VP'",
5288 MD);
5289 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5290 Kind == InstrProfValueKind::IPVK_MemOPSize)
5292 "VP !prof indirect call or memop size expected to be applied to "
5293 "CallBase instructions only",
5294 MD);
5295 } else {
5296 CheckFailed("expected either branch_weights or VP profile name", MD);
5297 }
5298 }
5299
// Verify a !DIAssignID attachment and that every user of the ID (intrinsic
// or DbgVariableRecord form) is an assign-kind debug record in the same
// function as the annotated instruction.
// NOTE(review): source lines 5307 (the instruction-kind predicate expression)
// and 5314 (the CheckDI(...) call opening the user-kind diagnostic) are
// elided in this rendering.
5300 void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5301 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5302 // DIAssignID metadata must be attached to either an alloca or some form of
5303 // store/memory-writing instruction.
5304 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5305 // possible store intrinsics.
5306 bool ExpectedInstTy =
5308 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5309 I, MD);
5310 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5311 // only be found as DbgAssignIntrinsic operands.
5312 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5313 for (auto *User : AsValue->users()) {
5315 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5316 MD, User);
5317 // All of the dbg.assign intrinsics should be in the same function as I.
5318 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5319 CheckDI(DAI->getFunction() == I.getFunction(),
5320 "dbg.assign not in same function as inst", DAI, &I);
5321 }
5322 }
// Same constraints for the non-intrinsic (DbgVariableRecord) users.
5323 for (DbgVariableRecord *DVR :
5324 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5325 CheckDI(DVR->isDbgAssign(),
5326 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5327 CheckDI(DVR->getFunction() == I.getFunction(),
5328 "DVRAssign not in same function as inst", DVR, &I);
5329 }
5330 }
5331
// Verify an !mmra attachment: either a single MMRA tag or a tuple whose
// operands are all MMRA tags.
// NOTE(review): source line 5333 (the Check(...) call restricting which
// instruction kinds may carry !mmra) is elided in this rendering.
5332 void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5334 "!mmra metadata attached to unexpected instruction kind", I, MD);
5335 
5336 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5337 // list of tags such as !2 in the following example:
5338 // !0 = !{!"a", !"b"}
5339 // !1 = !{!"c", !"d"}
5340 // !2 = !{!0, !1}
5341 if (MMRAMetadata::isTagMD(MD))
5342 return;
5343 
// Not a single tag, so it must be a tuple of tags.
5344 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5345 for (const MDOperand &MDOp : MD->operands())
5346 Check(MMRAMetadata::isTagMD(MDOp.get()),
5347 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5348 }
5349
// Verify memprof call stack metadata: a non-empty list of constant-integer
// location hashes.
// NOTE(review): source line 5357 (the Check(...) call opening the per-operand
// diagnostic) is elided in this rendering.
5350 void Verifier::visitCallStackMetadata(MDNode *MD) {
5351 // Call stack metadata should consist of a list of at least 1 constant int
5352 // (representing a hash of the location).
5353 Check(MD->getNumOperands() >= 1,
5354 "call stack metadata should have at least 1 operand", MD);
5355 
5356 for (const auto &Op : MD->operands())
5358 "call stack metadata operand should be constant integer", Op);
5359 }
5360
// Verify a !memprof attachment: a list of MemInfoBlock (MIB) nodes, each
// with a call stack followed by tag/attribute operands.
// NOTE(review): source line 5386 (the Check(...) call opening the
// second-operand diagnostic) is elided in this rendering.
5361 void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5362 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5363 Check(MD->getNumOperands() >= 1,
5364 "!memprof annotations should have at least 1 metadata operand "
5365 "(MemInfoBlock)",
5366 MD);
5367 
5368 // Check each MIB
5369 for (auto &MIBOp : MD->operands()) {
5370 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5371 // The first operand of an MIB should be the call stack metadata.
5372 // The rest of the operands should be MDString tags, and there should be
5373 // at least one.
5374 Check(MIB->getNumOperands() >= 2,
5375 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5376 
5377 // Check call stack metadata (first operand).
5378 Check(MIB->getOperand(0) != nullptr,
5379 "!memprof MemInfoBlock first operand should not be null", MIB);
5380 Check(isa<MDNode>(MIB->getOperand(0)),
5381 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5382 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5383 visitCallStackMetadata(StackMD);
5384 
5385 // The second MIB operand should be MDString.
5387 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5388 
5389 // Any remaining should be MDNode that are pairs of integers
5390 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5391 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5392 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5393 MIB);
5394 Check(OpNode->getNumOperands() == 2,
5395 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5396 "operands",
5397 MIB);
5398 // Check that all of Op's operands are ConstantInt.
5399 Check(llvm::all_of(OpNode->operands(),
5400 [](const MDOperand &Op) {
5401 return mdconst::hasa<ConstantInt>(Op);
5402 }),
5403 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5404 "ConstantInt operands",
5405 MIB);
5406 }
5407 }
5408 }
5409
5410void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5411 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5412 // Verify the partial callstack annotated from memprof profiles. This callsite
5413 // is a part of a profiled allocation callstack.
5414 visitCallStackMetadata(MD);
5415}
5416
5417static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5418 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5419 return isa<ConstantInt>(VAL->getValue());
5420 return false;
5421}
5422
// Verify a !callee_type attachment: a list of well-formed generalized type
// metadata nodes (offset-0 + generalized type-id string).
// NOTE(review): source line 5427 (the Check(...) call opening the
// list-element diagnostic) is elided in this rendering.
5423 void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5424 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5425 &I);
5426 for (Metadata *Op : MD->operands()) {
5428 "The callee_type metadata must be a list of type metadata nodes", Op);
5429 auto *TypeMD = cast<MDNode>(Op);
5430 Check(TypeMD->getNumOperands() == 2,
5431 "Well-formed generalized type metadata must contain exactly two "
5432 "operands",
5433 Op);
// Function type metadata always uses byte offset zero.
5434 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5435 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5436 "The first operand of type metadata for functions must be zero", Op);
5437 Check(TypeMD->hasGeneralizedMDString(),
5438 "Only generalized type metadata can be part of the callee_type "
5439 "metadata list",
5440 Op);
5441 }
5442 }
5443
5444void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5445 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5446 Check(Annotation->getNumOperands() >= 1,
5447 "annotation must have at least one operand");
5448 for (const MDOperand &Op : Annotation->operands()) {
5449 bool TupleOfStrings =
5450 isa<MDTuple>(Op.get()) &&
5451 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5452 return isa<MDString>(Annotation.get());
5453 });
5454 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5455 "operands must be a string or a tuple of strings");
5456 }
5457}
5458
// Verify a single alias scope node: {self|string, domain[, name-string]},
// where the domain is itself {self|string[, name-string]}.
// NOTE(review): source line 5466 (the Check(...) call opening the
// third-operand diagnostic) is elided in this rendering.
5459 void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5460 unsigned NumOps = MD->getNumOperands();
5461 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5462 MD);
// First operand: either the node itself (for uniquing) or a name string.
5463 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5464 "first scope operand must be self-referential or string", MD);
5465 if (NumOps == 3)
5467 "third scope operand must be string (if used)", MD);
5468 
// Second operand: the alias domain this scope belongs to.
5469 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5470 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5471 
5472 unsigned NumDomainOps = Domain->getNumOperands();
5473 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5474 "domain must have one or two operands", Domain);
5475 Check(Domain->getOperand(0).get() == Domain ||
5476 isa<MDString>(Domain->getOperand(0)),
5477 "first domain operand must be self-referential or string", Domain);
5478 if (NumDomainOps == 2)
5479 Check(isa<MDString>(Domain->getOperand(1)),
5480 "second domain operand must be string (if used)", Domain);
5481 }
5482
5483void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5484 for (const MDOperand &Op : MD->operands()) {
5485 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5486 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5487 visitAliasScopeMetadata(OpMD);
5488 }
5489}
5490
5491void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5492 auto IsValidAccessScope = [](const MDNode *MD) {
5493 return MD->getNumOperands() == 0 && MD->isDistinct();
5494 };
5495
5496 // It must be either an access scope itself...
5497 if (IsValidAccessScope(MD))
5498 return;
5499
5500 // ...or a list of access scopes.
5501 for (const MDOperand &Op : MD->operands()) {
5502 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5503 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5504 Check(IsValidAccessScope(OpMD),
5505 "Access scope list contains invalid access scope", MD);
5506 }
5507}
5508
5509void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5510 static const char *ValidArgs[] = {"address_is_null", "address",
5511 "read_provenance", "provenance"};
5512
5513 auto *SI = dyn_cast<StoreInst>(&I);
5514 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5515 Check(SI->getValueOperand()->getType()->isPointerTy(),
5516 "!captures metadata can only be applied to store with value operand of "
5517 "pointer type",
5518 &I);
5519 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5520 &I);
5521
5522 for (Metadata *Op : Captures->operands()) {
5523 auto *Str = dyn_cast<MDString>(Op);
5524 Check(Str, "!captures metadata must be a list of strings", &I);
5525 Check(is_contained(ValidArgs, Str->getString()),
5526 "invalid entry in !captures metadata", &I, Str);
5527 }
5528}
5529
// Verify an !alloc_token attachment: calls only, with exactly a string
// operand and an integer-constant operand.
// NOTE(review): source line 5534 (the Check(...) call opening the
// second-operand diagnostic) is elided in this rendering.
5530 void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5531 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5532 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5533 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5535 "expected integer constant", MD);
5536 }
5537
5538 /// verifyInstruction - Verify that an instruction is well formed.
5539 ///
// Central per-instruction checks: parentage, result type, operand legality,
// and dispatch to the per-metadata-kind verifiers.
// NOTE(review): several source lines are elided in this rendering (e.g.
// 5599, 5661, 5673, 5679-5680, 5686, 5693, 5723, 5779); diagnostics whose
// opening Check(...) is missing appear below as bare message text.
5540 void Verifier::visitInstruction(Instruction &I) {
5541 BasicBlock *BB = I.getParent();
5542 Check(BB, "Instruction not embedded in basic block!", &I);
5543 
5544 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
// Self-reference is tolerated in unreachable code (DT check), since such
// code is not required to be in SSA form.
5545 for (User *U : I.users()) {
5546 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5547 "Only PHI nodes may reference their own value!", &I);
5548 }
5549 }
5550 
5551 // Check that void typed values don't have names
5552 Check(!I.getType()->isVoidTy() || !I.hasName(),
5553 "Instruction has a name, but provides a void value!", &I);
5554 
5555 // Check that the return value of the instruction is either void or a legal
5556 // value type.
5557 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5558 "Instruction returns a non-scalar type!", &I);
5559 
5560 // Check that the instruction doesn't produce metadata. Calls are already
5561 // checked against the callee type.
5562 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5563 "Invalid use of metadata!", &I);
5564 
5565 // Check that all uses of the instruction, if they are instructions
5566 // themselves, actually have parent basic blocks. If the use is not an
5567 // instruction, it is an error!
5568 for (Use &U : I.uses()) {
5569 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5570 Check(Used->getParent() != nullptr,
5571 "Instruction referencing"
5572 " instruction not embedded in a basic block!",
5573 &I, Used);
5574 else {
5575 CheckFailed("Use of instruction is not an instruction!", U);
5576 return;
5577 }
5578 }
5579 
5580 // Get a pointer to the call base of the instruction if it is some form of
5581 // call.
5582 const CallBase *CBI = dyn_cast<CallBase>(&I);
5583 
// Per-operand checks: operands must be first-class values defined in the
// same function/module, and special forms (intrinsics, inline asm) may only
// appear in the positions that allow them.
5584 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5585 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5586 
5587 // Check to make sure that only first-class-values are operands to
5588 // instructions.
5589 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5590 Check(false, "Instruction operands must be first-class values!", &I);
5591 }
5592 
5593 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5594 // This code checks whether the function is used as the operand of a
5595 // clang_arc_attachedcall operand bundle.
5596 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5597 int Idx) {
5598 return CBI && CBI->isOperandBundleOfType(
5600 };
5601 
5602 // Check to make sure that the "address of" an intrinsic function is never
5603 // taken. Ignore cases where the address of the intrinsic function is used
5604 // as the argument of operand bundle "clang.arc.attachedcall" as those
5605 // cases are handled in verifyAttachedCallBundle.
5606 Check((!F->isIntrinsic() ||
5607 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5608 IsAttachedCallOperand(F, CBI, i)),
5609 "Cannot take the address of an intrinsic!", &I);
// Only a small allowlist of intrinsics may be invoked (rather than called).
5610 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5611 F->getIntrinsicID() == Intrinsic::donothing ||
5612 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5613 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5614 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5615 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5616 F->getIntrinsicID() == Intrinsic::coro_resume ||
5617 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5618 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5619 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5620 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5621 F->getIntrinsicID() ==
5622 Intrinsic::experimental_patchpoint_void ||
5623 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5624 F->getIntrinsicID() == Intrinsic::fake_use ||
5625 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5626 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5627 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5628 IsAttachedCallOperand(F, CBI, i),
5629 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5630 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5631 "wasm.(re)throw",
5632 &I);
5633 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5634 &M, F, F->getParent());
5635 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5636 Check(OpBB->getParent() == BB->getParent(),
5637 "Referring to a basic block in another function!", &I);
5638 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5639 Check(OpArg->getParent() == BB->getParent(),
5640 "Referring to an argument in another function!", &I);
5641 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5642 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5643 &M, GV, GV->getParent());
5644 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5645 Check(OpInst->getFunction() == BB->getParent(),
5646 "Referring to an instruction in another function!", &I);
5647 verifyDominatesUse(I, i);
5648 } else if (isa<InlineAsm>(I.getOperand(i))) {
5649 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5650 "Cannot take the address of an inline asm!", &I);
5651 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5652 visitConstantExprsRecursively(C);
5653 }
5654 }
5655 
// Per-metadata-kind verification follows. Each block below validates one
// well-known attachment kind and/or dispatches to its dedicated visitor.
5656 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5657 Check(I.getType()->isFPOrFPVectorTy(),
5658 "fpmath requires a floating point result!", &I);
5659 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5660 if (ConstantFP *CFP0 =
5662 const APFloat &Accuracy = CFP0->getValueAPF();
5663 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5664 "fpmath accuracy must have float type", &I);
5665 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5666 "fpmath accuracy not a positive number!", &I);
5667 } else {
5668 Check(false, "invalid fpmath accuracy!", &I);
5669 }
5670 }
5671 
5672 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5674 "Ranges are only for loads, calls and invokes!", &I);
5675 visitRangeMetadata(I, Range, I.getType());
5676 }
5677 
5678 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5681 "noalias.addrspace are only for memory operations!", &I);
5682 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5683 }
5684 
5685 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5687 "invariant.group metadata is only for loads and stores", &I);
5688 }
5689 
5690 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5691 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5692 &I);
5694 "nonnull applies only to load instructions, use attributes"
5695 " for calls or invokes",
5696 &I);
5697 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5698 }
5699 
5700 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5701 visitDereferenceableMetadata(I, MD);
5702 
5703 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5704 visitDereferenceableMetadata(I, MD);
5705 
5706 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5707 visitNofreeMetadata(I, MD);
5708 
5709 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5710 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5711 
5712 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5713 visitAliasScopeListMetadata(MD);
5714 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5715 visitAliasScopeListMetadata(MD);
5716 
5717 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5718 visitAccessGroupMetadata(MD);
5719 
5720 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5721 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5722 &I);
5724 "align applies only to load instructions, "
5725 "use attributes for calls or invokes",
5726 &I);
5727 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5728 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5729 Check(CI && CI->getType()->isIntegerTy(64),
5730 "align metadata value must be an i64!", &I);
5731 uint64_t Align = CI->getZExtValue();
5732 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5733 &I);
5734 Check(Align <= Value::MaximumAlignment,
5735 "alignment is larger that implementation defined limit", &I);
5736 }
5737 
5738 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5739 visitProfMetadata(I, MD);
5740 
5741 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5742 visitMemProfMetadata(I, MD);
5743 
5744 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5745 visitCallsiteMetadata(I, MD);
5746 
5747 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5748 visitCalleeTypeMetadata(I, MD);
5749 
5750 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5751 visitDIAssignIDMetadata(I, MD);
5752 
5753 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5754 visitMMRAMetadata(I, MMRA);
5755 
5756 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5757 visitAnnotationMetadata(Annotation);
5758 
5759 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5760 visitCapturesMetadata(I, Captures);
5761 
5762 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5763 visitAllocTokenMetadata(I, MD);
5764 
5765 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5766 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5767 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5768 
// Key Instructions: an atomGroup on the location requires the owning
// subprogram to have opted in to Key Instructions.
5769 if (auto *DL = dyn_cast<DILocation>(N)) {
5770 if (DL->getAtomGroup()) {
5771 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5772 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5773 "Instructions enabled",
5774 DL, DL->getScope()->getSubprogram());
5775 }
5776 }
5777 }
5778 
// Finally, verify every attachment as a generic MDNode; only !dbg and
// !llvm.loop attachments may contain debug locations.
5780 I.getAllMetadata(MDs);
5781 for (auto Attachment : MDs) {
5782 unsigned Kind = Attachment.first;
5783 auto AllowLocs =
5784 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5785 ? AreDebugLocsAllowed::Yes
5786 : AreDebugLocsAllowed::No;
5787 visitMDNode(*Attachment.second, AllowLocs);
5788 }
5789 
// Record this instruction for the same-block fast path in verifyDominatesUse.
5790 InstsInThisBlock.insert(&I);
5791 }
5792
5793/// Allow intrinsics to be verified in different ways.
5794void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5796 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5797 IF);
5798
5799 // Verify that the intrinsic prototype lines up with what the .td files
5800 // describe.
5801 FunctionType *IFTy = IF->getFunctionType();
5802 bool IsVarArg = IFTy->isVarArg();
5803
5807
5808 // Walk the descriptors to extract overloaded types.
5813 "Intrinsic has incorrect return type!", IF);
5815 "Intrinsic has incorrect argument type!", IF);
5816
5817 // Verify if the intrinsic call matches the vararg property.
5818 if (IsVarArg)
5820 "Intrinsic was not defined with variable arguments!", IF);
5821 else
5823 "Callsite was not defined with variable arguments!", IF);
5824
5825 // All descriptors should be absorbed by now.
5826 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5827
5828 // Now that we have the intrinsic ID and the actual argument types (and we
5829 // know they are legal for the intrinsic!) get the intrinsic name through the
5830 // usual means. This allows us to verify the mangling of argument types into
5831 // the name.
5832 const std::string ExpectedName =
5833 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5834 Check(ExpectedName == IF->getName(),
5835 "Intrinsic name not mangled correctly for type arguments! "
5836 "Should be: " +
5837 ExpectedName,
5838 IF);
5839
5840 // If the intrinsic takes MDNode arguments, verify that they are either global
5841 // or are local to *this* function.
5842 for (Value *V : Call.args()) {
5843 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5844 visitMetadataAsValue(*MD, Call.getCaller());
5845 if (auto *Const = dyn_cast<Constant>(V))
5846 Check(!Const->getType()->isX86_AMXTy(),
5847 "const x86_amx is not allowed in argument!");
5848 }
5849
5850 switch (ID) {
5851 default:
5852 break;
5853 case Intrinsic::assume: {
5854 if (Call.hasOperandBundles()) {
5856 Check(Cond && Cond->isOne(),
5857 "assume with operand bundles must have i1 true condition", Call);
5858 }
5859 for (auto &Elem : Call.bundle_op_infos()) {
5860 unsigned ArgCount = Elem.End - Elem.Begin;
5861 // Separate storage assumptions are special insofar as they're the only
5862 // operand bundles allowed on assumes that aren't parameter attributes.
5863 if (Elem.Tag->getKey() == "separate_storage") {
5864 Check(ArgCount == 2,
5865 "separate_storage assumptions should have 2 arguments", Call);
5866 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5867 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5868 "arguments to separate_storage assumptions should be pointers",
5869 Call);
5870 continue;
5871 }
5872 Check(Elem.Tag->getKey() == "ignore" ||
5873 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5874 "tags must be valid attribute names", Call);
5875 Attribute::AttrKind Kind =
5876 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5877 if (Kind == Attribute::Alignment) {
5878 Check(ArgCount <= 3 && ArgCount >= 2,
5879 "alignment assumptions should have 2 or 3 arguments", Call);
5880 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5881 "first argument should be a pointer", Call);
5882 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5883 "second argument should be an integer", Call);
5884 if (ArgCount == 3)
5885 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5886 "third argument should be an integer if present", Call);
5887 continue;
5888 }
5889 if (Kind == Attribute::Dereferenceable) {
5890 Check(ArgCount == 2,
5891 "dereferenceable assumptions should have 2 arguments", Call);
5892 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5893 "first argument should be a pointer", Call);
5894 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5895 "second argument should be an integer", Call);
5896 continue;
5897 }
5898 Check(ArgCount <= 2, "too many arguments", Call);
5899 if (Kind == Attribute::None)
5900 break;
5901 if (Attribute::isIntAttrKind(Kind)) {
5902 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5903 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5904 "the second argument should be a constant integral value", Call);
5905 } else if (Attribute::canUseAsParamAttr(Kind)) {
5906 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5907 } else if (Attribute::canUseAsFnAttr(Kind)) {
5908 Check((ArgCount) == 0, "this attribute has no argument", Call);
5909 }
5910 }
5911 break;
5912 }
5913 case Intrinsic::ucmp:
5914 case Intrinsic::scmp: {
5915 Type *SrcTy = Call.getOperand(0)->getType();
5916 Type *DestTy = Call.getType();
5917
5918 Check(DestTy->getScalarSizeInBits() >= 2,
5919 "result type must be at least 2 bits wide", Call);
5920
5921 bool IsDestTypeVector = DestTy->isVectorTy();
5922 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5923 "ucmp/scmp argument and result types must both be either vector or "
5924 "scalar types",
5925 Call);
5926 if (IsDestTypeVector) {
5927 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5928 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5929 Check(SrcVecLen == DestVecLen,
5930 "return type and arguments must have the same number of "
5931 "elements",
5932 Call);
5933 }
5934 break;
5935 }
5936 case Intrinsic::coro_id: {
5937 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5938 if (isa<ConstantPointerNull>(InfoArg))
5939 break;
5940 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5941 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5942 "info argument of llvm.coro.id must refer to an initialized "
5943 "constant");
5944 Constant *Init = GV->getInitializer();
5946 "info argument of llvm.coro.id must refer to either a struct or "
5947 "an array");
5948 break;
5949 }
5950 case Intrinsic::is_fpclass: {
5951 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5952 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5953 "unsupported bits for llvm.is.fpclass test mask");
5954 break;
5955 }
5956 case Intrinsic::fptrunc_round: {
5957 // Check the rounding mode
5958 Metadata *MD = nullptr;
5960 if (MAV)
5961 MD = MAV->getMetadata();
5962
5963 Check(MD != nullptr, "missing rounding mode argument", Call);
5964
5965 Check(isa<MDString>(MD),
5966 ("invalid value for llvm.fptrunc.round metadata operand"
5967 " (the operand should be a string)"),
5968 MD);
5969
5970 std::optional<RoundingMode> RoundMode =
5971 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5972 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5973 "unsupported rounding mode argument", Call);
5974 break;
5975 }
5976 case Intrinsic::convert_to_arbitrary_fp: {
5977 // Check that vector element counts are consistent.
5978 Type *ValueTy = Call.getArgOperand(0)->getType();
5979 Type *IntTy = Call.getType();
5980
5981 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
5982 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
5983 Check(IntVecTy,
5984 "if floating-point operand is a vector, integer operand must also "
5985 "be a vector",
5986 Call);
5987 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
5988 "floating-point and integer vector operands must have the same "
5989 "element count",
5990 Call);
5991 }
5992
5993 // Check interpretation metadata (argoperand 1).
5994 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
5995 Check(InterpMAV, "missing interpretation metadata operand", Call);
5996 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
5997 Check(InterpStr, "interpretation metadata operand must be a string", Call);
5998 StringRef Interp = InterpStr->getString();
5999
6000 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6001 Call);
6002
6003 // Valid interpretation strings: mini-float format names.
6005 "unsupported interpretation metadata string", Call);
6006
6007 // Check rounding mode metadata (argoperand 2).
6008 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6009 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6010 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6011 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6012
6013 std::optional<RoundingMode> RM =
6014 convertStrToRoundingMode(RoundingStr->getString());
6015 Check(RM && *RM != RoundingMode::Dynamic,
6016 "unsupported rounding mode argument", Call);
6017 break;
6018 }
6019 case Intrinsic::convert_from_arbitrary_fp: {
6020 // Check that vector element counts are consistent.
6021 Type *IntTy = Call.getArgOperand(0)->getType();
6022 Type *ValueTy = Call.getType();
6023
6024 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6025 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6026 Check(IntVecTy,
6027 "if floating-point operand is a vector, integer operand must also "
6028 "be a vector",
6029 Call);
6030 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6031 "floating-point and integer vector operands must have the same "
6032 "element count",
6033 Call);
6034 }
6035
6036 // Check interpretation metadata (argoperand 1).
6037 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6038 Check(InterpMAV, "missing interpretation metadata operand", Call);
6039 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6040 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6041 StringRef Interp = InterpStr->getString();
6042
6043 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6044 Call);
6045
6046 // Valid interpretation strings: mini-float format names.
6048 "unsupported interpretation metadata string", Call);
6049 break;
6050 }
6051#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6052#include "llvm/IR/VPIntrinsics.def"
6053#undef BEGIN_REGISTER_VP_INTRINSIC
6054 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6055 break;
6056#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6057 case Intrinsic::INTRINSIC:
6058#include "llvm/IR/ConstrainedOps.def"
6059#undef INSTRUCTION
6060 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6061 break;
6062 case Intrinsic::dbg_declare: // llvm.dbg.declare
6063 case Intrinsic::dbg_value: // llvm.dbg.value
6064 case Intrinsic::dbg_assign: // llvm.dbg.assign
6065 case Intrinsic::dbg_label: // llvm.dbg.label
6066 // We no longer interpret debug intrinsics (the old variable-location
6067 // design). They're meaningless as far as LLVM is concerned; we could make
6068 // it an error for them to appear, but it's possible we'll have users
6069 // converting back to intrinsics for the foreseeable future (such as DXIL),
6070 // so tolerate their existence.
6071 break;
6072 case Intrinsic::memcpy:
6073 case Intrinsic::memcpy_inline:
6074 case Intrinsic::memmove:
6075 case Intrinsic::memset:
6076 case Intrinsic::memset_inline:
6077 break;
6078 case Intrinsic::experimental_memset_pattern: {
6079 const auto Memset = cast<MemSetPatternInst>(&Call);
6080 Check(Memset->getValue()->getType()->isSized(),
6081 "unsized types cannot be used as memset patterns", Call);
6082 break;
6083 }
6084 case Intrinsic::memcpy_element_unordered_atomic:
6085 case Intrinsic::memmove_element_unordered_atomic:
6086 case Intrinsic::memset_element_unordered_atomic: {
6087 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6088
6089 ConstantInt *ElementSizeCI =
6090 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6091 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6092 Check(ElementSizeVal.isPowerOf2(),
6093 "element size of the element-wise atomic memory intrinsic "
6094 "must be a power of 2",
6095 Call);
6096
6097 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6098 return Alignment && ElementSizeVal.ule(Alignment->value());
6099 };
6100 Check(IsValidAlignment(AMI->getDestAlign()),
6101 "incorrect alignment of the destination argument", Call);
6102 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6103 Check(IsValidAlignment(AMT->getSourceAlign()),
6104 "incorrect alignment of the source argument", Call);
6105 }
6106 break;
6107 }
6108 case Intrinsic::call_preallocated_setup: {
6109 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6110 bool FoundCall = false;
6111 for (User *U : Call.users()) {
6112 auto *UseCall = dyn_cast<CallBase>(U);
6113 Check(UseCall != nullptr,
6114 "Uses of llvm.call.preallocated.setup must be calls");
6115 Intrinsic::ID IID = UseCall->getIntrinsicID();
6116 if (IID == Intrinsic::call_preallocated_arg) {
6117 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6118 Check(AllocArgIndex != nullptr,
6119 "llvm.call.preallocated.alloc arg index must be a constant");
6120 auto AllocArgIndexInt = AllocArgIndex->getValue();
6121 Check(AllocArgIndexInt.sge(0) &&
6122 AllocArgIndexInt.slt(NumArgs->getValue()),
6123 "llvm.call.preallocated.alloc arg index must be between 0 and "
6124 "corresponding "
6125 "llvm.call.preallocated.setup's argument count");
6126 } else if (IID == Intrinsic::call_preallocated_teardown) {
6127 // nothing to do
6128 } else {
6129 Check(!FoundCall, "Can have at most one call corresponding to a "
6130 "llvm.call.preallocated.setup");
6131 FoundCall = true;
6132 size_t NumPreallocatedArgs = 0;
6133 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6134 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6135 ++NumPreallocatedArgs;
6136 }
6137 }
6138 Check(NumPreallocatedArgs != 0,
6139 "cannot use preallocated intrinsics on a call without "
6140 "preallocated arguments");
6141 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6142 "llvm.call.preallocated.setup arg size must be equal to number "
6143 "of preallocated arguments "
6144 "at call site",
6145 Call, *UseCall);
6146 // getOperandBundle() cannot be called if more than one of the operand
6147 // bundle exists. There is already a check elsewhere for this, so skip
6148 // here if we see more than one.
6149 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6150 1) {
6151 return;
6152 }
6153 auto PreallocatedBundle =
6154 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6155 Check(PreallocatedBundle,
6156 "Use of llvm.call.preallocated.setup outside intrinsics "
6157 "must be in \"preallocated\" operand bundle");
6158 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6159 "preallocated bundle must have token from corresponding "
6160 "llvm.call.preallocated.setup");
6161 }
6162 }
6163 break;
6164 }
6165 case Intrinsic::call_preallocated_arg: {
6166 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6167 Check(Token &&
6168 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6169 "llvm.call.preallocated.arg token argument must be a "
6170 "llvm.call.preallocated.setup");
6171 Check(Call.hasFnAttr(Attribute::Preallocated),
6172 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6173 "call site attribute");
6174 break;
6175 }
6176 case Intrinsic::call_preallocated_teardown: {
6177 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6178 Check(Token &&
6179 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6180 "llvm.call.preallocated.teardown token argument must be a "
6181 "llvm.call.preallocated.setup");
6182 break;
6183 }
6184 case Intrinsic::gcroot:
6185 case Intrinsic::gcwrite:
6186 case Intrinsic::gcread:
6187 if (ID == Intrinsic::gcroot) {
6188 AllocaInst *AI =
6190 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6192 "llvm.gcroot parameter #2 must be a constant.", Call);
6193 if (!AI->getAllocatedType()->isPointerTy()) {
6195 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6196 "or argument #2 must be a non-null constant.",
6197 Call);
6198 }
6199 }
6200
6201 Check(Call.getParent()->getParent()->hasGC(),
6202 "Enclosing function does not use GC.", Call);
6203 break;
6204 case Intrinsic::init_trampoline:
6206 "llvm.init_trampoline parameter #2 must resolve to a function.",
6207 Call);
6208 break;
6209 case Intrinsic::prefetch:
6210 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6211 "rw argument to llvm.prefetch must be 0-1", Call);
6212 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6213 "locality argument to llvm.prefetch must be 0-3", Call);
6214 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6215 "cache type argument to llvm.prefetch must be 0-1", Call);
6216 break;
6217 case Intrinsic::reloc_none: {
6219 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6220 "llvm.reloc.none argument must be a metadata string", &Call);
6221 break;
6222 }
6223 case Intrinsic::stackprotector:
6225 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6226 break;
6227 case Intrinsic::localescape: {
6228 BasicBlock *BB = Call.getParent();
6229 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6230 Call);
6231 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6232 Call);
6233 for (Value *Arg : Call.args()) {
6234 if (isa<ConstantPointerNull>(Arg))
6235 continue; // Null values are allowed as placeholders.
6236 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6237 Check(AI && AI->isStaticAlloca(),
6238 "llvm.localescape only accepts static allocas", Call);
6239 }
6240 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6241 SawFrameEscape = true;
6242 break;
6243 }
6244 case Intrinsic::localrecover: {
6246 Function *Fn = dyn_cast<Function>(FnArg);
6247 Check(Fn && !Fn->isDeclaration(),
6248 "llvm.localrecover first "
6249 "argument must be function defined in this module",
6250 Call);
6251 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6252 auto &Entry = FrameEscapeInfo[Fn];
6253 Entry.second = unsigned(
6254 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6255 break;
6256 }
6257
6258 case Intrinsic::experimental_gc_statepoint:
6259 if (auto *CI = dyn_cast<CallInst>(&Call))
6260 Check(!CI->isInlineAsm(),
6261 "gc.statepoint support for inline assembly unimplemented", CI);
6262 Check(Call.getParent()->getParent()->hasGC(),
6263 "Enclosing function does not use GC.", Call);
6264
6265 verifyStatepoint(Call);
6266 break;
6267 case Intrinsic::experimental_gc_result: {
6268 Check(Call.getParent()->getParent()->hasGC(),
6269 "Enclosing function does not use GC.", Call);
6270
6271 auto *Statepoint = Call.getArgOperand(0);
6272 if (isa<UndefValue>(Statepoint))
6273 break;
6274
6275 // Are we tied to a statepoint properly?
6276 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6277 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6278 Intrinsic::experimental_gc_statepoint,
6279 "gc.result operand #1 must be from a statepoint", Call,
6280 Call.getArgOperand(0));
6281
6282 // Check that result type matches wrapped callee.
6283 auto *TargetFuncType =
6284 cast<FunctionType>(StatepointCall->getParamElementType(2));
6285 Check(Call.getType() == TargetFuncType->getReturnType(),
6286 "gc.result result type does not match wrapped callee", Call);
6287 break;
6288 }
6289 case Intrinsic::experimental_gc_relocate: {
6290 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6291
6293 "gc.relocate must return a pointer or a vector of pointers", Call);
6294
6295 // Check that this relocate is correctly tied to the statepoint
6296
6297 // This is case for relocate on the unwinding path of an invoke statepoint
6298 if (LandingPadInst *LandingPad =
6300
6301 const BasicBlock *InvokeBB =
6302 LandingPad->getParent()->getUniquePredecessor();
6303
6304 // Landingpad relocates should have only one predecessor with invoke
6305 // statepoint terminator
6306 Check(InvokeBB, "safepoints should have unique landingpads",
6307 LandingPad->getParent());
6308 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6309 InvokeBB);
6311 "gc relocate should be linked to a statepoint", InvokeBB);
6312 } else {
6313 // In all other cases relocate should be tied to the statepoint directly.
6314 // This covers relocates on a normal return path of invoke statepoint and
6315 // relocates of a call statepoint.
6316 auto *Token = Call.getArgOperand(0);
6318 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6319 }
6320
6321 // Verify rest of the relocate arguments.
6322 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6323
6324 // Both the base and derived must be piped through the safepoint.
6327 "gc.relocate operand #2 must be integer offset", Call);
6328
6329 Value *Derived = Call.getArgOperand(2);
6330 Check(isa<ConstantInt>(Derived),
6331 "gc.relocate operand #3 must be integer offset", Call);
6332
6333 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6334 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6335
6336 // Check the bounds
6337 if (isa<UndefValue>(StatepointCall))
6338 break;
6339 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6340 .getOperandBundle(LLVMContext::OB_gc_live)) {
6341 Check(BaseIndex < Opt->Inputs.size(),
6342 "gc.relocate: statepoint base index out of bounds", Call);
6343 Check(DerivedIndex < Opt->Inputs.size(),
6344 "gc.relocate: statepoint derived index out of bounds", Call);
6345 }
6346
6347 // Relocated value must be either a pointer type or vector-of-pointer type,
6348 // but gc_relocate does not need to return the same pointer type as the
6349 // relocated pointer. It can be casted to the correct type later if it's
6350 // desired. However, they must have the same address space and 'vectorness'
6351 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6352 auto *ResultType = Call.getType();
6353 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6354 auto *BaseType = Relocate.getBasePtr()->getType();
6355
6356 Check(BaseType->isPtrOrPtrVectorTy(),
6357 "gc.relocate: relocated value must be a pointer", Call);
6358 Check(DerivedType->isPtrOrPtrVectorTy(),
6359 "gc.relocate: relocated value must be a pointer", Call);
6360
6361 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6362 "gc.relocate: vector relocates to vector and pointer to pointer",
6363 Call);
6364 Check(
6365 ResultType->getPointerAddressSpace() ==
6366 DerivedType->getPointerAddressSpace(),
6367 "gc.relocate: relocating a pointer shouldn't change its address space",
6368 Call);
6369
6370 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6371 Check(GC, "gc.relocate: calling function must have GCStrategy",
6372 Call.getFunction());
6373 if (GC) {
6374 auto isGCPtr = [&GC](Type *PTy) {
6375 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6376 };
6377 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6378 Check(isGCPtr(BaseType),
6379 "gc.relocate: relocated value must be a gc pointer", Call);
6380 Check(isGCPtr(DerivedType),
6381 "gc.relocate: relocated value must be a gc pointer", Call);
6382 }
6383 break;
6384 }
6385 case Intrinsic::experimental_patchpoint: {
6386 if (Call.getCallingConv() == CallingConv::AnyReg) {
6388 "patchpoint: invalid return type used with anyregcc", Call);
6389 }
6390 break;
6391 }
6392 case Intrinsic::eh_exceptioncode:
6393 case Intrinsic::eh_exceptionpointer: {
6395 "eh.exceptionpointer argument must be a catchpad", Call);
6396 break;
6397 }
6398 case Intrinsic::get_active_lane_mask: {
6400 "get_active_lane_mask: must return a "
6401 "vector",
6402 Call);
6403 auto *ElemTy = Call.getType()->getScalarType();
6404 Check(ElemTy->isIntegerTy(1),
6405 "get_active_lane_mask: element type is not "
6406 "i1",
6407 Call);
6408 break;
6409 }
6410 case Intrinsic::experimental_get_vector_length: {
6411 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6412 Check(!VF->isNegative() && !VF->isZero(),
6413 "get_vector_length: VF must be positive", Call);
6414 break;
6415 }
6416 case Intrinsic::masked_load: {
6417 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6418 Call);
6419
6421 Value *PassThru = Call.getArgOperand(2);
6422 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6423 Call);
6424 Check(PassThru->getType() == Call.getType(),
6425 "masked_load: pass through and return type must match", Call);
6426 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6427 cast<VectorType>(Call.getType())->getElementCount(),
6428 "masked_load: vector mask must be same length as return", Call);
6429 break;
6430 }
6431 case Intrinsic::masked_store: {
6432 Value *Val = Call.getArgOperand(0);
6434 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6435 Call);
6436 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6437 cast<VectorType>(Val->getType())->getElementCount(),
6438 "masked_store: vector mask must be same length as value", Call);
6439 break;
6440 }
6441
6442 case Intrinsic::experimental_guard: {
6443 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6445 "experimental_guard must have exactly one "
6446 "\"deopt\" operand bundle");
6447 break;
6448 }
6449
6450 case Intrinsic::experimental_deoptimize: {
6451 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6452 Call);
6454 "experimental_deoptimize must have exactly one "
6455 "\"deopt\" operand bundle");
6457 "experimental_deoptimize return type must match caller return type");
6458
6459 if (isa<CallInst>(Call)) {
6461 Check(RI,
6462 "calls to experimental_deoptimize must be followed by a return");
6463
6464 if (!Call.getType()->isVoidTy() && RI)
6465 Check(RI->getReturnValue() == &Call,
6466 "calls to experimental_deoptimize must be followed by a return "
6467 "of the value computed by experimental_deoptimize");
6468 }
6469
6470 break;
6471 }
6472 case Intrinsic::vastart: {
6474 "va_start called in a non-varargs function");
6475 break;
6476 }
6477 case Intrinsic::get_dynamic_area_offset: {
6478 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6479 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6480 IntTy->getBitWidth(),
6481 "get_dynamic_area_offset result type must be scalar integer matching "
6482 "alloca address space width",
6483 Call);
6484 break;
6485 }
6486 case Intrinsic::vector_reduce_and:
6487 case Intrinsic::vector_reduce_or:
6488 case Intrinsic::vector_reduce_xor:
6489 case Intrinsic::vector_reduce_add:
6490 case Intrinsic::vector_reduce_mul:
6491 case Intrinsic::vector_reduce_smax:
6492 case Intrinsic::vector_reduce_smin:
6493 case Intrinsic::vector_reduce_umax:
6494 case Intrinsic::vector_reduce_umin: {
6495 Type *ArgTy = Call.getArgOperand(0)->getType();
6496 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6497 "Intrinsic has incorrect argument type!");
6498 break;
6499 }
6500 case Intrinsic::vector_reduce_fmax:
6501 case Intrinsic::vector_reduce_fmin: {
6502 Type *ArgTy = Call.getArgOperand(0)->getType();
6503 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6504 "Intrinsic has incorrect argument type!");
6505 break;
6506 }
6507 case Intrinsic::vector_reduce_fadd:
6508 case Intrinsic::vector_reduce_fmul: {
6509 // Unlike the other reductions, the first argument is a start value. The
6510 // second argument is the vector to be reduced.
6511 Type *ArgTy = Call.getArgOperand(1)->getType();
6512 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6513 "Intrinsic has incorrect argument type!");
6514 break;
6515 }
6516 case Intrinsic::smul_fix:
6517 case Intrinsic::smul_fix_sat:
6518 case Intrinsic::umul_fix:
6519 case Intrinsic::umul_fix_sat:
6520 case Intrinsic::sdiv_fix:
6521 case Intrinsic::sdiv_fix_sat:
6522 case Intrinsic::udiv_fix:
6523 case Intrinsic::udiv_fix_sat: {
6524 Value *Op1 = Call.getArgOperand(0);
6525 Value *Op2 = Call.getArgOperand(1);
6527 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6528 "vector of ints");
6530 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6531 "vector of ints");
6532
6533 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6534 Check(Op3->getType()->isIntegerTy(),
6535 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6536 Check(Op3->getBitWidth() <= 32,
6537 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6538
6539 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6540 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6541 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6542 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6543 "the operands");
6544 } else {
6545 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6546 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6547 "to the width of the operands");
6548 }
6549 break;
6550 }
6551 case Intrinsic::lrint:
6552 case Intrinsic::llrint:
6553 case Intrinsic::lround:
6554 case Intrinsic::llround: {
6555 Type *ValTy = Call.getArgOperand(0)->getType();
6556 Type *ResultTy = Call.getType();
6557 auto *VTy = dyn_cast<VectorType>(ValTy);
6558 auto *RTy = dyn_cast<VectorType>(ResultTy);
6559 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6560 ExpectedName + ": argument must be floating-point or vector "
6561 "of floating-points, and result must be integer or "
6562 "vector of integers",
6563 &Call);
6564 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6565 ExpectedName + ": argument and result disagree on vector use", &Call);
6566 if (VTy) {
6567 Check(VTy->getElementCount() == RTy->getElementCount(),
6568 ExpectedName + ": argument must be same length as result", &Call);
6569 }
6570 break;
6571 }
6572 case Intrinsic::bswap: {
6573 Type *Ty = Call.getType();
6574 unsigned Size = Ty->getScalarSizeInBits();
6575 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6576 break;
6577 }
6578 case Intrinsic::invariant_start: {
6579 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6580 Check(InvariantSize &&
6581 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6582 "invariant_start parameter must be -1, 0 or a positive number",
6583 &Call);
6584 break;
6585 }
6586 case Intrinsic::matrix_multiply:
6587 case Intrinsic::matrix_transpose:
6588 case Intrinsic::matrix_column_major_load:
6589 case Intrinsic::matrix_column_major_store: {
6591 ConstantInt *Stride = nullptr;
6592 ConstantInt *NumRows;
6593 ConstantInt *NumColumns;
6594 VectorType *ResultTy;
6595 Type *Op0ElemTy = nullptr;
6596 Type *Op1ElemTy = nullptr;
6597 switch (ID) {
6598 case Intrinsic::matrix_multiply: {
6599 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6600 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6601 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6603 ->getNumElements() ==
6604 NumRows->getZExtValue() * N->getZExtValue(),
6605 "First argument of a matrix operation does not match specified "
6606 "shape!");
6608 ->getNumElements() ==
6609 N->getZExtValue() * NumColumns->getZExtValue(),
6610 "Second argument of a matrix operation does not match specified "
6611 "shape!");
6612
6613 ResultTy = cast<VectorType>(Call.getType());
6614 Op0ElemTy =
6615 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6616 Op1ElemTy =
6617 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6618 break;
6619 }
6620 case Intrinsic::matrix_transpose:
6621 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6622 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6623 ResultTy = cast<VectorType>(Call.getType());
6624 Op0ElemTy =
6625 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6626 break;
6627 case Intrinsic::matrix_column_major_load: {
6629 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6630 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6631 ResultTy = cast<VectorType>(Call.getType());
6632 break;
6633 }
6634 case Intrinsic::matrix_column_major_store: {
6636 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6637 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6638 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6639 Op0ElemTy =
6640 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6641 break;
6642 }
6643 default:
6644 llvm_unreachable("unexpected intrinsic");
6645 }
6646
6647 Check(ResultTy->getElementType()->isIntegerTy() ||
6648 ResultTy->getElementType()->isFloatingPointTy(),
6649 "Result type must be an integer or floating-point type!", IF);
6650
6651 if (Op0ElemTy)
6652 Check(ResultTy->getElementType() == Op0ElemTy,
6653 "Vector element type mismatch of the result and first operand "
6654 "vector!",
6655 IF);
6656
6657 if (Op1ElemTy)
6658 Check(ResultTy->getElementType() == Op1ElemTy,
6659 "Vector element type mismatch of the result and second operand "
6660 "vector!",
6661 IF);
6662
6664 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6665 "Result of a matrix operation does not fit in the returned vector!");
6666
6667 if (Stride) {
6668 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6669 IF);
6670 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6671 "Stride must be greater or equal than the number of rows!", IF);
6672 }
6673
6674 break;
6675 }
6676 case Intrinsic::vector_splice_left:
6677 case Intrinsic::vector_splice_right: {
6679 uint64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6680 uint64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6681 if (VecTy->isScalableTy() && Call.getParent() &&
6682 Call.getParent()->getParent()) {
6683 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6684 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6685 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6686 }
6687 if (ID == Intrinsic::vector_splice_left)
6688 Check(Idx < KnownMinNumElements,
6689 "The splice index exceeds the range [0, VL-1] where VL is the "
6690 "known minimum number of elements in the vector. For scalable "
6691 "vectors the minimum number of elements is determined from "
6692 "vscale_range.",
6693 &Call);
6694 else
6695 Check(Idx <= KnownMinNumElements,
6696 "The splice index exceeds the range [0, VL] where VL is the "
6697 "known minimum number of elements in the vector. For scalable "
6698 "vectors the minimum number of elements is determined from "
6699 "vscale_range.",
6700 &Call);
6701 break;
6702 }
6703 case Intrinsic::stepvector: {
6705 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6706 VecTy->getScalarSizeInBits() >= 8,
6707 "stepvector only supported for vectors of integers "
6708 "with a bitwidth of at least 8.",
6709 &Call);
6710 break;
6711 }
6712 case Intrinsic::experimental_vector_match: {
6713 Value *Op1 = Call.getArgOperand(0);
6714 Value *Op2 = Call.getArgOperand(1);
6716
6717 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6718 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6719 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6720
6721 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6723 "Second operand must be a fixed length vector.", &Call);
6724 Check(Op1Ty->getElementType()->isIntegerTy(),
6725 "First operand must be a vector of integers.", &Call);
6726 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6727 "First two operands must have the same element type.", &Call);
6728 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6729 "First operand and mask must have the same number of elements.",
6730 &Call);
6731 Check(MaskTy->getElementType()->isIntegerTy(1),
6732 "Mask must be a vector of i1's.", &Call);
6733 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6734 &Call);
6735 break;
6736 }
6737 case Intrinsic::vector_insert: {
6738 Value *Vec = Call.getArgOperand(0);
6739 Value *SubVec = Call.getArgOperand(1);
6740 Value *Idx = Call.getArgOperand(2);
6741 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6742
6743 VectorType *VecTy = cast<VectorType>(Vec->getType());
6744 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6745
6746 ElementCount VecEC = VecTy->getElementCount();
6747 ElementCount SubVecEC = SubVecTy->getElementCount();
6748 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6749 "vector_insert parameters must have the same element "
6750 "type.",
6751 &Call);
6752 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6753 "vector_insert index must be a constant multiple of "
6754 "the subvector's known minimum vector length.");
6755
6756 // If this insertion is not the 'mixed' case where a fixed vector is
6757 // inserted into a scalable vector, ensure that the insertion of the
6758 // subvector does not overrun the parent vector.
6759 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6760 Check(IdxN < VecEC.getKnownMinValue() &&
6761 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6762 "subvector operand of vector_insert would overrun the "
6763 "vector being inserted into.");
6764 }
6765 break;
6766 }
6767 case Intrinsic::vector_extract: {
6768 Value *Vec = Call.getArgOperand(0);
6769 Value *Idx = Call.getArgOperand(1);
6770 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6771
6772 VectorType *ResultTy = cast<VectorType>(Call.getType());
6773 VectorType *VecTy = cast<VectorType>(Vec->getType());
6774
6775 ElementCount VecEC = VecTy->getElementCount();
6776 ElementCount ResultEC = ResultTy->getElementCount();
6777
6778 Check(ResultTy->getElementType() == VecTy->getElementType(),
6779 "vector_extract result must have the same element "
6780 "type as the input vector.",
6781 &Call);
6782 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6783 "vector_extract index must be a constant multiple of "
6784 "the result type's known minimum vector length.");
6785
6786 // If this extraction is not the 'mixed' case where a fixed vector is
6787 // extracted from a scalable vector, ensure that the extraction does not
6788 // overrun the parent vector.
6789 if (VecEC.isScalable() == ResultEC.isScalable()) {
6790 Check(IdxN < VecEC.getKnownMinValue() &&
6791 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6792 "vector_extract would overrun.");
6793 }
6794 break;
6795 }
6796 case Intrinsic::vector_partial_reduce_fadd:
6797 case Intrinsic::vector_partial_reduce_add: {
6800
6801 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6802 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6803
6804 Check((VecWidth % AccWidth) == 0,
6805 "Invalid vector widths for partial "
6806 "reduction. The width of the input vector "
6807 "must be a positive integer multiple of "
6808 "the width of the accumulator vector.");
6809 break;
6810 }
6811 case Intrinsic::experimental_noalias_scope_decl: {
6812 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6813 break;
6814 }
6815 case Intrinsic::preserve_array_access_index:
6816 case Intrinsic::preserve_struct_access_index:
6817 case Intrinsic::aarch64_ldaxr:
6818 case Intrinsic::aarch64_ldxr:
6819 case Intrinsic::arm_ldaex:
6820 case Intrinsic::arm_ldrex: {
6821 Type *ElemTy = Call.getParamElementType(0);
6822 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6823 &Call);
6824 break;
6825 }
6826 case Intrinsic::aarch64_stlxr:
6827 case Intrinsic::aarch64_stxr:
6828 case Intrinsic::arm_stlex:
6829 case Intrinsic::arm_strex: {
6830 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6831 Check(ElemTy,
6832 "Intrinsic requires elementtype attribute on second argument.",
6833 &Call);
6834 break;
6835 }
6836 case Intrinsic::aarch64_prefetch: {
6837 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6838 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6839 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6840 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6841 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6842 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6843 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6844 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6845 break;
6846 }
6847 case Intrinsic::aarch64_range_prefetch: {
6848 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6849 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6850 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6851 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6852 Call);
6853 break;
6854 }
6855 case Intrinsic::callbr_landingpad: {
6856 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6857 Check(CBR, "intrinstic requires callbr operand", &Call);
6858 if (!CBR)
6859 break;
6860
6861 const BasicBlock *LandingPadBB = Call.getParent();
6862 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6863 if (!PredBB) {
6864 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6865 break;
6866 }
6867 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6868 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6869 &Call);
6870 break;
6871 }
6872 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6873 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6874 "block in indirect destination list",
6875 &Call);
6876 const Instruction &First = *LandingPadBB->begin();
6877 Check(&First == &Call, "No other instructions may proceed intrinsic",
6878 &Call);
6879 break;
6880 }
6881 case Intrinsic::amdgcn_cs_chain: {
6882 auto CallerCC = Call.getCaller()->getCallingConv();
6883 switch (CallerCC) {
6884 case CallingConv::AMDGPU_CS:
6885 case CallingConv::AMDGPU_CS_Chain:
6886 case CallingConv::AMDGPU_CS_ChainPreserve:
6887 case CallingConv::AMDGPU_ES:
6888 case CallingConv::AMDGPU_GS:
6889 case CallingConv::AMDGPU_HS:
6890 case CallingConv::AMDGPU_LS:
6891 case CallingConv::AMDGPU_VS:
6892 break;
6893 default:
6894 CheckFailed("Intrinsic cannot be called from functions with this "
6895 "calling convention",
6896 &Call);
6897 break;
6898 }
6899
6900 Check(Call.paramHasAttr(2, Attribute::InReg),
6901 "SGPR arguments must have the `inreg` attribute", &Call);
6902 Check(!Call.paramHasAttr(3, Attribute::InReg),
6903 "VGPR arguments must not have the `inreg` attribute", &Call);
6904
6905 auto *Next = Call.getNextNode();
6906 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6907 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6908 Intrinsic::amdgcn_unreachable;
6909 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6910 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6911 break;
6912 }
6913 case Intrinsic::amdgcn_init_exec_from_input: {
6914 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6915 Check(Arg && Arg->hasInRegAttr(),
6916 "only inreg arguments to the parent function are valid as inputs to "
6917 "this intrinsic",
6918 &Call);
6919 break;
6920 }
6921 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6922 auto CallerCC = Call.getCaller()->getCallingConv();
6923 switch (CallerCC) {
6924 case CallingConv::AMDGPU_CS_Chain:
6925 case CallingConv::AMDGPU_CS_ChainPreserve:
6926 break;
6927 default:
6928 CheckFailed("Intrinsic can only be used from functions with the "
6929 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6930 "calling conventions",
6931 &Call);
6932 break;
6933 }
6934
6935 unsigned InactiveIdx = 1;
6936 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6937 "Value for inactive lanes must not have the `inreg` attribute",
6938 &Call);
6939 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6940 "Value for inactive lanes must be a function argument", &Call);
6941 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6942 "Value for inactive lanes must be a VGPR function argument", &Call);
6943 break;
6944 }
6945 case Intrinsic::amdgcn_call_whole_wave: {
6947 Check(F, "Indirect whole wave calls are not allowed", &Call);
6948
6949 CallingConv::ID CC = F->getCallingConv();
6950 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6951 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6952 &Call);
6953
6954 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6955
6956 Check(Call.arg_size() == F->arg_size(),
6957 "Call argument count must match callee argument count", &Call);
6958
6959 // The first argument of the call is the callee, and the first argument of
6960 // the callee is the active mask. The rest of the arguments must match.
6961 Check(F->arg_begin()->getType()->isIntegerTy(1),
6962 "Callee must have i1 as its first argument", &Call);
6963 for (auto [CallArg, FuncArg] :
6964 drop_begin(zip_equal(Call.args(), F->args()))) {
6965 Check(CallArg->getType() == FuncArg.getType(),
6966 "Argument types must match", &Call);
6967
6968 // Check that inreg attributes match between call site and function
6969 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6970 FuncArg.hasInRegAttr(),
6971 "Argument inreg attributes must match", &Call);
6972 }
6973 break;
6974 }
6975 case Intrinsic::amdgcn_s_prefetch_data: {
6976 Check(
6979 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6980 break;
6981 }
6982 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6983 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6984 Value *Src0 = Call.getArgOperand(0);
6985 Value *Src1 = Call.getArgOperand(1);
6986
6987 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6988 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6989 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6990 Call.getArgOperand(3));
6991 Check(BLGP <= 4, "invalid value for blgp format", Call,
6992 Call.getArgOperand(4));
6993
6994 // AMDGPU::MFMAScaleFormats values
6995 auto getFormatNumRegs = [](unsigned FormatVal) {
6996 switch (FormatVal) {
6997 case 0:
6998 case 1:
6999 return 8u;
7000 case 2:
7001 case 3:
7002 return 6u;
7003 case 4:
7004 return 4u;
7005 default:
7006 llvm_unreachable("invalid format value");
7007 }
7008 };
7009
7010 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7011 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7012 return false;
7013 unsigned NumElts = Ty->getNumElements();
7014 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7015 };
7016
7017 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7018 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7019 Check(isValidSrcASrcBVector(Src0Ty),
7020 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7021 Check(isValidSrcASrcBVector(Src1Ty),
7022 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7023
7024 // Permit excess registers for the format.
7025 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7026 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7027 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7028 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7029 break;
7030 }
7031 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7032 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7033 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7034 Value *Src0 = Call.getArgOperand(1);
7035 Value *Src1 = Call.getArgOperand(3);
7036
7037 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7038 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7039 Check(FmtA <= 4, "invalid value for matrix format", Call,
7040 Call.getArgOperand(0));
7041 Check(FmtB <= 4, "invalid value for matrix format", Call,
7042 Call.getArgOperand(2));
7043
7044 // AMDGPU::MatrixFMT values
7045 auto getFormatNumRegs = [](unsigned FormatVal) {
7046 switch (FormatVal) {
7047 case 0:
7048 case 1:
7049 return 16u;
7050 case 2:
7051 case 3:
7052 return 12u;
7053 case 4:
7054 return 8u;
7055 default:
7056 llvm_unreachable("invalid format value");
7057 }
7058 };
7059
7060 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7061 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7062 return false;
7063 unsigned NumElts = Ty->getNumElements();
7064 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7065 };
7066
7067 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7068 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7069 Check(isValidSrcASrcBVector(Src0Ty),
7070 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7071 Check(isValidSrcASrcBVector(Src1Ty),
7072 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7073
7074 // Permit excess registers for the format.
7075 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7076 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7077 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7078 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7079 break;
7080 }
7081 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7082 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7083 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7084 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7085 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7086 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7087 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7088 Value *PtrArg = Call.getArgOperand(0);
7089 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7091 "cooperative atomic intrinsics require a generic or global pointer",
7092 &Call, PtrArg);
7093
7094 // Last argument must be a MD string
7096 MDNode *MD = cast<MDNode>(Op->getMetadata());
7097 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7098 "cooperative atomic intrinsics require that the last argument is a "
7099 "metadata string",
7100 &Call, Op);
7101 break;
7102 }
7103 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7104 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7105 Value *V = Call.getArgOperand(0);
7106 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7107 Check(RegCount % 8 == 0,
7108 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7109 break;
7110 }
7111 case Intrinsic::experimental_convergence_entry:
7112 case Intrinsic::experimental_convergence_anchor:
7113 break;
7114 case Intrinsic::experimental_convergence_loop:
7115 break;
7116 case Intrinsic::ptrmask: {
7117 Type *Ty0 = Call.getArgOperand(0)->getType();
7118 Type *Ty1 = Call.getArgOperand(1)->getType();
7120 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7121 "of pointers",
7122 &Call);
7123 Check(
7124 Ty0->isVectorTy() == Ty1->isVectorTy(),
7125 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7126 &Call);
7127 if (Ty0->isVectorTy())
7128 Check(cast<VectorType>(Ty0)->getElementCount() ==
7129 cast<VectorType>(Ty1)->getElementCount(),
7130 "llvm.ptrmask intrinsic arguments must have the same number of "
7131 "elements",
7132 &Call);
7133 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7134 "llvm.ptrmask intrinsic second argument bitwidth must match "
7135 "pointer index type size of first argument",
7136 &Call);
7137 break;
7138 }
7139 case Intrinsic::thread_pointer: {
7141 DL.getDefaultGlobalsAddressSpace(),
7142 "llvm.thread.pointer intrinsic return type must be for the globals "
7143 "address space",
7144 &Call);
7145 break;
7146 }
7147 case Intrinsic::threadlocal_address: {
7148 const Value &Arg0 = *Call.getArgOperand(0);
7149 Check(isa<GlobalValue>(Arg0),
7150 "llvm.threadlocal.address first argument must be a GlobalValue");
7151 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7152 "llvm.threadlocal.address operand isThreadLocal() must be true");
7153 break;
7154 }
7155 case Intrinsic::lifetime_start:
7156 case Intrinsic::lifetime_end: {
7157 Value *Ptr = Call.getArgOperand(0);
7159 "llvm.lifetime.start/end can only be used on alloca or poison",
7160 &Call);
7161 break;
7162 }
7163 };
7164
7165 // Verify that there aren't any unmediated control transfers between funclets.
7167 Function *F = Call.getParent()->getParent();
7168 if (F->hasPersonalityFn() &&
7169 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7170 // Run EH funclet coloring on-demand and cache results for other intrinsic
7171 // calls in this function
7172 if (BlockEHFuncletColors.empty())
7173 BlockEHFuncletColors = colorEHFunclets(*F);
7174
7175 // Check for catch-/cleanup-pad in first funclet block
7176 bool InEHFunclet = false;
7177 BasicBlock *CallBB = Call.getParent();
7178 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7179 assert(CV.size() > 0 && "Uncolored block");
7180 for (BasicBlock *ColorFirstBB : CV)
7181 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7182 It != ColorFirstBB->end())
7184 InEHFunclet = true;
7185
7186 // Check for funclet operand bundle
7187 bool HasToken = false;
7188 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7190 HasToken = true;
7191
7192 // This would cause silent code truncation in WinEHPrepare
7193 if (InEHFunclet)
7194 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7195 }
7196 }
7197}
7198
/// Carefully grab the subprogram from a local scope.
///
/// This carefully grabs the subprogram from a local scope, avoiding the
/// built-in assertions that would typically fire.
  // A null scope trivially has no subprogram.
  if (!LocalScope)
    return nullptr;

  // The scope may already be the subprogram itself.
  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Lexical blocks hang off a parent scope; recurse through the *raw* scope
  // link so that malformed chains do not trip the accessor's assertions.
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}
7217
// Verify a #dbg_label record: its label operand and the agreement between the
// label's scope and the record's !dbg attachment.
void Verifier::visit(DbgLabelRecord &DLR) {
      "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  // Resolve the containing block/function (either may be null) purely for
  // richer diagnostics below.
  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  // Walk both scope chains up to their subprograms; bail out quietly if either
  // chain is broken (that is diagnosed elsewhere).
  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return;

  // The label and its !dbg location must belong to the same subprogram.
  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
7245
// Verify a #dbg_value/#dbg_declare/#dbg_assign record: record kind, location
// operand, variable/expression metadata, assign-specific operands, and the
// agreement between the variable's scope and the !dbg attachment.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the four known record kinds are valid.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  // #dbg_assign records carry an additional DIAssignID, address and address
  // expression, which get the same structural validation.
  if (DVR.isDbgAssign()) {
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

          "invalid #dbg_assign address expression", &DVR,
          DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  // Every record needs a genuine DILocation !dbg attachment.
  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  // Finally, cross-check function-argument records for duplicates.
  verifyFnArgs(DVR);
}
7337
7338void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7339 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7340 auto *RetTy = cast<VectorType>(VPCast->getType());
7341 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7342 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7343 "VP cast intrinsic first argument and result vector lengths must be "
7344 "equal",
7345 *VPCast);
7346
7347 switch (VPCast->getIntrinsicID()) {
7348 default:
7349 llvm_unreachable("Unknown VP cast intrinsic");
7350 case Intrinsic::vp_trunc:
7351 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7352 "llvm.vp.trunc intrinsic first argument and result element type "
7353 "must be integer",
7354 *VPCast);
7355 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7356 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7357 "larger than the bit size of the return type",
7358 *VPCast);
7359 break;
7360 case Intrinsic::vp_zext:
7361 case Intrinsic::vp_sext:
7362 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7363 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7364 "element type must be integer",
7365 *VPCast);
7366 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7367 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7368 "argument must be smaller than the bit size of the return type",
7369 *VPCast);
7370 break;
7371 case Intrinsic::vp_fptoui:
7372 case Intrinsic::vp_fptosi:
7373 case Intrinsic::vp_lrint:
7374 case Intrinsic::vp_llrint:
7375 Check(
7376 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7377 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7378 "type must be floating-point and result element type must be integer",
7379 *VPCast);
7380 break;
7381 case Intrinsic::vp_uitofp:
7382 case Intrinsic::vp_sitofp:
7383 Check(
7384 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7385 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7386 "type must be integer and result element type must be floating-point",
7387 *VPCast);
7388 break;
7389 case Intrinsic::vp_fptrunc:
7390 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7391 "llvm.vp.fptrunc intrinsic first argument and result element type "
7392 "must be floating-point",
7393 *VPCast);
7394 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7395 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7396 "larger than the bit size of the return type",
7397 *VPCast);
7398 break;
7399 case Intrinsic::vp_fpext:
7400 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7401 "llvm.vp.fpext intrinsic first argument and result element type "
7402 "must be floating-point",
7403 *VPCast);
7404 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7405 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7406 "smaller than the bit size of the return type",
7407 *VPCast);
7408 break;
7409 case Intrinsic::vp_ptrtoint:
7410 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7411 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7412 "pointer and result element type must be integer",
7413 *VPCast);
7414 break;
7415 case Intrinsic::vp_inttoptr:
7416 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7417 "llvm.vp.inttoptr intrinsic first argument element type must be "
7418 "integer and result element type must be pointer",
7419 *VPCast);
7420 break;
7421 }
7422 }
7423
7424 switch (VPI.getIntrinsicID()) {
7425 case Intrinsic::vp_fcmp: {
7426 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7428 "invalid predicate for VP FP comparison intrinsic", &VPI);
7429 break;
7430 }
7431 case Intrinsic::vp_icmp: {
7432 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7434 "invalid predicate for VP integer comparison intrinsic", &VPI);
7435 break;
7436 }
7437 case Intrinsic::vp_is_fpclass: {
7438 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7439 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7440 "unsupported bits for llvm.vp.is.fpclass test mask");
7441 break;
7442 }
7443 case Intrinsic::experimental_vp_splice: {
7444 VectorType *VecTy = cast<VectorType>(VPI.getType());
7445 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7446 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7447 if (VPI.getParent() && VPI.getParent()->getParent()) {
7448 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7449 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7450 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7451 }
7452 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7453 (Idx >= 0 && Idx < KnownMinNumElements),
7454 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7455 "known minimum number of elements in the vector. For scalable "
7456 "vectors the minimum number of elements is determined from "
7457 "vscale_range.",
7458 &VPI);
7459 break;
7460 }
7461 }
7462}
7463
// Verify constrained FP intrinsics: the argument count must match the
// expected non-metadata arguments plus the metadata operands (exception
// behavior, optional rounding mode, optional compare predicate), and the
// per-intrinsic operand/result type rules must hold.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  // Start from the number of non-metadata arguments this intrinsic carries.
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  bool HasRoundingMD =

  // Add the expected number of metadata operands.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    // Scalar-only conversions.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    // Scalar-only conversions.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
        "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP (or FP vector) source, integer (or integer vector) result, with
    // matching vector-ness and element counts.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the result value for the result-side checks.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer source, FP result; mirror of the fptosi/fptoui checks above.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the result value for the result-side checks.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    // FP -> FP width changes: trunc must narrow, ext must widen.
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
7597
7598void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7599 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7600 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7601
7602 // We don't know whether this intrinsic verified correctly.
7603 if (!V || !E || !E->isValid())
7604 return;
7605
7606 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7607 auto Fragment = E->getFragmentInfo();
7608 if (!Fragment)
7609 return;
7610
7611 // The frontend helps out GDB by emitting the members of local anonymous
7612 // unions as artificial local variables with shared storage. When SROA splits
7613 // the storage for artificial local variables that are smaller than the entire
7614 // union, the overhang piece will be outside of the allotted space for the
7615 // variable and this check fails.
7616 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7617 if (V->isArtificial())
7618 return;
7619
7620 verifyFragmentExpression(*V, *Fragment, &DVR);
7621}
7622
7623template <typename ValueOrMetadata>
7624void Verifier::verifyFragmentExpression(const DIVariable &V,
7626 ValueOrMetadata *Desc) {
7627 // If there's no size, the type is broken, but that should be checked
7628 // elsewhere.
7629 auto VarSize = V.getSizeInBits();
7630 if (!VarSize)
7631 return;
7632
7633 unsigned FragSize = Fragment.SizeInBits;
7634 unsigned FragOffset = Fragment.OffsetInBits;
7635 CheckDI(FragSize + FragOffset <= *VarSize,
7636 "fragment is larger than or outside of variable", Desc, &V);
7637 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7638}
7639
7640void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7641 // This function does not take the scope of noninlined function arguments into
7642 // account. Don't run it if current function is nodebug, because it may
7643 // contain inlined debug intrinsics.
7644 if (!HasDebugInfo)
7645 return;
7646
7647 // For performance reasons only check non-inlined ones.
7648 if (DVR.getDebugLoc()->getInlinedAt())
7649 return;
7650
7651 DILocalVariable *Var = DVR.getVariable();
7652 CheckDI(Var, "#dbg record without variable");
7653
7654 unsigned ArgNo = Var->getArg();
7655 if (!ArgNo)
7656 return;
7657
7658 // Verify there are no duplicate function argument debug info entries.
7659 // These will cause hard-to-debug assertions in the DWARF backend.
7660 if (DebugFnArgs.size() < ArgNo)
7661 DebugFnArgs.resize(ArgNo, nullptr);
7662
7663 auto *Prev = DebugFnArgs[ArgNo - 1];
7664 DebugFnArgs[ArgNo - 1] = Var;
7665 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7666 Prev, Var);
7667}
7668
7669void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7670 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7671
7672 // We don't know whether this intrinsic verified correctly.
7673 if (!E || !E->isValid())
7674 return;
7675
7677 Value *VarValue = DVR.getVariableLocationOp(0);
7678 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7679 return;
7680 // We allow EntryValues for swift async arguments, as they have an
7681 // ABI-guarantee to be turned into a specific register.
7682 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7683 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7684 return;
7685 }
7686
7687 CheckDI(!E->isEntryValue(),
7688 "Entry values are only allowed in MIR unless they target a "
7689 "swiftasync Argument",
7690 &DVR);
7691}
7692
7693void Verifier::verifyCompileUnits() {
7694 // When more than one Module is imported into the same context, such as during
7695 // an LTO build before linking the modules, ODR type uniquing may cause types
7696 // to point to a different CU. This check does not make sense in this case.
7697 if (M.getContext().isODRUniquingDebugTypes())
7698 return;
7699 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7700 SmallPtrSet<const Metadata *, 2> Listed;
7701 if (CUs)
7702 Listed.insert_range(CUs->operands());
7703 for (const auto *CU : CUVisited)
7704 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7705 CUVisited.clear();
7706}
7707
7708void Verifier::verifyDeoptimizeCallingConvs() {
7709 if (DeoptimizeDeclarations.empty())
7710 return;
7711
7712 const Function *First = DeoptimizeDeclarations[0];
7713 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7714 Check(First->getCallingConv() == F->getCallingConv(),
7715 "All llvm.experimental.deoptimize declarations must have the same "
7716 "calling convention",
7717 First, F);
7718 }
7719}
7720
7721void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7722 const OperandBundleUse &BU) {
7723 FunctionType *FTy = Call.getFunctionType();
7724
7725 Check((FTy->getReturnType()->isPointerTy() ||
7726 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7727 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7728 "function returning a pointer or a non-returning function that has a "
7729 "void return type",
7730 Call);
7731
7732 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7733 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7734 "an argument",
7735 Call);
7736
7737 auto *Fn = cast<Function>(BU.Inputs.front());
7738 Intrinsic::ID IID = Fn->getIntrinsicID();
7739
7740 if (IID) {
7741 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7742 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7743 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7744 "invalid function argument", Call);
7745 } else {
7746 StringRef FnName = Fn->getName();
7747 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7748 FnName == "objc_claimAutoreleasedReturnValue" ||
7749 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7750 "invalid function argument", Call);
7751 }
7752}
7753
7754void Verifier::verifyNoAliasScopeDecl() {
7755 if (NoAliasScopeDecls.empty())
7756 return;
7757
7758 // only a single scope must be declared at a time.
7759 for (auto *II : NoAliasScopeDecls) {
7760 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7761 "Not a llvm.experimental.noalias.scope.decl ?");
7762 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7764 Check(ScopeListMV != nullptr,
7765 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7766 "argument",
7767 II);
7768
7769 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7770 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7771 Check(ScopeListMD->getNumOperands() == 1,
7772 "!id.scope.list must point to a list with a single scope", II);
7773 visitAliasScopeListMetadata(ScopeListMD);
7774 }
7775
7776 // Only check the domination rule when requested. Once all passes have been
7777 // adapted this option can go away.
7779 return;
7780
7781 // Now sort the intrinsics based on the scope MDNode so that declarations of
7782 // the same scopes are next to each other.
7783 auto GetScope = [](IntrinsicInst *II) {
7784 const auto *ScopeListMV = cast<MetadataAsValue>(
7786 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7787 };
7788
7789 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7790 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7791 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7792 return GetScope(Lhs) < GetScope(Rhs);
7793 };
7794
7795 llvm::sort(NoAliasScopeDecls, Compare);
7796
7797 // Go over the intrinsics and check that for the same scope, they are not
7798 // dominating each other.
7799 auto ItCurrent = NoAliasScopeDecls.begin();
7800 while (ItCurrent != NoAliasScopeDecls.end()) {
7801 auto CurScope = GetScope(*ItCurrent);
7802 auto ItNext = ItCurrent;
7803 do {
7804 ++ItNext;
7805 } while (ItNext != NoAliasScopeDecls.end() &&
7806 GetScope(*ItNext) == CurScope);
7807
7808 // [ItCurrent, ItNext) represents the declarations for the same scope.
7809 // Ensure they are not dominating each other.. but only if it is not too
7810 // expensive.
7811 if (ItNext - ItCurrent < 32)
7812 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7813 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7814 if (I != J)
7815 Check(!DT.dominates(I, J),
7816 "llvm.experimental.noalias.scope.decl dominates another one "
7817 "with the same scope",
7818 I);
7819 ItCurrent = ItNext;
7820 }
7821}
7822
7823//===----------------------------------------------------------------------===//
7824// Implement the public interfaces to this file...
7825//===----------------------------------------------------------------------===//
7826
7828 Function &F = const_cast<Function &>(f);
7829
7830 // Don't use a raw_null_ostream. Printing IR is expensive.
7831 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7832
7833 // Note that this function's return value is inverted from what you would
7834 // expect of a function called "verify".
7835 return !V.verify(F);
7836}
7837
7839 bool *BrokenDebugInfo) {
7840 // Don't use a raw_null_ostream. Printing IR is expensive.
7841 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7842
7843 bool Broken = false;
7844 for (const Function &F : M)
7845 Broken |= !V.verify(F);
7846
7847 Broken |= !V.verify();
7848 if (BrokenDebugInfo)
7849 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7850 // Note that this function's return value is inverted from what you would
7851 // expect of a function called "verify".
7852 return Broken;
7853}
7854
7855namespace {
7856
7857struct VerifierLegacyPass : public FunctionPass {
7858 static char ID;
7859
7860 std::unique_ptr<Verifier> V;
7861 bool FatalErrors = true;
7862
7863 VerifierLegacyPass() : FunctionPass(ID) {
7865 }
7866 explicit VerifierLegacyPass(bool FatalErrors)
7867 : FunctionPass(ID),
7868 FatalErrors(FatalErrors) {
7870 }
7871
7872 bool doInitialization(Module &M) override {
7873 V = std::make_unique<Verifier>(
7874 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7875 return false;
7876 }
7877
7878 bool runOnFunction(Function &F) override {
7879 if (!V->verify(F) && FatalErrors) {
7880 errs() << "in function " << F.getName() << '\n';
7881 report_fatal_error("Broken function found, compilation aborted!");
7882 }
7883 return false;
7884 }
7885
7886 bool doFinalization(Module &M) override {
7887 bool HasErrors = false;
7888 for (Function &F : M)
7889 if (F.isDeclaration())
7890 HasErrors |= !V->verify(F);
7891
7892 HasErrors |= !V->verify();
7893 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7894 report_fatal_error("Broken module found, compilation aborted!");
7895 return false;
7896 }
7897
7898 void getAnalysisUsage(AnalysisUsage &AU) const override {
7899 AU.setPreservesAll();
7900 }
7901};
7902
7903} // end anonymous namespace
7904
7905/// Helper to issue failure from the TBAA verification
7906template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7907 if (Diagnostic)
7908 return Diagnostic->CheckFailed(Args...);
7909}
7910
7911#define CheckTBAA(C, ...) \
7912 do { \
7913 if (!(C)) { \
7914 CheckFailed(__VA_ARGS__); \
7915 return false; \
7916 } \
7917 } while (false)
7918
7919/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7920/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7921/// struct-type node describing an aggregate data structure (like a struct).
7922TBAAVerifier::TBAABaseNodeSummary
7923TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7924 bool IsNewFormat) {
7925 if (BaseNode->getNumOperands() < 2) {
7926 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7927 return {true, ~0u};
7928 }
7929
7930 auto Itr = TBAABaseNodes.find(BaseNode);
7931 if (Itr != TBAABaseNodes.end())
7932 return Itr->second;
7933
7934 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7935 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7936 (void)InsertResult;
7937 assert(InsertResult.second && "We just checked!");
7938 return Result;
7939}
7940
7941TBAAVerifier::TBAABaseNodeSummary
7942TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7943 const MDNode *BaseNode, bool IsNewFormat) {
7944 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7945
7946 if (BaseNode->getNumOperands() == 2) {
7947 // Scalar nodes can only be accessed at offset 0.
7948 return isValidScalarTBAANode(BaseNode)
7949 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7950 : InvalidNode;
7951 }
7952
7953 if (IsNewFormat) {
7954 if (BaseNode->getNumOperands() % 3 != 0) {
7955 CheckFailed("Access tag nodes must have the number of operands that is a "
7956 "multiple of 3!", BaseNode);
7957 return InvalidNode;
7958 }
7959 } else {
7960 if (BaseNode->getNumOperands() % 2 != 1) {
7961 CheckFailed("Struct tag nodes must have an odd number of operands!",
7962 BaseNode);
7963 return InvalidNode;
7964 }
7965 }
7966
7967 // Check the type size field.
7968 if (IsNewFormat) {
7969 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7970 BaseNode->getOperand(1));
7971 if (!TypeSizeNode) {
7972 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7973 return InvalidNode;
7974 }
7975 }
7976
7977 // Check the type name field. In the new format it can be anything.
7978 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7979 CheckFailed("Struct tag nodes have a string as their first operand",
7980 BaseNode);
7981 return InvalidNode;
7982 }
7983
7984 bool Failed = false;
7985
7986 std::optional<APInt> PrevOffset;
7987 unsigned BitWidth = ~0u;
7988
7989 // We've already checked that BaseNode is not a degenerate root node with one
7990 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7991 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7992 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7993 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7994 Idx += NumOpsPerField) {
7995 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7996 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7997 if (!isa<MDNode>(FieldTy)) {
7998 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7999 Failed = true;
8000 continue;
8001 }
8002
8003 auto *OffsetEntryCI =
8005 if (!OffsetEntryCI) {
8006 CheckFailed("Offset entries must be constants!", I, BaseNode);
8007 Failed = true;
8008 continue;
8009 }
8010
8011 if (BitWidth == ~0u)
8012 BitWidth = OffsetEntryCI->getBitWidth();
8013
8014 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8015 CheckFailed(
8016 "Bitwidth between the offsets and struct type entries must match", I,
8017 BaseNode);
8018 Failed = true;
8019 continue;
8020 }
8021
8022 // NB! As far as I can tell, we generate a non-strictly increasing offset
8023 // sequence only from structs that have zero size bit fields. When
8024 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8025 // pick the field lexically the latest in struct type metadata node. This
8026 // mirrors the actual behavior of the alias analysis implementation.
8027 bool IsAscending =
8028 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8029
8030 if (!IsAscending) {
8031 CheckFailed("Offsets must be increasing!", I, BaseNode);
8032 Failed = true;
8033 }
8034
8035 PrevOffset = OffsetEntryCI->getValue();
8036
8037 if (IsNewFormat) {
8038 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8039 BaseNode->getOperand(Idx + 2));
8040 if (!MemberSizeNode) {
8041 CheckFailed("Member size entries must be constants!", I, BaseNode);
8042 Failed = true;
8043 continue;
8044 }
8045 }
8046 }
8047
8048 return Failed ? InvalidNode
8049 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8050}
8051
8052static bool IsRootTBAANode(const MDNode *MD) {
8053 return MD->getNumOperands() < 2;
8054}
8055
8056static bool IsScalarTBAANodeImpl(const MDNode *MD,
8058 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8059 return false;
8060
8061 if (!isa<MDString>(MD->getOperand(0)))
8062 return false;
8063
8064 if (MD->getNumOperands() == 3) {
8066 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8067 return false;
8068 }
8069
8070 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8071 return Parent && Visited.insert(Parent).second &&
8072 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8073}
8074
8075bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8076 auto ResultIt = TBAAScalarNodes.find(MD);
8077 if (ResultIt != TBAAScalarNodes.end())
8078 return ResultIt->second;
8079
8080 SmallPtrSet<const MDNode *, 4> Visited;
8081 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8082 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8083 (void)InsertResult;
8084 assert(InsertResult.second && "Just checked!");
8085
8086 return Result;
8087}
8088
8089/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8090/// Offset in place to be the offset within the field node returned.
8091///
8092/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8093MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8094 const MDNode *BaseNode,
8095 APInt &Offset,
8096 bool IsNewFormat) {
8097 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8098
8099 // Scalar nodes have only one possible "field" -- their parent in the access
8100 // hierarchy. Offset must be zero at this point, but our caller is supposed
8101 // to check that.
8102 if (BaseNode->getNumOperands() == 2)
8103 return cast<MDNode>(BaseNode->getOperand(1));
8104
8105 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8106 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8107 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8108 Idx += NumOpsPerField) {
8109 auto *OffsetEntryCI =
8110 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8111 if (OffsetEntryCI->getValue().ugt(Offset)) {
8112 if (Idx == FirstFieldOpNo) {
8113 CheckFailed("Could not find TBAA parent in struct type node", I,
8114 BaseNode, &Offset);
8115 return nullptr;
8116 }
8117
8118 unsigned PrevIdx = Idx - NumOpsPerField;
8119 auto *PrevOffsetEntryCI =
8120 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8121 Offset -= PrevOffsetEntryCI->getValue();
8122 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8123 }
8124 }
8125
8126 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8127 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8128 BaseNode->getOperand(LastIdx + 1));
8129 Offset -= LastOffsetEntryCI->getValue();
8130 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8131}
8132
8134 if (!Type || Type->getNumOperands() < 3)
8135 return false;
8136
8137 // In the new format type nodes shall have a reference to the parent type as
8138 // its first operand.
8139 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8140}
8141
8143 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8144 MD);
8145
8146 if (I)
8150 "This instruction shall not have a TBAA access tag!", I);
8151
8152 bool IsStructPathTBAA =
8153 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8154
8155 CheckTBAA(IsStructPathTBAA,
8156 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8157 I);
8158
8159 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8160 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8161
8162 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8163
8164 if (IsNewFormat) {
8165 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8166 "Access tag metadata must have either 4 or 5 operands", I, MD);
8167 } else {
8168 CheckTBAA(MD->getNumOperands() < 5,
8169 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8170 }
8171
8172 // Check the access size field.
8173 if (IsNewFormat) {
8174 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8175 MD->getOperand(3));
8176 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8177 }
8178
8179 // Check the immutability flag.
8180 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8181 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8182 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8183 MD->getOperand(ImmutabilityFlagOpNo));
8184 CheckTBAA(IsImmutableCI,
8185 "Immutability tag on struct tag metadata must be a constant", I,
8186 MD);
8187 CheckTBAA(
8188 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8189 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8190 MD);
8191 }
8192
8193 CheckTBAA(BaseNode && AccessType,
8194 "Malformed struct tag metadata: base and access-type "
8195 "should be non-null and point to Metadata nodes",
8196 I, MD, BaseNode, AccessType);
8197
8198 if (!IsNewFormat) {
8199 CheckTBAA(isValidScalarTBAANode(AccessType),
8200 "Access type node must be a valid scalar type", I, MD,
8201 AccessType);
8202 }
8203
8205 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8206
8207 APInt Offset = OffsetCI->getValue();
8208 bool SeenAccessTypeInPath = false;
8209
8210 SmallPtrSet<MDNode *, 4> StructPath;
8211
8212 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8213 BaseNode =
8214 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8215 if (!StructPath.insert(BaseNode).second) {
8216 CheckFailed("Cycle detected in struct path", I, MD);
8217 return false;
8218 }
8219
8220 bool Invalid;
8221 unsigned BaseNodeBitWidth;
8222 std::tie(Invalid, BaseNodeBitWidth) =
8223 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8224
8225 // If the base node is invalid in itself, then we've already printed all the
8226 // errors we wanted to print.
8227 if (Invalid)
8228 return false;
8229
8230 SeenAccessTypeInPath |= BaseNode == AccessType;
8231
8232 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8233 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8234 MD, &Offset);
8235
8236 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8237 (BaseNodeBitWidth == 0 && Offset == 0) ||
8238 (IsNewFormat && BaseNodeBitWidth == ~0u),
8239 "Access bit-width not the same as description bit-width", I, MD,
8240 BaseNodeBitWidth, Offset.getBitWidth());
8241
8242 if (IsNewFormat && SeenAccessTypeInPath)
8243 break;
8244 }
8245
8246 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8247 MD);
8248 return true;
8249}
8250
8251char VerifierLegacyPass::ID = 0;
8252INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8253
8255 return new VerifierLegacyPass(FatalErrors);
8256}
8257
8258AnalysisKey VerifierAnalysis::Key;
8265
8270
8272 auto Res = AM.getResult<VerifierAnalysis>(M);
8273 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8274 report_fatal_error("Broken module found, compilation aborted!");
8275
8276 return PreservedAnalyses::all();
8277}
8278
8280 auto res = AM.getResult<VerifierAnalysis>(F);
8281 if (res.IRBroken && FatalErrors)
8282 report_fatal_error("Broken function found, compilation aborted!");
8283
8284 return PreservedAnalyses::all();
8285}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:682
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:723
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:6080
bool isFiniteNonZero() const
Definition APFloat.h:1522
bool isNegative() const
Definition APFloat.h:1512
const fltSemantics & getSemantics() const
Definition APFloat.h:1520
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1571
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:624
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:293
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:712
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:819
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:306
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:155
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:149
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:299
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:151
LLVMContext & Context
Definition Verifier.cpp:146
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:153
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:288
const Module & M
Definition Verifier.cpp:142
const DataLayout & DL
Definition Verifier.cpp:145
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:315
const Triple & TT
Definition Verifier.cpp:144
ModuleSlotTracker MST
Definition Verifier.cpp:143