LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
126#include <algorithm>
127#include <cassert>
128#include <cstdint>
129#include <memory>
130#include <optional>
131#include <string>
132#include <utility>
133
134using namespace llvm;
135
137 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
138 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
139 "scopes are not dominating"));
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "declare_value";
197 break;
199 *OS << "assign";
200 break;
202 *OS << "end";
203 break;
205 *OS << "any";
206 break;
207 };
208 }
209
210 void Write(const Metadata *MD) {
211 if (!MD)
212 return;
213 MD->print(*OS, MST, &M);
214 *OS << '\n';
215 }
216
217 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
218 Write(MD.get());
219 }
220
221 void Write(const NamedMDNode *NMD) {
222 if (!NMD)
223 return;
224 NMD->print(*OS, MST);
225 *OS << '\n';
226 }
227
228 void Write(Type *T) {
229 if (!T)
230 return;
231 *OS << ' ' << *T;
232 }
233
234 void Write(const Comdat *C) {
235 if (!C)
236 return;
237 *OS << *C;
238 }
239
240 void Write(const APInt *AI) {
241 if (!AI)
242 return;
243 *OS << *AI << '\n';
244 }
245
246 void Write(const unsigned i) { *OS << i << '\n'; }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const Attribute *A) {
250 if (!A)
251 return;
252 *OS << A->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeSet *AS) {
257 if (!AS)
258 return;
259 *OS << AS->getAsString() << '\n';
260 }
261
262 // NOLINTNEXTLINE(readability-identifier-naming)
263 void Write(const AttributeList *AL) {
264 if (!AL)
265 return;
266 AL->print(*OS);
267 }
268
269 void Write(Printable P) { *OS << P << '\n'; }
270
271 template <typename T> void Write(ArrayRef<T> Vs) {
272 for (const T &V : Vs)
273 Write(V);
274 }
275
/// Print each argument, in order, via its matching Write overload. Uses a
/// C++17 comma fold instead of explicit recursion; evaluation order is
/// left-to-right either way.
template <typename T1, typename... Ts>
void WriteTs(const T1 &V1, const Ts &...Vs) {
  Write(V1);
  (Write(Vs), ...);
}
281
// Recursion terminator for WriteTs: nothing left to print.
282 template <typename... Ts> void WriteTs() {}
283
284public:
285 /// A check failed, so printout out the condition and the message.
286 ///
287 /// This provides a nice place to put a breakpoint if you want to see why
288 /// something is not correct.
289 void CheckFailed(const Twine &Message) {
290 if (OS)
291 *OS << Message << '\n';
292 Broken = true;
293 }
294
295 /// A check failed (with values to print).
296 ///
297 /// This calls the Message-only version so that the above is easier to set a
298 /// breakpoint on.
299 template <typename T1, typename... Ts>
300 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
301 CheckFailed(Message);
302 if (OS)
303 WriteTs(V1, Vs...);
304 }
305
306 /// A debug info check failed.
// Records broken debug info. Unlike CheckFailed, this does not set Broken
// directly here — broken debug info is recoverable by stripping it.
307 void DebugInfoCheckFailed(const Twine &Message) {
308 if (OS)
309 *OS << Message << '\n';
// NOTE(review): one source line is elided from this extract here — it
// presumably folds TreatBrokenDebugInfoAsError into Broken. Confirm
// against the upstream file.
311 BrokenDebugInfo = true;
312 }
313
314 /// A debug info check failed (with values to print).
315 template <typename T1, typename... Ts>
316 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
317 const Ts &... Vs) {
318 DebugInfoCheckFailed(Message);
319 if (OS)
320 WriteTs(V1, Vs...);
321 }
322};
323
324namespace {
325
326class Verifier : public InstVisitor<Verifier>, VerifierSupport {
327 friend class InstVisitor<Verifier>;
328 DominatorTree DT;
329
330 /// When verifying a basic block, keep track of all of the
331 /// instructions we have seen so far.
332 ///
333 /// This allows us to do efficient dominance checks for the case when an
334 /// instruction has an operand that is an instruction in the same block.
335 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
336
337 /// Keep track of the metadata nodes that have been checked already.
339
340 /// Keep track which DISubprogram is attached to which function.
342
343 /// Track all DICompileUnits visited.
345
346 /// The result type for a landingpad.
347 Type *LandingPadResultTy;
348
349 /// Whether we've seen a call to @llvm.localescape in this function
350 /// already.
351 bool SawFrameEscape;
352
353 /// Whether the current function has a DISubprogram attached to it.
354 bool HasDebugInfo = false;
355
356 /// Stores the count of how many objects were passed to llvm.localescape for a
357 /// given function and the largest index passed to llvm.localrecover.
359
360 // Maps catchswitches and cleanuppads that unwind to siblings to the
361 // terminators that indicate the unwind, used to detect cycles therein.
363
364 /// Cache which blocks are in which funclet, if an EH funclet personality is
365 /// in use. Otherwise empty.
366 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
367
368 /// Cache of constants visited in search of ConstantExprs.
369 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
370
371 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
372 SmallVector<const Function *, 4> DeoptimizeDeclarations;
373
374 /// Cache of attribute lists verified.
375 SmallPtrSet<const void *, 32> AttributeListsVisited;
376
377 // Verify that this GlobalValue is only used in this module.
378 // This map is used to avoid visiting uses twice. We can arrive at a user
379 // twice, if they have multiple operands. In particular for very large
380 // constant expressions, we can arrive at a particular user many times.
381 SmallPtrSet<const Value *, 32> GlobalValueVisited;
382
383 // Keeps track of duplicate function argument debug info.
385
386 TBAAVerifier TBAAVerifyHelper;
387 ConvergenceVerifier ConvergenceVerifyHelper;
388
389 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
390
391 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
392
393public:
// Construct a verifier over module \p M. \p OS (may be null) receives
// diagnostics; \p ShouldTreatBrokenDebugInfoAsError selects whether broken
// debug info is a hard verification error or merely recorded.
394 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
395 const Module &M)
396 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
397 SawFrameEscape(false), TBAAVerifyHelper(this) {
// Assigned in the body: this flag lives in the VerifierSupport base.
398 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
399 }
400
401 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
402
/// Verify a single function of the module this instance was built for.
/// Returns true iff no (non-debug) verification failure was recorded;
/// diagnostics have already been written through CheckFailed on failure.
403 bool verify(const Function &F) {
404 llvm::TimeTraceScope timeScope("Verifier");
405 assert(F.getParent() == &M &&
406 "An instance of this class only works with a specific module!");
407
408 // First ensure the function is well-enough formed to compute dominance
409 // information, and directly compute a dominance tree. We don't rely on the
410 // pass manager to provide this as it isolates us from a potentially
411 // out-of-date dominator tree and makes it significantly more complex to run
412 // this code outside of a pass manager.
413 // FIXME: It's really gross that we have to cast away constness here.
414 if (!F.empty())
415 DT.recalculate(const_cast<Function &>(F));
416
// Every basic block must end with a terminator. If one doesn't, report it
// and bail out immediately rather than running the visitors over IR that
// is too malformed to walk.
417 for (const BasicBlock &BB : F) {
418 if (!BB.empty() && BB.back().isTerminator())
419 continue;
420
421 if (OS) {
422 *OS << "Basic Block in function '" << F.getName()
423 << "' does not have terminator!\n";
424 BB.printAsOperand(*OS, true, MST);
425 *OS << "\n";
426 }
427 return false;
428 }
429
// Convergence verification reports through the same CheckFailed path as
// every other check.
430 auto FailureCB = [this](const Twine &Message) {
431 this->CheckFailed(Message);
432 };
433 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
434
// Walk every instruction (and its attached debug records) in the function.
435 Broken = false;
436 // FIXME: We strip const here because the inst visitor strips const.
437 visit(const_cast<Function &>(F));
438 verifySiblingFuncletUnwinds();
439
440 if (ConvergenceVerifyHelper.sawTokens())
441 ConvergenceVerifyHelper.verify(DT);
442
// Reset all per-function state so this instance can verify the next
// function of the module.
443 InstsInThisBlock.clear();
444 DebugFnArgs.clear();
445 LandingPadResultTy = nullptr;
446 SawFrameEscape = false;
447 SiblingFuncletInfo.clear();
448 verifyNoAliasScopeDecl();
449 NoAliasScopeDecls.clear();
450
451 return !Broken;
452 }
453
454 /// Verify the module that this instance of \c Verifier was initialized with.
// Performs the module-level checks: globals, aliases, ifuncs, named
// metadata, comdats, module flags, and module-wide debug-info consistency.
// Returns true iff no failure was recorded.
455 bool verify() {
456 Broken = false;
457
458 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
459 for (const Function &F : M)
460 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
461 DeoptimizeDeclarations.push_back(&F);
462
463 // Now that we've visited every function, verify that we never asked to
464 // recover a frame index that wasn't escaped.
465 verifyFrameRecoverIndices();
// Visit every global object, alias, and ifunc in the module.
466 for (const GlobalVariable &GV : M.globals())
467 visitGlobalVariable(GV);
468
469 for (const GlobalAlias &GA : M.aliases())
470 visitGlobalAlias(GA);
471
472 for (const GlobalIFunc &GI : M.ifuncs())
473 visitGlobalIFunc(GI);
474
475 for (const NamedMDNode &NMD : M.named_metadata())
476 visitNamedMDNode(NMD);
477
478 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
479 visitComdat(SMEC.getValue());
480
// Module-level metadata checks.
481 visitModuleFlags();
482 visitModuleIdents();
483 visitModuleCommandLines();
484 visitModuleErrnoTBAA();
485
486 verifyCompileUnits();
487
488 verifyDeoptimizeCallingConvs();
489 DISubprogramAttachments.clear();
490 return !Broken;
491 }
492
493private:
494 /// Whether a metadata node is allowed to be, or contain, a DILocation.
495 enum class AreDebugLocsAllowed { No, Yes };
496
497 /// Metadata that should be treated as a range, with slightly different
498 /// requirements.
499 enum class RangeLikeMetadataKind {
500 Range, // MD_range
501 AbsoluteSymbol, // MD_absolute_symbol
502 NoaliasAddrspace // MD_noalias_addrspace
503 };
504
505 // Verification methods...
506 void visitGlobalValue(const GlobalValue &GV);
507 void visitGlobalVariable(const GlobalVariable &GV);
508 void visitGlobalAlias(const GlobalAlias &GA);
509 void visitGlobalIFunc(const GlobalIFunc &GI);
510 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
511 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
512 const GlobalAlias &A, const Constant &C);
513 void visitNamedMDNode(const NamedMDNode &NMD);
514 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
515 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
516 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
517 void visitDIArgList(const DIArgList &AL, Function *F);
518 void visitComdat(const Comdat &C);
519 void visitModuleIdents();
520 void visitModuleCommandLines();
521 void visitModuleErrnoTBAA();
522 void visitModuleFlags();
523 void visitModuleFlag(const MDNode *Op,
524 DenseMap<const MDString *, const MDNode *> &SeenIDs,
525 SmallVectorImpl<const MDNode *> &Requirements);
526 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
527 void visitFunction(const Function &F);
528 void visitBasicBlock(BasicBlock &BB);
529 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
530 RangeLikeMetadataKind Kind);
531 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
532 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
533 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
534 void visitNofreeMetadata(Instruction &I, MDNode *MD);
535 void visitProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallStackMetadata(MDNode *MD);
537 void visitMemProfMetadata(Instruction &I, MDNode *MD);
538 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
539 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
540 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
541 void visitMMRAMetadata(Instruction &I, MDNode *MD);
542 void visitAnnotationMetadata(MDNode *Annotation);
543 void visitAliasScopeMetadata(const MDNode *MD);
544 void visitAliasScopeListMetadata(const MDNode *MD);
545 void visitAccessGroupMetadata(const MDNode *MD);
546 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
547 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
548
549 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
550#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
551#include "llvm/IR/Metadata.def"
552 void visitDIScope(const DIScope &N);
553 void visitDIVariable(const DIVariable &N);
554 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
555 void visitDITemplateParameter(const DITemplateParameter &N);
556
557 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
558
559 void visit(DbgLabelRecord &DLR);
560 void visit(DbgVariableRecord &DVR);
561 // InstVisitor overrides...
562 using InstVisitor<Verifier>::visit;
563 void visitDbgRecords(Instruction &I);
564 void visit(Instruction &I);
565
566 void visitTruncInst(TruncInst &I);
567 void visitZExtInst(ZExtInst &I);
568 void visitSExtInst(SExtInst &I);
569 void visitFPTruncInst(FPTruncInst &I);
570 void visitFPExtInst(FPExtInst &I);
571 void visitFPToUIInst(FPToUIInst &I);
572 void visitFPToSIInst(FPToSIInst &I);
573 void visitUIToFPInst(UIToFPInst &I);
574 void visitSIToFPInst(SIToFPInst &I);
575 void visitIntToPtrInst(IntToPtrInst &I);
576 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
577 void visitPtrToAddrInst(PtrToAddrInst &I);
578 void visitPtrToIntInst(PtrToIntInst &I);
579 void visitBitCastInst(BitCastInst &I);
580 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
581 void visitPHINode(PHINode &PN);
582 void visitCallBase(CallBase &Call);
583 void visitUnaryOperator(UnaryOperator &U);
584 void visitBinaryOperator(BinaryOperator &B);
585 void visitICmpInst(ICmpInst &IC);
586 void visitFCmpInst(FCmpInst &FC);
587 void visitExtractElementInst(ExtractElementInst &EI);
588 void visitInsertElementInst(InsertElementInst &EI);
589 void visitShuffleVectorInst(ShuffleVectorInst &EI);
590 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
591 void visitCallInst(CallInst &CI);
592 void visitInvokeInst(InvokeInst &II);
593 void visitGetElementPtrInst(GetElementPtrInst &GEP);
594 void visitLoadInst(LoadInst &LI);
595 void visitStoreInst(StoreInst &SI);
596 void verifyDominatesUse(Instruction &I, unsigned i);
597 void visitInstruction(Instruction &I);
598 void visitTerminator(Instruction &I);
599 void visitBranchInst(BranchInst &BI);
600 void visitReturnInst(ReturnInst &RI);
601 void visitSwitchInst(SwitchInst &SI);
602 void visitIndirectBrInst(IndirectBrInst &BI);
603 void visitCallBrInst(CallBrInst &CBI);
604 void visitSelectInst(SelectInst &SI);
605 void visitUserOp1(Instruction &I);
606 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
607 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
608 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
609 void visitVPIntrinsic(VPIntrinsic &VPI);
610 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
611 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
612 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
613 void visitFenceInst(FenceInst &FI);
614 void visitAllocaInst(AllocaInst &AI);
615 void visitExtractValueInst(ExtractValueInst &EVI);
616 void visitInsertValueInst(InsertValueInst &IVI);
617 void visitEHPadPredecessors(Instruction &I);
618 void visitLandingPadInst(LandingPadInst &LPI);
619 void visitResumeInst(ResumeInst &RI);
620 void visitCatchPadInst(CatchPadInst &CPI);
621 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
622 void visitCleanupPadInst(CleanupPadInst &CPI);
623 void visitFuncletPadInst(FuncletPadInst &FPI);
624 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
625 void visitCleanupReturnInst(CleanupReturnInst &CRI);
626
627 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
628 void verifySwiftErrorValue(const Value *SwiftErrorVal);
629 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
630 void verifyMustTailCall(CallInst &CI);
631 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
632 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
633 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
634 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
635 const Value *V);
636 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
637 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
638 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
639 void verifyUnknownProfileMetadata(MDNode *MD);
640 void visitConstantExprsRecursively(const Constant *EntryC);
641 void visitConstantExpr(const ConstantExpr *CE);
642 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
643 void verifyInlineAsmCall(const CallBase &Call);
644 void verifyStatepoint(const CallBase &Call);
645 void verifyFrameRecoverIndices();
646 void verifySiblingFuncletUnwinds();
647
648 void verifyFragmentExpression(const DbgVariableRecord &I);
649 template <typename ValueOrMetadata>
650 void verifyFragmentExpression(const DIVariable &V,
652 ValueOrMetadata *Desc);
653 void verifyFnArgs(const DbgVariableRecord &DVR);
654 void verifyNotEntryValue(const DbgVariableRecord &I);
655
656 /// Module-level debug info verification...
657 void verifyCompileUnits();
658
659 /// Module-level verification that all @llvm.experimental.deoptimize
660 /// declarations share the same calling convention.
661 void verifyDeoptimizeCallingConvs();
662
663 void verifyAttachedCallBundle(const CallBase &Call,
664 const OperandBundleUse &BU);
665
666 /// Verify the llvm.experimental.noalias.scope.decl declarations
667 void verifyNoAliasScopeDecl();
668};
669
670} // end anonymous namespace
671
672/// We know that cond should be true, if not print an error message.
673#define Check(C, ...) \
674 do { \
675 if (!(C)) { \
676 CheckFailed(__VA_ARGS__); \
677 return; \
678 } \
679 } while (false)
680
681/// We know that a debug info condition should be true, if not print
682/// an error message.
683#define CheckDI(C, ...) \
684 do { \
685 if (!(C)) { \
686 DebugInfoCheckFailed(__VA_ARGS__); \
687 return; \
688 } \
689 } while (false)
690
// Validate the debug records attached to \p I: the marker must point back at
// the instruction, PHIs may not carry records, and each record's location
// and contents are visited in turn.
691void Verifier::visitDbgRecords(Instruction &I) {
692 if (!I.DebugMarker)
693 return;
694 CheckDI(I.DebugMarker->MarkedInstr == &I,
695 "Instruction has invalid DebugMarker", &I);
696 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
697 "PHI Node must not have any attached DbgRecords", &I);
698 for (DbgRecord &DR : I.getDbgRecordRange()) {
699 CheckDI(DR.getMarker() == I.DebugMarker,
700 "DbgRecord had invalid DebugMarker", &I, &DR);
// NOTE(review): source line 702 is elided in this extract — presumably the
// dyn_cast initializer producing the record's DILocation. Confirm against
// the upstream file.
701 if (auto *Loc =
703 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
704 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
705 visit(*DVR);
706 // These have to appear after `visit` for consistency with existing
707 // intrinsic behaviour.
708 verifyFragmentExpression(*DVR);
709 verifyNotEntryValue(*DVR);
710 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
711 visit(*DLR);
712 }
713 }
714}
715
// Per-instruction entry point: validate attached debug records first, then
// reject null operands before any per-opcode visitor can dereference them.
716void Verifier::visit(Instruction &I) {
717 visitDbgRecords(I);
718 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
719 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
// NOTE(review): source line 720 is elided in this extract — presumably the
// dispatch into InstVisitor. Confirm against the upstream file.
721}
722
// Walk the transitive users of \p User, invoking \p Callback on each value
// reached. A callback returning true continues the traversal into that
// value's own users; returning false stops descending along that edge.
// NOTE(review): source lines 725 and 730 are elided in this extract (the
// Visited-set parameter and the worklist initialization, presumably).
// Confirm against the upstream file.
724static void forEachUser(const Value *User,
726 llvm::function_ref<bool(const Value *)> Callback) {
727 if (!Visited.insert(User).second)
728 return;
729
731 while (!WorkList.empty()) {
732 const Value *Cur = WorkList.pop_back_val();
// A user can be reached through several operands (common in very large
// constant expressions); the Visited set keeps the walk linear.
733 if (!Visited.insert(Cur).second)
734 continue;
735 if (Callback(Cur))
736 append_range(WorkList, Cur->materialized_users());
737 }
738}
739
// Checks common to every GlobalValue: linkage/visibility consistency,
// !associated and !implicit_ref metadata, !absolute_symbol ranges, DLL
// storage classes, dso_local implications, and that all users live in this
// same module.
// NOTE(review): this extract has several source lines elided (including the
// opening Check( of some multi-line assertions, e.g. lines 741, 801, 814,
// 819, 825-826). Confirm the full conditions against the upstream file.
740void Verifier::visitGlobalValue(const GlobalValue &GV) {
742 "Global is external, but doesn't have external or weak linkage!", &GV);
743
744 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
// !associated must hold exactly one ValueAsMetadata operand that is a
// pointer to some *other* global object.
745 if (const MDNode *Associated =
746 GO->getMetadata(LLVMContext::MD_associated)) {
747 Check(Associated->getNumOperands() == 1,
748 "associated metadata must have one operand", &GV, Associated);
749 const Metadata *Op = Associated->getOperand(0).get();
750 Check(Op, "associated metadata must have a global value", GO, Associated);
751
752 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
753 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
754 if (VM) {
755 Check(isa<PointerType>(VM->getValue()->getType()),
756 "associated value must be pointer typed", GV, Associated);
757
758 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
759 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
760 "associated metadata must point to a GlobalObject", GO, Stripped);
761 Check(Stripped != GO,
762 "global values should not associate to themselves", GO,
763 Associated);
764 }
765 }
766
767 // FIXME: Why is getMetadata on GlobalValue protected?
768 if (const MDNode *AbsoluteSymbol =
769 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
770 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
771 DL.getIntPtrType(GO->getType()),
772 RangeLikeMetadataKind::AbsoluteSymbol);
773 }
774
// !implicit_ref entries follow the same shape rules as !associated: one
// ValueAsMetadata operand, pointer-typed, resolving to another global.
775 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
776 Check(!GO->isDeclaration(),
777 "ref metadata must not be placed on a declaration", GO);
778
780 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
781 for (const MDNode *MD : MDs) {
782 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
783 &GV, MD);
784 const Metadata *Op = MD->getOperand(0).get();
785 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
786 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
787 if (VM) {
788 Check(isa<PointerType>(VM->getValue()->getType()),
789 "ref value must be pointer typed", GV, MD);
790
791 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
792 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
793 "ref metadata must point to a GlobalObject", GO, Stripped);
794 Check(Stripped != GO, "values should not reference themselves", GO,
795 MD);
796 }
797 }
798 }
799 }
800
802 "Only global variables can have appending linkage!", &GV);
803
804 if (GV.hasAppendingLinkage()) {
805 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
806 Check(GVar && GVar->getValueType()->isArrayTy(),
807 "Only global arrays can have appending linkage!", GVar);
808 }
809
810 if (GV.isDeclarationForLinker())
811 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
812
// DLL storage-class rules: dllexport needs default/protected visibility;
// dllimport needs default visibility, must not be dso_local, and must be
// external.
813 if (GV.hasDLLExportStorageClass()) {
815 "dllexport GlobalValue must have default or protected visibility",
816 &GV);
817 }
818 if (GV.hasDLLImportStorageClass()) {
820 "dllimport GlobalValue must have default visibility", &GV);
821 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
822 &GV);
823
824 Check((GV.isDeclaration() &&
827 "Global is marked as dllimport, but not external", &GV);
828 }
829
830 if (GV.isImplicitDSOLocal())
831 Check(GV.isDSOLocal(),
832 "GlobalValue with local linkage or non-default "
833 "visibility must be dso_local!",
834 &GV);
835
// Walk all (transitive) users and ensure every referencing instruction or
// function lives in this same module.
836 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
837 if (const Instruction *I = dyn_cast<Instruction>(V)) {
838 if (!I->getParent() || !I->getParent()->getParent())
839 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
840 I);
841 else if (I->getParent()->getParent()->getParent() != &M)
842 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
843 I->getParent()->getParent(),
844 I->getParent()->getParent()->getParent());
845 return false;
846 } else if (const Function *F = dyn_cast<Function>(V)) {
847 if (F->getParent() != &M)
848 CheckFailed("Global is used by function in a different module", &GV, &M,
849 F, F->getParent());
850 return false;
851 }
852 return true;
853 });
854}
855
// Checks specific to GlobalVariables: alignment limits, initializer/type
// agreement, 'common'-linkage constraints, the magic llvm.global_ctors /
// llvm.global_dtors / llvm.used / llvm.compiler.used arrays, !dbg
// attachments, and scalable/target-extension type restrictions. Ends by
// delegating to visitGlobalValue for the common GlobalValue checks.
// NOTE(review): this extract has several source lines elided (including the
// opening Check( of some multi-line assertions, e.g. lines 869, 875, 885,
// 887, 911, 913, 926-927, 937, 953). Confirm against the upstream file.
856void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
857 Type *GVType = GV.getValueType();
858
859 if (MaybeAlign A = GV.getAlign()) {
860 Check(A->value() <= Value::MaximumAlignment,
861 "huge alignment values are unsupported", &GV);
862 }
863
864 if (GV.hasInitializer()) {
865 Check(GV.getInitializer()->getType() == GVType,
866 "Global variable initializer type does not match global "
867 "variable type!",
868 &GV);
870 "Global variable initializer must be sized", &GV);
871 visitConstantExprsRecursively(GV.getInitializer());
872 // If the global has common linkage, it must have a zero initializer and
873 // cannot be constant.
874 if (GV.hasCommonLinkage()) {
876 "'common' global must have a zero initializer!", &GV);
877 Check(!GV.isConstant(), "'common' global may not be marked constant!",
878 &GV);
879 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
880 }
881 }
882
// Structure checks for the ctor/dtor arrays: each element must be a struct
// of { i32 priority, ptr function, ptr data }, with the third field now
// mandatory.
883 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
884 GV.getName() == "llvm.global_dtors")) {
886 "invalid linkage for intrinsic global variable", &GV);
888 "invalid uses of intrinsic global variable", &GV);
889
890 // Don't worry about emitting an error for it not being an array,
891 // visitGlobalValue will complain on appending non-array.
892 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
893 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
894 PointerType *FuncPtrTy =
895 PointerType::get(Context, DL.getProgramAddressSpace());
896 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
897 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
898 STy->getTypeAtIndex(1) == FuncPtrTy,
899 "wrong type for intrinsic global variable", &GV);
900 Check(STy->getNumElements() == 3,
901 "the third field of the element type is mandatory, "
902 "specify ptr null to migrate from the obsoleted 2-field form");
903 Type *ETy = STy->getTypeAtIndex(2);
904 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
905 &GV);
906 }
907 }
908
// llvm.used / llvm.compiler.used must be arrays whose elements strip to
// named pointer values.
909 if (GV.hasName() && (GV.getName() == "llvm.used" ||
910 GV.getName() == "llvm.compiler.used")) {
912 "invalid linkage for intrinsic global variable", &GV);
914 "invalid uses of intrinsic global variable", &GV);
915
916 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
917 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
918 Check(PTy, "wrong type for intrinsic global variable", &GV);
919 if (GV.hasInitializer()) {
920 const Constant *Init = GV.getInitializer();
921 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
922 Check(InitArray, "wrong initializer for intrinsic global variable",
923 Init);
924 for (Value *Op : InitArray->operands()) {
925 Value *V = Op->stripPointerCasts();
928 Twine("invalid ") + GV.getName() + " member", V);
929 Check(V->hasName(),
930 Twine("members of ") + GV.getName() + " must be named", V);
931 }
932 }
933 }
934 }
935
936 // Visit any debug info attachments.
938 GV.getMetadata(LLVMContext::MD_dbg, MDs);
939 for (auto *MD : MDs) {
940 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
941 visitDIGlobalVariableExpression(*GVE);
942 else
943 CheckDI(false, "!dbg attachment of global variable must be a "
944 "DIGlobalVariableExpression");
945 }
946
947 // Scalable vectors cannot be global variables, since we don't know
948 // the runtime size.
949 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
950
951 // Check if it is or contains a target extension type that disallows being
952 // used as a global.
954 "Global @" + GV.getName() + " has illegal target extension type",
955 GVType);
956
957 if (!GV.hasInitializer()) {
958 visitGlobalValue(GV);
959 return;
960 }
961
962 // Walk any aggregate initializers looking for bitcasts between address spaces
963 visitConstantExprsRecursively(GV.getInitializer());
964
965 visitGlobalValue(GV);
966}
967
968void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
969 SmallPtrSet<const GlobalAlias*, 4> Visited;
970 Visited.insert(&GA);
971 visitAliaseeSubExpr(Visited, GA, C);
972}
973
// Verify one step of an alias's aliasee expression, recursing through nested
// aliases and other constant subexpressions. Visited tracks aliases already
// seen so alias cycles are diagnosed rather than recursed into forever.
// NOTE(review): this excerpt appears to be missing a few lines (the opening
// of the available_externally check and of the GlobalValue branch); the code
// below is preserved exactly as found — confirm against upstream.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias may not resolve to a mere declaration.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Alias chains must be acyclic and must not pass through an
      // interposable alias.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse into operands: aliases via their aliasee, other constants
  // directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1012
// Verify a GlobalAlias: linkage, a non-null aliasee of matching type, and the
// structure of the aliasee expression itself.
// NOTE(review): the opening of the first Check (the linkage predicate) appears
// to be missing from this excerpt; code preserved exactly as found.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  // Only global values and constant expressions may serve as aliasees.
  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle detection, interposability, etc.).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally run the checks common to all global values.
  visitGlobalValue(GA);
}
1030
// Verify a GlobalIFunc: common global-value checks, metadata attachment
// restrictions, linkage, and that the resolver is a defined function of the
// right pointer type.
// NOTE(review): this excerpt appears to be missing a few lines (the MDs
// vector declaration, the linkage Check opening, and the return-type Check
// opening); code preserved exactly as found.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // ifuncs may carry metadata, but neither !dbg nor !prof attachments.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver must be a pointer in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1065
1066void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1067 // There used to be various other llvm.dbg.* nodes, but we don't support
1068 // upgrading them and we want to reserve the namespace for future uses.
1069 if (NMD.getName().starts_with("llvm.dbg."))
1070 CheckDI(NMD.getName() == "llvm.dbg.cu",
1071 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1072 for (const MDNode *MD : NMD.operands()) {
1073 if (NMD.getName() == "llvm.dbg.cu")
1074 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1075
1076 if (!MD)
1077 continue;
1078
1079 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1080 }
1081}
1082
// Verify a generic MDNode: context, subclass dispatch, operand validity, and
// (for llvm.loop.estimated_trip_count) the shape of the trip-count operand.
// NOTE(review): this excerpt appears to be missing two lines in the
// estimated-trip-count section (the tail of the `if` condition and the
// definition of `Count`); code preserved exactly as found.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each MDNode leaf subclass;
  // plain MDTuples have no extra checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Operands: no function-local metadata at global scope, and DILocations
  // only where the caller allows them; recurse into nested nodes/values.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1137
// Verify value-as-metadata: the wrapped value must exist and not be
// metadata-typed; function-local metadata must additionally live in the
// function F in which it is used.
void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
  Check(MD.getValue(), "Expected valid value", &MD);
  Check(!MD.getValue()->getType()->isMetadataTy(),
        "Unexpected metadata round-trip through values", &MD, MD.getValue());

  // Everything below applies only to function-local metadata.
  const auto *Local = dyn_cast<LocalAsMetadata>(&MD);
  if (!Local)
    return;

  Check(F, "function-local metadata used outside a function", Local);

  // If this was an instruction, bb, or argument, verify that it is in the
  // function that we expect.
  Function *Owner = nullptr;
  Value *V = Local->getValue();
  if (auto *I = dyn_cast<Instruction>(V)) {
    Check(I->getParent(), "function-local metadata not in basic block", Local,
          I);
    Owner = I->getParent()->getParent();
  } else if (auto *BB = dyn_cast<BasicBlock>(V)) {
    Owner = BB->getParent();
  } else if (auto *A = dyn_cast<Argument>(V)) {
    Owner = A->getParent();
  }
  assert(Owner && "Unimplemented function local metadata case!");

  Check(Owner == F, "function-local metadata used in wrong function", Local);
}
1163
1164void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1165 for (const ValueAsMetadata *VAM : AL.getArgs())
1166 visitValueAsMetadata(*VAM, F);
1167}
1168
1169void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1170 Metadata *MD = MDV.getMetadata();
1171 if (auto *N = dyn_cast<MDNode>(MD)) {
1172 visitMDNode(*N, AreDebugLocsAllowed::No);
1173 return;
1174 }
1175
1176 // Only visit each node once. Metadata can be mutually recursive, so this
1177 // avoids infinite recursion here, as well as being an optimization.
1178 if (!MDNodes.insert(MD).second)
1179 return;
1180
1181 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1182 visitValueAsMetadata(*V, F);
1183
1184 if (auto *AL = dyn_cast<DIArgList>(MD))
1185 visitDIArgList(*AL, F);
1186}
1187
1188static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1189static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1190static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1191static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1192
1193void Verifier::visitDILocation(const DILocation &N) {
1194 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1195 "location requires a valid scope", &N, N.getRawScope());
1196 if (auto *IA = N.getRawInlinedAt())
1197 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1198 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1199 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1200}
1201
1202void Verifier::visitGenericDINode(const GenericDINode &N) {
1203 CheckDI(N.getTag(), "invalid tag", &N);
1204}
1205
1206void Verifier::visitDIScope(const DIScope &N) {
1207 if (auto *F = N.getRawFile())
1208 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1209}
1210
// Verify a DISubrangeType: tag, optional base type, and the allowed node
// kinds for lower/upper bound, stride, bias, and size.
// NOTE(review): the opening of the final SizeInBits CheckDI appears to be
// missing from this excerpt; code preserved exactly as found.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1242
// Verify a DISubrange: tag, mutual exclusion of count/upperBound, and the
// allowed node kinds for count, bounds, and stride.
// NOTE(review): the opening of the count-value CheckDI appears to be missing
// from this excerpt; code preserved exactly as found.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // Count and upperBound are alternative ways to express extent; at most one
  // may be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1270
1271void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1272 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1273 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1274 "GenericSubrange can have any one of count or upperBound", &N);
1275 auto *CBound = N.getRawCountNode();
1276 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1277 "Count must be signed constant or DIVariable or DIExpression", &N);
1278 auto *LBound = N.getRawLowerBound();
1279 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1280 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1281 "LowerBound must be signed constant or DIVariable or DIExpression",
1282 &N);
1283 auto *UBound = N.getRawUpperBound();
1284 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1285 "UpperBound must be signed constant or DIVariable or DIExpression",
1286 &N);
1287 auto *Stride = N.getRawStride();
1288 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1289 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1290 "Stride must be signed constant or DIVariable or DIExpression", &N);
1291}
1292
1293void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1294 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1295}
1296
// Verify a DIBasicType: one of the base/unspecified/string tags, and a
// constant size.
// NOTE(review): the opening of the SizeInBits CheckDI appears to be missing
// from this excerpt; code preserved exactly as found.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1307
// Verify a DIFixedPointType on top of the basic-type checks: it must be a
// base type with a signed/unsigned fixed encoding, and its kind-specific
// fields (factor vs numerator/denominator) must be consistent.
// NOTE(review): several lines appear to be missing from this excerpt (the
// kind CheckDI opening and the conditions of the factor and
// numerator/denominator checks); code preserved exactly as found.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1326
1327void Verifier::visitDIStringType(const DIStringType &N) {
1328 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1329 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1330 &N);
1331}
1332
// Verify a DIDerivedType: allowed tags, tag-specific extraData shape,
// set-type base types, scope/base-type references, DWARF address space
// restrictions, and size.
// NOTE(review): this excerpt appears to be missing a few lines (the
// Enum/Subrange/Basic casts in the set-type check and the opening of the
// final SizeInBits CheckDI); code preserved exactly as found.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The meaning (and hence allowed shape) of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept absent extraData, a constant/string/ObjC property, or a
    // one-element tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // Set types may only be based on enumerations, subranges, or certain
  // integral/boolean basic encodings.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces are only meaningful on pointer/reference types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1418
1419/// Detect mutually exclusive flags.
1420static bool hasConflictingReferenceFlags(unsigned Flags) {
1421 return ((Flags & DINode::FlagLValueReference) &&
1422 (Flags & DINode::FlagRValueReference)) ||
1423 ((Flags & DINode::FlagTypePassByValue) &&
1424 (Flags & DINode::FlagTypePassByReference));
1425}
1426
1427void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1428 auto *Params = dyn_cast<MDTuple>(&RawParams);
1429 CheckDI(Params, "invalid template params", &N, &RawParams);
1430 for (Metadata *Op : Params->operands()) {
1431 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1432 &N, Params, Op);
1433 }
1434}
1435
// Verify a DICompositeType: allowed tags, scope/base-type/element references,
// flag constraints, vector shape, template params, and the array-type-only
// fields (dataLocation/associated/allocated/rank).
// NOTE(review): this excerpt appears to be missing a few lines (the opening
// of the reference-flags CheckDI and of the final SizeInBits CheckDI); code
// preserved exactly as found.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): the message below says "DISubprogram" although this check
  // runs on a DICompositeType's elements — looks like a copy-paste; confirm
  // upstream before relying on the wording.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vectors are arrays of exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The Fortran-style descriptor fields are array-type only.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1510
// Verify a DISubroutineType: tag, a tuple of type references, and flag
// constraints.
// NOTE(review): the opening of the reference-flags CheckDI appears to be
// missing from this excerpt; code preserved exactly as found.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1522
// Verify a DIFile: the file tag, and — when a checksum is present — a known
// kind, the hex-digit length that kind implies, and hex-only content.
void Verifier::visitDIFile(const DIFile &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
  if (std::optional<DIFile::ChecksumInfo<StringRef>> CS = N.getChecksum()) {
    CheckDI(CS->Kind <= DIFile::ChecksumKind::CSK_Last, "invalid checksum kind",
            &N);
    // Expected length in hex digits for each supported digest.
    size_t ExpectedLen = 0;
    switch (CS->Kind) {
    case DIFile::CSK_MD5:
      ExpectedLen = 32;
      break;
    case DIFile::CSK_SHA1:
      ExpectedLen = 40;
      break;
    case DIFile::CSK_SHA256:
      ExpectedLen = 64;
      break;
    }
    CheckDI(CS->Value.size() == ExpectedLen, "invalid checksum length", &N);
    CheckDI(CS->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
            "invalid checksum", &N);
  }
}
1546
// Verify a DICompileUnit: distinctness, tag, file, emission kind, and the
// typed operand lists (enums, retained types, globals, imported entities,
// macros). Records the unit in CUVisited for cross-checking later.
// NOTE(review): this excerpt appears to be missing two lines (the definition
// of `Enum` in the enum-types loop and the opening of the global-variable-ref
// CheckDI); code preserved exactly as found.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained types are either types or subprogram declarations.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1600
// Verify a DISubprogram: tag, scope/file/type references, template params,
// declaration link, retained nodes, unit/definition invariants, thrown
// types, and the DIFlagAllCallsDescribed constraint.
// NOTE(review): this excerpt appears to be missing two lines (the argument
// of the RetainedNodeScope dyn_cast and the opening of the reference-flags
// CheckDI); code preserved exactly as found.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration link must point to a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // visitRetainedNode dispatches on node kind; the final `False` callback
      // rejects anything outside the accepted kinds.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect =
          DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);

      // Every retained node must be local to, and owned by, this subprogram.
      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);
      CheckDI(
          RetainedNodeScope->getSubprogram() == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1681
1682void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1683 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1684 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1685 "invalid local scope", &N, N.getRawScope());
1686 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1687 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1688}
1689
1690void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1691 visitDILexicalBlockBase(N);
1692
1693 CheckDI(N.getLine() || !N.getColumn(),
1694 "cannot have column info without line info", &N);
1695}
1696
1697void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1698 visitDILexicalBlockBase(N);
1699}
1700
1701void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1702 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1703 if (auto *S = N.getRawScope())
1704 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1705 if (auto *S = N.getRawDecl())
1706 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1707}
1708
1709void Verifier::visitDINamespace(const DINamespace &N) {
1710 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1711 if (auto *S = N.getRawScope())
1712 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1713}
1714
// Verify a plain macro node: it records either a #define or an #undef, and
// must be named.
void Verifier::visitDIMacro(const DIMacro &N) {
  const unsigned MIType = N.getMacinfoType();
  CheckDI(MIType == dwarf::DW_MACINFO_define ||
              MIType == dwarf::DW_MACINFO_undef,
          "invalid macinfo type", &N);
  CheckDI(!N.getName().empty(), "anonymous macro", &N);
  if (!N.getValue().empty()) {
    // Values are stored without a leading space.
    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
  }
}
1724
1725void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1726 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1727 "invalid macinfo type", &N);
1728 if (auto *F = N.getRawFile())
1729 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1730
1731 if (auto *Array = N.getRawElements()) {
1732 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1733 for (Metadata *Op : N.getElements()->operands()) {
1734 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1735 }
1736 }
1737}
1738
1739void Verifier::visitDIModule(const DIModule &N) {
1740 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1741 CheckDI(!N.getName().empty(), "anonymous module", &N);
1742}
1743
1744void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1745 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1746}
1747
1748void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1749 visitDITemplateParameter(N);
1750
1751 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1752 &N);
1753}
1754
1755void Verifier::visitDITemplateValueParameter(
1756 const DITemplateValueParameter &N) {
1757 visitDITemplateParameter(N);
1758
1759 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1760 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1761 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1762 "invalid tag", &N);
1763}
1764
1765void Verifier::visitDIVariable(const DIVariable &N) {
1766 if (auto *S = N.getRawScope())
1767 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1768 if (auto *F = N.getRawFile())
1769 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1770}
1771
// Verify a DIGlobalVariable on top of the shared variable checks: tag, type
// reference, a required type for definitions, and the static-data-member
// declaration link.
// NOTE(review): the opening of the static-data-member CheckDI appears to be
// missing from this excerpt; code preserved exactly as found.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1786
// Verify a DILocalVariable.  On top of the checks shared by all DIVariables:
// the type operand must be a valid type reference, the tag must be
// DW_TAG_variable, the scope is mandatory and must be a DILocalScope, and
// the type (when resolvable) must not be a subroutine type.
void Verifier::visitDILocalVariable(const DILocalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  // Unlike globals, a local variable must always have a scope, and the
  // scope must be local (subprogram, lexical block, ...).
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "local variable requires a valid scope", &N, N.getRawScope());
  if (auto Ty = N.getType())
    CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
}
1798
// Verify a DIAssignID node: it must have no operands and must be a distinct
// (non-uniqued) node.
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1803
// Verify a DILabel: scope/file operands, when present, must have the right
// node kinds; the tag must be DW_TAG_label; and the scope is mandatory and
// must be a DILocalScope.
void Verifier::visitDILabel(const DILabel &N) {
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);

  CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "label requires a valid scope", &N, N.getRawScope());
}
1814
// Verify a DIExpression by delegating to the node's own well-formedness
// check (DIExpression::isValid validates the opcode stream).
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1818
// Verify a DIGlobalVariableExpression: the variable operand is mandatory and
// is verified as a DIGlobalVariable; the expression, if present, is verified
// as a DIExpression, and any fragment info it carries is cross-checked
// against the variable via verifyFragmentExpression.
void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  CheckDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression()) {
    visitDIExpression(*Expr);
    if (auto Fragment = Expr->getFragmentInfo())
      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
  }
}
1830
// Verify a DIObjCProperty: the tag must be DW_TAG_APPLE_property; the type,
// if present, must be a valid type reference; and the file, if present,
// must be a DIFile.
void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
  if (auto *T = N.getRawType())
    CheckDI(isType(T), "invalid type ref", &N, T);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
}
1838
// Verify a DIImportedEntity: the tag must be one of the two import tags,
// the scope (optional) must be a DIScope, and the imported entity itself is
// mandatory and must be a DINode.
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
              N.getTag() == dwarf::DW_TAG_imported_declaration,
          "invalid tag", &N);
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
          N.getRawEntity());
}
1848
// Verify a comdat.  Only one rule is enforced here, and only for COFF
// targets: a global value participating in a comdat must not have private
// linkage, because it needs a symbol-table entry.
void Verifier::visitComdat(const Comdat &C) {
  // In COFF the Module is invalid if the GlobalValue has private linkage.
  // Entities with private linkage don't have entries in the symbol table.
  if (TT.isOSBinFormatCOFF())
    if (const GlobalValue *GV = M.getNamedValue(C.getName()))
      Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
            GV);
}
1857
// Verify the optional llvm.ident named metadata: every entry must be an
// MDNode with exactly one operand, and that operand must be a string.
void Verifier::visitModuleIdents() {
  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
  if (!Idents)
    return;

  // llvm.ident takes a list of metadata entry. Each entry has only one string.
  // Scan each llvm.ident entry and make sure that this requirement is met.
  for (const MDNode *N : Idents->operands()) {
    Check(N->getNumOperands() == 1,
          "incorrect number of operands in llvm.ident metadata", N);
    Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
          ("invalid value for llvm.ident metadata entry operand"
           "(the operand should be a string)"),
          N->getOperand(0));
  }
}
1874
// Verify the optional llvm.commandline named metadata: every entry must be
// an MDNode with exactly one operand, and that operand must be a string.
// Mirrors visitModuleIdents.
void Verifier::visitModuleCommandLines() {
  const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
  if (!CommandLines)
    return;

  // llvm.commandline takes a list of metadata entry. Each entry has only one
  // string. Scan each llvm.commandline entry and make sure that this
  // requirement is met.
  for (const MDNode *N : CommandLines->operands()) {
    Check(N->getNumOperands() == 1,
          "incorrect number of operands in llvm.commandline metadata", N);
    Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
          ("invalid value for llvm.commandline metadata entry operand"
           "(the operand should be a string)"),
          N->getOperand(0));
  }
}
1892
// Verify the optional llvm.errno.tbaa named metadata: when present it must
// have at least one operand, and every operand is checked as TBAA metadata
// by the TBAA verify helper (no instruction context, hence nullptr).
void Verifier::visitModuleErrnoTBAA() {
  const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
  if (!ErrnoTBAA)
    return;

  Check(ErrnoTBAA->getNumOperands() >= 1,
        "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);

  for (const MDNode *N : ErrnoTBAA->operands())
    TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
}
1904
1905void Verifier::visitModuleFlags() {
1906 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1907 if (!Flags) return;
1908
1909 // Scan each flag, and track the flags and requirements.
1910 DenseMap<const MDString*, const MDNode*> SeenIDs;
1911 SmallVector<const MDNode*, 16> Requirements;
1912 uint64_t PAuthABIPlatform = -1;
1913 uint64_t PAuthABIVersion = -1;
1914 for (const MDNode *MDN : Flags->operands()) {
1915 visitModuleFlag(MDN, SeenIDs, Requirements);
1916 if (MDN->getNumOperands() != 3)
1917 continue;
1918 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1919 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1920 if (const auto *PAP =
1922 PAuthABIPlatform = PAP->getZExtValue();
1923 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1924 if (const auto *PAV =
1926 PAuthABIVersion = PAV->getZExtValue();
1927 }
1928 }
1929 }
1930
1931 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1932 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1933 "'aarch64-elf-pauthabi-version' module flags must be present");
1934
1935 // Validate that the requirements in the module are valid.
1936 for (const MDNode *Requirement : Requirements) {
1937 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1938 const Metadata *ReqValue = Requirement->getOperand(1);
1939
1940 const MDNode *Op = SeenIDs.lookup(Flag);
1941 if (!Op) {
1942 CheckFailed("invalid requirement on flag, flag is not present in module",
1943 Flag);
1944 continue;
1945 }
1946
1947 if (Op->getOperand(2) != ReqValue) {
1948 CheckFailed(("invalid requirement on flag, "
1949 "flag does not have the required value"),
1950 Flag);
1951 continue;
1952 }
1953 }
1954}
1955
1956void
1957Verifier::visitModuleFlag(const MDNode *Op,
1958 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1959 SmallVectorImpl<const MDNode *> &Requirements) {
1960 // Each module flag should have three arguments, the merge behavior (a
1961 // constant int), the flag ID (an MDString), and the value.
1962 Check(Op->getNumOperands() == 3,
1963 "incorrect number of operands in module flag", Op);
1964 Module::ModFlagBehavior MFB;
1965 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1967 "invalid behavior operand in module flag (expected constant integer)",
1968 Op->getOperand(0));
1969 Check(false,
1970 "invalid behavior operand in module flag (unexpected constant)",
1971 Op->getOperand(0));
1972 }
1973 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1974 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1975 Op->getOperand(1));
1976
1977 // Check the values for behaviors with additional requirements.
1978 switch (MFB) {
1979 case Module::Error:
1980 case Module::Warning:
1981 case Module::Override:
1982 // These behavior types accept any value.
1983 break;
1984
1985 case Module::Min: {
1986 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1987 Check(V && V->getValue().isNonNegative(),
1988 "invalid value for 'min' module flag (expected constant non-negative "
1989 "integer)",
1990 Op->getOperand(2));
1991 break;
1992 }
1993
1994 case Module::Max: {
1996 "invalid value for 'max' module flag (expected constant integer)",
1997 Op->getOperand(2));
1998 break;
1999 }
2000
2001 case Module::Require: {
2002 // The value should itself be an MDNode with two operands, a flag ID (an
2003 // MDString), and a value.
2004 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2005 Check(Value && Value->getNumOperands() == 2,
2006 "invalid value for 'require' module flag (expected metadata pair)",
2007 Op->getOperand(2));
2008 Check(isa<MDString>(Value->getOperand(0)),
2009 ("invalid value for 'require' module flag "
2010 "(first value operand should be a string)"),
2011 Value->getOperand(0));
2012
2013 // Append it to the list of requirements, to check once all module flags are
2014 // scanned.
2015 Requirements.push_back(Value);
2016 break;
2017 }
2018
2019 case Module::Append:
2020 case Module::AppendUnique: {
2021 // These behavior types require the operand be an MDNode.
2022 Check(isa<MDNode>(Op->getOperand(2)),
2023 "invalid value for 'append'-type module flag "
2024 "(expected a metadata node)",
2025 Op->getOperand(2));
2026 break;
2027 }
2028 }
2029
2030 // Unless this is a "requires" flag, check the ID is unique.
2031 if (MFB != Module::Require) {
2032 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2033 Check(Inserted,
2034 "module flag identifiers must be unique (or of 'require' type)", ID);
2035 }
2036
2037 if (ID->getString() == "wchar_size") {
2038 ConstantInt *Value
2040 Check(Value, "wchar_size metadata requires constant integer argument");
2041 }
2042
2043 if (ID->getString() == "Linker Options") {
2044 // If the llvm.linker.options named metadata exists, we assume that the
2045 // bitcode reader has upgraded the module flag. Otherwise the flag might
2046 // have been created by a client directly.
2047 Check(M.getNamedMetadata("llvm.linker.options"),
2048 "'Linker Options' named metadata no longer supported");
2049 }
2050
2051 if (ID->getString() == "SemanticInterposition") {
2052 ConstantInt *Value =
2054 Check(Value,
2055 "SemanticInterposition metadata requires constant integer argument");
2056 }
2057
2058 if (ID->getString() == "CG Profile") {
2059 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2060 visitModuleFlagCGProfileEntry(MDO);
2061 }
2062}
2063
// Verify one "CG Profile" module-flag entry: an MDNode triple whose first
// two operands are Functions (or null, after stripping pointer casts) and
// whose third operand is an integer constant (the edge count).
void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
  // A null function operand is allowed; a non-null one must wrap a Function.
  auto CheckFunction = [&](const MDOperand &FuncMDO) {
    if (!FuncMDO)
      return;
    auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
    Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
          "expected a Function or null", FuncMDO);
  };
  auto Node = dyn_cast_or_null<MDNode>(MDO);
  Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
  CheckFunction(Node->getOperand(0));
  CheckFunction(Node->getOperand(1));
  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
  Check(Count && Count->getType()->isIntegerTy(),
        "expected an integer constant", Node->getOperand(2));
}
2080
// Verify that each attribute in \p Attrs is structurally valid: string
// attributes declared as string-booleans in Attributes.td must have a value
// of "", "true", or "false"; enum/int attribute kinds must carry an argument
// exactly when their kind requires one.  \p V is printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// The generated Attributes.inc expands ATTRIBUTE_STRBOOL once per known
// string-boolean attribute, producing one value check per name.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2106
2107// VerifyParameterAttrs - Check the given attributes for an argument or return
2108// value of the specified type. The value V is printed in error messages.
2109void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2110 const Value *V) {
2111 if (!Attrs.hasAttributes())
2112 return;
2113
2114 verifyAttributeTypes(Attrs, V);
2115
2116 for (Attribute Attr : Attrs)
2117 Check(Attr.isStringAttribute() ||
2118 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2119 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2120 V);
2121
2122 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2123 unsigned AttrCount =
2124 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2125 Check(AttrCount == 1,
2126 "Attribute 'immarg' is incompatible with other attributes except the "
2127 "'range' attribute",
2128 V);
2129 }
2130
2131 // Check for mutually incompatible attributes. Only inreg is compatible with
2132 // sret.
2133 unsigned AttrCount = 0;
2134 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2135 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2136 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2137 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2138 Attrs.hasAttribute(Attribute::InReg);
2139 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2140 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2141 Check(AttrCount <= 1,
2142 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2143 "'byref', and 'sret' are incompatible!",
2144 V);
2145
2146 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2147 Attrs.hasAttribute(Attribute::ReadOnly)),
2148 "Attributes "
2149 "'inalloca and readonly' are incompatible!",
2150 V);
2151
2152 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2153 Attrs.hasAttribute(Attribute::Returned)),
2154 "Attributes "
2155 "'sret and returned' are incompatible!",
2156 V);
2157
2158 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2159 Attrs.hasAttribute(Attribute::SExt)),
2160 "Attributes "
2161 "'zeroext and signext' are incompatible!",
2162 V);
2163
2164 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2165 Attrs.hasAttribute(Attribute::ReadOnly)),
2166 "Attributes "
2167 "'readnone and readonly' are incompatible!",
2168 V);
2169
2170 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2171 Attrs.hasAttribute(Attribute::WriteOnly)),
2172 "Attributes "
2173 "'readnone and writeonly' are incompatible!",
2174 V);
2175
2176 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2177 Attrs.hasAttribute(Attribute::WriteOnly)),
2178 "Attributes "
2179 "'readonly and writeonly' are incompatible!",
2180 V);
2181
2182 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2183 Attrs.hasAttribute(Attribute::AlwaysInline)),
2184 "Attributes "
2185 "'noinline and alwaysinline' are incompatible!",
2186 V);
2187
2188 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2189 Attrs.hasAttribute(Attribute::ReadNone)),
2190 "Attributes writable and readnone are incompatible!", V);
2191
2192 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2193 Attrs.hasAttribute(Attribute::ReadOnly)),
2194 "Attributes writable and readonly are incompatible!", V);
2195
2196 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2197 for (Attribute Attr : Attrs) {
2198 if (!Attr.isStringAttribute() &&
2199 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2200 CheckFailed("Attribute '" + Attr.getAsString() +
2201 "' applied to incompatible type!", V);
2202 return;
2203 }
2204 }
2205
2206 if (isa<PointerType>(Ty)) {
2207 if (Attrs.hasAttribute(Attribute::Alignment)) {
2208 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2209 Check(AttrAlign.value() <= Value::MaximumAlignment,
2210 "huge alignment values are unsupported", V);
2211 }
2212 if (Attrs.hasAttribute(Attribute::ByVal)) {
2213 Type *ByValTy = Attrs.getByValType();
2214 SmallPtrSet<Type *, 4> Visited;
2215 Check(ByValTy->isSized(&Visited),
2216 "Attribute 'byval' does not support unsized types!", V);
2217 // Check if it is or contains a target extension type that disallows being
2218 // used on the stack.
2220 "'byval' argument has illegal target extension type", V);
2221 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2222 "huge 'byval' arguments are unsupported", V);
2223 }
2224 if (Attrs.hasAttribute(Attribute::ByRef)) {
2225 SmallPtrSet<Type *, 4> Visited;
2226 Check(Attrs.getByRefType()->isSized(&Visited),
2227 "Attribute 'byref' does not support unsized types!", V);
2228 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2229 (1ULL << 32),
2230 "huge 'byref' arguments are unsupported", V);
2231 }
2232 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2233 SmallPtrSet<Type *, 4> Visited;
2234 Check(Attrs.getInAllocaType()->isSized(&Visited),
2235 "Attribute 'inalloca' does not support unsized types!", V);
2236 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2237 (1ULL << 32),
2238 "huge 'inalloca' arguments are unsupported", V);
2239 }
2240 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2241 SmallPtrSet<Type *, 4> Visited;
2242 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2243 "Attribute 'preallocated' does not support unsized types!", V);
2244 Check(
2245 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2246 (1ULL << 32),
2247 "huge 'preallocated' arguments are unsupported", V);
2248 }
2249 }
2250
2251 if (Attrs.hasAttribute(Attribute::Initializes)) {
2252 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2253 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2254 V);
2256 "Attribute 'initializes' does not support unordered ranges", V);
2257 }
2258
2259 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2260 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2261 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2262 V);
2263 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2264 "Invalid value for 'nofpclass' test mask", V);
2265 }
2266 if (Attrs.hasAttribute(Attribute::Range)) {
2267 const ConstantRange &CR =
2268 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2270 "Range bit width must match type bit width!", V);
2271 }
2272}
2273
// Verify that the string function attribute \p Attr, if present on \p Attrs,
// has a value that parses as a base-10 unsigned integer.  \p V is printed in
// error messages.
void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
                                            const Value *V) {
  if (Attrs.hasFnAttr(Attr)) {
    StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
    unsigned N;
    // getAsInteger returns true on parse failure.
    if (S.getAsInteger(10, N))
      CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
  }
}
2283
2284// Check parameter attributes against a function type.
2285// The value V is printed in error messages.
2286void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2287 const Value *V, bool IsIntrinsic,
2288 bool IsInlineAsm) {
2289 if (Attrs.isEmpty())
2290 return;
2291
2292 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2293 Check(Attrs.hasParentContext(Context),
2294 "Attribute list does not match Module context!", &Attrs, V);
2295 for (const auto &AttrSet : Attrs) {
2296 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2297 "Attribute set does not match Module context!", &AttrSet, V);
2298 for (const auto &A : AttrSet) {
2299 Check(A.hasParentContext(Context),
2300 "Attribute does not match Module context!", &A, V);
2301 }
2302 }
2303 }
2304
2305 bool SawNest = false;
2306 bool SawReturned = false;
2307 bool SawSRet = false;
2308 bool SawSwiftSelf = false;
2309 bool SawSwiftAsync = false;
2310 bool SawSwiftError = false;
2311
2312 // Verify return value attributes.
2313 AttributeSet RetAttrs = Attrs.getRetAttrs();
2314 for (Attribute RetAttr : RetAttrs)
2315 Check(RetAttr.isStringAttribute() ||
2316 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2317 "Attribute '" + RetAttr.getAsString() +
2318 "' does not apply to function return values",
2319 V);
2320
2321 unsigned MaxParameterWidth = 0;
2322 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2323 if (Ty->isVectorTy()) {
2324 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2325 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2326 if (Size > MaxParameterWidth)
2327 MaxParameterWidth = Size;
2328 }
2329 }
2330 };
2331 GetMaxParameterWidth(FT->getReturnType());
2332 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2333
2334 // Verify parameter attributes.
2335 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2336 Type *Ty = FT->getParamType(i);
2337 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2338
2339 if (!IsIntrinsic) {
2340 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2341 "immarg attribute only applies to intrinsics", V);
2342 if (!IsInlineAsm)
2343 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2344 "Attribute 'elementtype' can only be applied to intrinsics"
2345 " and inline asm.",
2346 V);
2347 }
2348
2349 verifyParameterAttrs(ArgAttrs, Ty, V);
2350 GetMaxParameterWidth(Ty);
2351
2352 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2353 Check(!SawNest, "More than one parameter has attribute nest!", V);
2354 SawNest = true;
2355 }
2356
2357 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2358 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2359 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2360 "Incompatible argument and return types for 'returned' attribute",
2361 V);
2362 SawReturned = true;
2363 }
2364
2365 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2366 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2367 Check(i == 0 || i == 1,
2368 "Attribute 'sret' is not on first or second parameter!", V);
2369 SawSRet = true;
2370 }
2371
2372 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2373 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2374 SawSwiftSelf = true;
2375 }
2376
2377 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2378 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2379 SawSwiftAsync = true;
2380 }
2381
2382 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2383 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2384 SawSwiftError = true;
2385 }
2386
2387 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2388 Check(i == FT->getNumParams() - 1,
2389 "inalloca isn't on the last parameter!", V);
2390 }
2391 }
2392
2393 if (!Attrs.hasFnAttrs())
2394 return;
2395
2396 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2397 for (Attribute FnAttr : Attrs.getFnAttrs())
2398 Check(FnAttr.isStringAttribute() ||
2399 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2400 "Attribute '" + FnAttr.getAsString() +
2401 "' does not apply to functions!",
2402 V);
2403
2404 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2405 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2406 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2407
2408 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2409 Check(Attrs.hasFnAttr(Attribute::NoInline),
2410 "Attribute 'optnone' requires 'noinline'!", V);
2411
2412 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2413 "Attributes 'optsize and optnone' are incompatible!", V);
2414
2415 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2416 "Attributes 'minsize and optnone' are incompatible!", V);
2417
2418 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2419 "Attributes 'optdebug and optnone' are incompatible!", V);
2420 }
2421
2422 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2423 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2424 "Attributes "
2425 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2426 V);
2427
2428 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2429 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2430 "Attributes 'optsize and optdebug' are incompatible!", V);
2431
2432 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2433 "Attributes 'minsize and optdebug' are incompatible!", V);
2434 }
2435
2436 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2437 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2438 "Attribute writable and memory without argmem: write are incompatible!",
2439 V);
2440
2441 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2442 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2443 "Attributes 'aarch64_pstate_sm_enabled and "
2444 "aarch64_pstate_sm_compatible' are incompatible!",
2445 V);
2446 }
2447
2448 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2449 Attrs.hasFnAttr("aarch64_inout_za") +
2450 Attrs.hasFnAttr("aarch64_out_za") +
2451 Attrs.hasFnAttr("aarch64_preserves_za") +
2452 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2453 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2454 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2455 "'aarch64_za_state_agnostic' are mutually exclusive",
2456 V);
2457
2458 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2459 Attrs.hasFnAttr("aarch64_in_zt0") +
2460 Attrs.hasFnAttr("aarch64_inout_zt0") +
2461 Attrs.hasFnAttr("aarch64_out_zt0") +
2462 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2463 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2464 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2465 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2466 "'aarch64_za_state_agnostic' are mutually exclusive",
2467 V);
2468
2469 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2470 const GlobalValue *GV = cast<GlobalValue>(V);
2472 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2473 }
2474
2475 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2476 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2477 if (ParamNo >= FT->getNumParams()) {
2478 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2479 return false;
2480 }
2481
2482 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2483 CheckFailed("'allocsize' " + Name +
2484 " argument must refer to an integer parameter",
2485 V);
2486 return false;
2487 }
2488
2489 return true;
2490 };
2491
2492 if (!CheckParam("element size", Args->first))
2493 return;
2494
2495 if (Args->second && !CheckParam("number of elements", *Args->second))
2496 return;
2497 }
2498
2499 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2500 AllocFnKind K = Attrs.getAllocKind();
2502 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2503 if (!is_contained(
2504 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2505 Type))
2506 CheckFailed(
2507 "'allockind()' requires exactly one of alloc, realloc, and free");
2508 if ((Type == AllocFnKind::Free) &&
2509 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2510 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2511 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2512 "or aligned modifiers.");
2513 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2514 if ((K & ZeroedUninit) == ZeroedUninit)
2515 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2516 }
2517
2518 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2519 StringRef S = A.getValueAsString();
2520 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2521 Function *Variant = M.getFunction(S);
2522 if (Variant) {
2523 Attribute Family = Attrs.getFnAttr("alloc-family");
2524 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2525 if (Family.isValid())
2526 Check(VariantFamily.isValid() &&
2527 VariantFamily.getValueAsString() == Family.getValueAsString(),
2528 "'alloc-variant-zeroed' must name a function belonging to the "
2529 "same 'alloc-family'");
2530
2531 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2532 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2533 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2534 "'alloc-variant-zeroed' must name a function with "
2535 "'allockind(\"zeroed\")'");
2536
2537 Check(FT == Variant->getFunctionType(),
2538 "'alloc-variant-zeroed' must name a function with the same "
2539 "signature");
2540
2541 if (const Function *F = dyn_cast<Function>(V))
2542 Check(F->getCallingConv() == Variant->getCallingConv(),
2543 "'alloc-variant-zeroed' must name a function with the same "
2544 "calling convention");
2545 }
2546 }
2547
2548 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2549 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2550 if (VScaleMin == 0)
2551 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2552 else if (!isPowerOf2_32(VScaleMin))
2553 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2554 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2555 if (VScaleMax && VScaleMin > VScaleMax)
2556 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2557 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2558 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2559 }
2560
2561 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2562 StringRef FP = FPAttr.getValueAsString();
2563 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2564 FP != "non-leaf-no-reserve")
2565 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2566 }
2567
2568 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2569 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2570 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2571 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2572 .getValueAsString()
2573 .empty(),
2574 "\"patchable-function-entry-section\" must not be empty");
2575 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2576
2577 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2578 StringRef S = A.getValueAsString();
2579 if (S != "none" && S != "all" && S != "non-leaf")
2580 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2581 }
2582
2583 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2584 StringRef S = A.getValueAsString();
2585 if (S != "a_key" && S != "b_key")
2586 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2587 V);
2588 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2589 CheckFailed(
2590 "'sign-return-address-key' present without `sign-return-address`");
2591 }
2592 }
2593
2594 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2595 StringRef S = A.getValueAsString();
2596 if (S != "" && S != "true" && S != "false")
2597 CheckFailed(
2598 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2599 }
2600
2601 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2602 StringRef S = A.getValueAsString();
2603 if (S != "" && S != "true" && S != "false")
2604 CheckFailed(
2605 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2606 }
2607
2608 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2609 StringRef S = A.getValueAsString();
2610 if (S != "" && S != "true" && S != "false")
2611 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2612 V);
2613 }
2614
2615 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2616 StringRef S = A.getValueAsString();
2617 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2618 if (!Info)
2619 CheckFailed("invalid name for a VFABI variant: " + S, V);
2620 }
2621
2622 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2623 StringRef S = A.getValueAsString();
2625 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2626 }
2627
2628 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2629 StringRef S = A.getValueAsString();
2631 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2632 V);
2633 }
2634
2635 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2636 StringRef S = A.getValueAsString();
2638 S.split(Args, ',');
2639 Check(Args.size() >= 5,
2640 "modular-format attribute requires at least 5 arguments", V);
2641 unsigned FirstArgIdx;
2642 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2643 "modular-format attribute first arg index is not an integer", V);
2644 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2645 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2646 "modular-format attribute first arg index is out of bounds", V);
2647 }
2648
2649 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2650 StringRef S = A.getValueAsString();
2651 if (!S.empty()) {
2652 for (auto FeatureFlag : split(S, ',')) {
2653 if (FeatureFlag.empty())
2654 CheckFailed(
2655 "target-features attribute should not contain an empty string");
2656 else
2657 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2658 "target feature '" + FeatureFlag +
2659 "' must start with a '+' or '-'",
2660 V);
2661 }
2662 }
2663 }
2664}
2665void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2666 Check(MD->getNumOperands() == 2,
2667 "'unknown' !prof should have a single additional operand", MD);
2668 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2669 Check(PassName != nullptr,
2670 "'unknown' !prof should have an additional operand of type "
2671 "string");
2672 Check(!PassName->getString().empty(),
2673 "the 'unknown' !prof operand should not be an empty string");
2674}
2675
/// Validate module-level metadata attachments on a function. Only !prof and
/// !kcfi_type attachments are checked here; other attachment kinds are
/// ignored by this routine.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
          verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // !kcfi_type: exactly one operand, a 32-bit constant integer.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2725
/// Iteratively walk the operand graph of \p EntryC, running the per-node
/// checks (visitConstantExpr / visitConstantPtrAuth) on every constant
/// reachable from it exactly once.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have no operands to recurse into.
  if (EntryC->getNumOperands() == 0)
    return;

  // Skip subgraphs that were already verified from an earlier entry point.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  // Depth-first traversal using an explicit worklist.
  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2765
2766void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2767 if (CE->getOpcode() == Instruction::BitCast)
2768 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2769 CE->getType()),
2770 "Invalid bitcast", CE);
2771 else if (CE->getOpcode() == Instruction::PtrToAddr)
2772 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2773}
2774
/// Check structural invariants of a signed ptrauth constant: a pointer-typed
/// base of the same type as the constant itself, an i32 key, and an i64
/// discriminator (per the Check messages below; some condition lines are
/// validated against the address discriminator and deactivation symbol).
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2799
2800bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2801 // There shouldn't be more attribute sets than there are parameters plus the
2802 // function and return value.
2803 return Attrs.getNumAttrSets() <= Params + 2;
2804}
2805
/// Check an inline-asm call site's constraint string against its actual
/// arguments: indirect constraints need pointer operands carrying an
/// elementtype attribute, direct constraints must not carry elementtype,
/// and label constraints are only allowed on callbr (where their count must
/// match the number of indirect destinations).
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;   // index of the next call argument consumed
  unsigned LabelNo = 0; // number of label constraints seen
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2847
2848/// Verify that statepoint intrinsic is well formed.
2849void Verifier::verifyStatepoint(const CallBase &Call) {
2850 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2851
2854 "gc.statepoint must read and write all memory to preserve "
2855 "reordering restrictions required by safepoint semantics",
2856 Call);
2857
2858 const int64_t NumPatchBytes =
2859 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2860 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2861 Check(NumPatchBytes >= 0,
2862 "gc.statepoint number of patchable bytes must be "
2863 "positive",
2864 Call);
2865
2866 Type *TargetElemType = Call.getParamElementType(2);
2867 Check(TargetElemType,
2868 "gc.statepoint callee argument must have elementtype attribute", Call);
2869 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2870 Check(TargetFuncType,
2871 "gc.statepoint callee elementtype must be function type", Call);
2872
2873 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2874 Check(NumCallArgs >= 0,
2875 "gc.statepoint number of arguments to underlying call "
2876 "must be positive",
2877 Call);
2878 const int NumParams = (int)TargetFuncType->getNumParams();
2879 if (TargetFuncType->isVarArg()) {
2880 Check(NumCallArgs >= NumParams,
2881 "gc.statepoint mismatch in number of vararg call args", Call);
2882
2883 // TODO: Remove this limitation
2884 Check(TargetFuncType->getReturnType()->isVoidTy(),
2885 "gc.statepoint doesn't support wrapping non-void "
2886 "vararg functions yet",
2887 Call);
2888 } else
2889 Check(NumCallArgs == NumParams,
2890 "gc.statepoint mismatch in number of call args", Call);
2891
2892 const uint64_t Flags
2893 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2894 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2895 "unknown flag used in gc.statepoint flags argument", Call);
2896
2897 // Verify that the types of the call parameter arguments match
2898 // the type of the wrapped callee.
2899 AttributeList Attrs = Call.getAttributes();
2900 for (int i = 0; i < NumParams; i++) {
2901 Type *ParamType = TargetFuncType->getParamType(i);
2902 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2903 Check(ArgType == ParamType,
2904 "gc.statepoint call argument does not match wrapped "
2905 "function type",
2906 Call);
2907
2908 if (TargetFuncType->isVarArg()) {
2909 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2910 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2911 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2912 }
2913 }
2914
2915 const int EndCallArgsInx = 4 + NumCallArgs;
2916
2917 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2918 Check(isa<ConstantInt>(NumTransitionArgsV),
2919 "gc.statepoint number of transition arguments "
2920 "must be constant integer",
2921 Call);
2922 const int NumTransitionArgs =
2923 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2924 Check(NumTransitionArgs == 0,
2925 "gc.statepoint w/inline transition bundle is deprecated", Call);
2926 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2927
2928 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2929 Check(isa<ConstantInt>(NumDeoptArgsV),
2930 "gc.statepoint number of deoptimization arguments "
2931 "must be constant integer",
2932 Call);
2933 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2934 Check(NumDeoptArgs == 0,
2935 "gc.statepoint w/inline deopt operands is deprecated", Call);
2936
2937 const int ExpectedNumArgs = 7 + NumCallArgs;
2938 Check(ExpectedNumArgs == (int)Call.arg_size(),
2939 "gc.statepoint too many arguments", Call);
2940
2941 // Check that the only uses of this gc.statepoint are gc.result or
2942 // gc.relocate calls which are tied to this statepoint and thus part
2943 // of the same statepoint sequence
2944 for (const User *U : Call.users()) {
2945 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2946 Check(UserCall, "illegal use of statepoint token", Call, U);
2947 if (!UserCall)
2948 continue;
2949 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2950 "gc.result or gc.relocate are the only value uses "
2951 "of a gc.statepoint",
2952 Call, U);
2953 if (isa<GCResultInst>(UserCall)) {
2954 Check(UserCall->getArgOperand(0) == &Call,
2955 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2956 } else if (isa<GCRelocateInst>(Call)) {
2957 Check(UserCall->getArgOperand(0) == &Call,
2958 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2959 }
2960 }
2961
2962 // Note: It is legal for a single derived pointer to be listed multiple
2963 // times. It's non-optimal, but it is legal. It can also happen after
2964 // insertion if we strip a bitcast away.
2965 // Note: It is really tempting to check that each base is relocated and
2966 // that a derived pointer is never reused as a base pointer. This turns
2967 // out to be problematic since optimizations run after safepoint insertion
2968 // can recognize equality properties that the insertion logic doesn't know
2969 // about. See example statepoint.ll in the verifier subdirectory
2970}
2971
2972void Verifier::verifyFrameRecoverIndices() {
2973 for (auto &Counts : FrameEscapeInfo) {
2974 Function *F = Counts.first;
2975 unsigned EscapedObjectCount = Counts.second.first;
2976 unsigned MaxRecoveredIndex = Counts.second.second;
2977 Check(MaxRecoveredIndex <= EscapedObjectCount,
2978 "all indices passed to llvm.localrecover must be less than the "
2979 "number of arguments passed to llvm.localescape in the parent "
2980 "function",
2981 F);
2982 }
2983}
2984
2985static Instruction *getSuccPad(Instruction *Terminator) {
2986 BasicBlock *UnwindDest;
2987 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2988 UnwindDest = II->getUnwindDest();
2989 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2990 UnwindDest = CSI->getUnwindDest();
2991 else
2992 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2993 return &*UnwindDest->getFirstNonPHIIt();
2994}
2995
/// Detect cycles among sibling EH funclet pads: if pad A's unwind edge leads
/// (transitively) back to A, the pads would be able to handle each other's
/// exceptions, which is invalid. SiblingFuncletInfo maps each pad to the
/// terminator that unwinds out of it.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads already proven acyclic on a previous walk.
  // Active: pads on the current walk, used to detect a cycle.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and distinct terminators) on the cycle so the
          // diagnostic can print the whole loop.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3038
// visitFunction - Verify that a function is ok.
//
// Checks signature/attribute/calling-convention consistency, metadata
// attachments (!dbg, !prof, !kcfi_type), intrinsic usage rules, and that
// every !dbg location in the body leads back to this function's
// DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions may not reference their formal arguments from the body.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata/token/x86_amx-typed parameters are reserved for intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3376
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks that the block has a terminator, that its PHI nodes agree exactly
// with its predecessor list, that every instruction's parent pointer is this
// block, and that no DbgRecords trail the block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // Sorting lets us compare predecessors and PHI entries positionally.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3436
3437void Verifier::visitTerminator(Instruction &I) {
3438 // Ensure that terminators only exist at the end of the basic block.
3439 Check(&I == I.getParent()->getTerminator(),
3440 "Terminator found in the middle of a basic block!", I.getParent());
3441 visitInstruction(I);
3442}
3443
void Verifier::visitBranchInst(BranchInst &BI) {
  // A conditional branch must be driven by an 'i1' condition (per the Check
  // message below); unconditional branches have nothing extra to validate.
  if (BI.isConditional()) {
          "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3451
3452void Verifier::visitReturnInst(ReturnInst &RI) {
3453 Function *F = RI.getParent()->getParent();
3454 unsigned N = RI.getNumOperands();
3455 if (F->getReturnType()->isVoidTy())
3456 Check(N == 0,
3457 "Found return instr that returns non-void in Function of void "
3458 "return type!",
3459 &RI, F->getReturnType());
3460 else
3461 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3462 "Function return type does not match operand "
3463 "type of return inst!",
3464 &RI, F->getReturnType());
3465
3466 // Check to make sure that the return value has necessary properties for
3467 // terminators...
3468 visitTerminator(RI);
3469}
3470
3471void Verifier::visitSwitchInst(SwitchInst &SI) {
3472 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3473 // Check to make sure that all of the constants in the switch instruction
3474 // have the same type as the switched-on value.
3475 Type *SwitchTy = SI.getCondition()->getType();
3476 SmallPtrSet<ConstantInt*, 32> Constants;
3477 for (auto &Case : SI.cases()) {
3478 Check(isa<ConstantInt>(Case.getCaseValue()),
3479 "Case value is not a constant integer.", &SI);
3480 Check(Case.getCaseValue()->getType() == SwitchTy,
3481 "Switch constants must all be same type as switch value!", &SI);
3482 Check(Constants.insert(Case.getCaseValue()).second,
3483 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3484 }
3485
3486 visitTerminator(SI);
3487}
3488
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
  // The address operand and every listed destination must be pointer-typed.
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3498
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  // A non-asm callbr is only accepted for a small allowlist of intrinsics,
  // each with extra structural requirements; everything else is rejected.
  if (!CBI.isInlineAsm()) {
        "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The sole indirect destination must begin with either 'unreachable'
      // or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // asm-goto form: the inline asm must not be marked as unwinding.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3530
3531void Verifier::visitSelectInst(SelectInst &SI) {
3532 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3533 SI.getOperand(2)),
3534 "Invalid operands for select instruction!", &SI);
3535
3536 Check(SI.getTrueValue()->getType() == SI.getType(),
3537 "Select values must have same type as select instruction!", &SI);
3538 visitInstruction(SI);
3539}
3540
3541/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3542/// a pass, if any exist, it's an error.
3543///
3544void Verifier::visitUserOp1(Instruction &I) {
3545 Check(false, "User-defined operators should not live outside of a pass!", &I);
3546}
3547
3548void Verifier::visitTruncInst(TruncInst &I) {
3549 // Get the source and destination types
3550 Type *SrcTy = I.getOperand(0)->getType();
3551 Type *DestTy = I.getType();
3552
3553 // Get the size of the types in bits, we'll need this later
3554 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3555 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3556
3557 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3558 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3559 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3560 "trunc source and destination must both be a vector or neither", &I);
3561 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3562
3563 visitInstruction(I);
3564}
3565
3566void Verifier::visitZExtInst(ZExtInst &I) {
3567 // Get the source and destination types
3568 Type *SrcTy = I.getOperand(0)->getType();
3569 Type *DestTy = I.getType();
3570
3571 // Get the size of the types in bits, we'll need this later
3572 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3573 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3574 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3575 "zext source and destination must both be a vector or neither", &I);
3576 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3577 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3578
3579 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3580
3581 visitInstruction(I);
3582}
3583
3584void Verifier::visitSExtInst(SExtInst &I) {
3585 // Get the source and destination types
3586 Type *SrcTy = I.getOperand(0)->getType();
3587 Type *DestTy = I.getType();
3588
3589 // Get the size of the types in bits, we'll need this later
3590 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3591 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3592
3593 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3594 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3595 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3596 "sext source and destination must both be a vector or neither", &I);
3597 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3598
3599 visitInstruction(I);
3600}
3601
3602void Verifier::visitFPTruncInst(FPTruncInst &I) {
3603 // Get the source and destination types
3604 Type *SrcTy = I.getOperand(0)->getType();
3605 Type *DestTy = I.getType();
3606 // Get the size of the types in bits, we'll need this later
3607 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3608 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3609
3610 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3611 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3612 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3613 "fptrunc source and destination must both be a vector or neither", &I);
3614 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3615
3616 visitInstruction(I);
3617}
3618
3619void Verifier::visitFPExtInst(FPExtInst &I) {
3620 // Get the source and destination types
3621 Type *SrcTy = I.getOperand(0)->getType();
3622 Type *DestTy = I.getType();
3623
3624 // Get the size of the types in bits, we'll need this later
3625 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3626 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3627
3628 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3629 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3630 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3631 "fpext source and destination must both be a vector or neither", &I);
3632 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3633
3634 visitInstruction(I);
3635}
3636
3637void Verifier::visitUIToFPInst(UIToFPInst &I) {
3638 // Get the source and destination types
3639 Type *SrcTy = I.getOperand(0)->getType();
3640 Type *DestTy = I.getType();
3641
3642 bool SrcVec = SrcTy->isVectorTy();
3643 bool DstVec = DestTy->isVectorTy();
3644
3645 Check(SrcVec == DstVec,
3646 "UIToFP source and dest must both be vector or scalar", &I);
3647 Check(SrcTy->isIntOrIntVectorTy(),
3648 "UIToFP source must be integer or integer vector", &I);
3649 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3650 &I);
3651
3652 if (SrcVec && DstVec)
3653 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3654 cast<VectorType>(DestTy)->getElementCount(),
3655 "UIToFP source and dest vector length mismatch", &I);
3656
3657 visitInstruction(I);
3658}
3659
3660void Verifier::visitSIToFPInst(SIToFPInst &I) {
3661 // Get the source and destination types
3662 Type *SrcTy = I.getOperand(0)->getType();
3663 Type *DestTy = I.getType();
3664
3665 bool SrcVec = SrcTy->isVectorTy();
3666 bool DstVec = DestTy->isVectorTy();
3667
3668 Check(SrcVec == DstVec,
3669 "SIToFP source and dest must both be vector or scalar", &I);
3670 Check(SrcTy->isIntOrIntVectorTy(),
3671 "SIToFP source must be integer or integer vector", &I);
3672 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3673 &I);
3674
3675 if (SrcVec && DstVec)
3676 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3677 cast<VectorType>(DestTy)->getElementCount(),
3678 "SIToFP source and dest vector length mismatch", &I);
3679
3680 visitInstruction(I);
3681}
3682
3683void Verifier::visitFPToUIInst(FPToUIInst &I) {
3684 // Get the source and destination types
3685 Type *SrcTy = I.getOperand(0)->getType();
3686 Type *DestTy = I.getType();
3687
3688 bool SrcVec = SrcTy->isVectorTy();
3689 bool DstVec = DestTy->isVectorTy();
3690
3691 Check(SrcVec == DstVec,
3692 "FPToUI source and dest must both be vector or scalar", &I);
3693 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3694 Check(DestTy->isIntOrIntVectorTy(),
3695 "FPToUI result must be integer or integer vector", &I);
3696
3697 if (SrcVec && DstVec)
3698 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3699 cast<VectorType>(DestTy)->getElementCount(),
3700 "FPToUI source and dest vector length mismatch", &I);
3701
3702 visitInstruction(I);
3703}
3704
3705void Verifier::visitFPToSIInst(FPToSIInst &I) {
3706 // Get the source and destination types
3707 Type *SrcTy = I.getOperand(0)->getType();
3708 Type *DestTy = I.getType();
3709
3710 bool SrcVec = SrcTy->isVectorTy();
3711 bool DstVec = DestTy->isVectorTy();
3712
3713 Check(SrcVec == DstVec,
3714 "FPToSI source and dest must both be vector or scalar", &I);
3715 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3716 Check(DestTy->isIntOrIntVectorTy(),
3717 "FPToSI result must be integer or integer vector", &I);
3718
3719 if (SrcVec && DstVec)
3720 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3721 cast<VectorType>(DestTy)->getElementCount(),
3722 "FPToSI source and dest vector length mismatch", &I);
3723
3724 visitInstruction(I);
3725}
3726
3727void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3728 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3729 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3730 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3731 V);
3732
3733 if (SrcTy->isVectorTy()) {
3734 auto *VSrc = cast<VectorType>(SrcTy);
3735 auto *VDest = cast<VectorType>(DestTy);
3736 Check(VSrc->getElementCount() == VDest->getElementCount(),
3737 "PtrToAddr vector length mismatch", V);
3738 }
3739
3740 Type *AddrTy = DL.getAddressType(SrcTy);
3741 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3742}
3743
3744void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3745 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3746 visitInstruction(I);
3747}
3748
3749void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3750 // Get the source and destination types
3751 Type *SrcTy = I.getOperand(0)->getType();
3752 Type *DestTy = I.getType();
3753
3754 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3755
3756 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3757 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3758 &I);
3759
3760 if (SrcTy->isVectorTy()) {
3761 auto *VSrc = cast<VectorType>(SrcTy);
3762 auto *VDest = cast<VectorType>(DestTy);
3763 Check(VSrc->getElementCount() == VDest->getElementCount(),
3764 "PtrToInt Vector length mismatch", &I);
3765 }
3766
3767 visitInstruction(I);
3768}
3769
3770void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3771 // Get the source and destination types
3772 Type *SrcTy = I.getOperand(0)->getType();
3773 Type *DestTy = I.getType();
3774
3775 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3776 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3777
3778 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3779 &I);
3780 if (SrcTy->isVectorTy()) {
3781 auto *VSrc = cast<VectorType>(SrcTy);
3782 auto *VDest = cast<VectorType>(DestTy);
3783 Check(VSrc->getElementCount() == VDest->getElementCount(),
3784 "IntToPtr Vector length mismatch", &I);
3785 }
3786 visitInstruction(I);
3787}
3788
3789void Verifier::visitBitCastInst(BitCastInst &I) {
3790 Check(
3791 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3792 "Invalid bitcast", &I);
3793 visitInstruction(I);
3794}
3795
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // Both sides must be pointers (or pointer vectors), the cast must change
  // the address space, and vector casts must preserve the element count.
  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3812
/// visitPHINode - Ensure that a PHI node is well formed: grouped with the
/// other PHIs at the top of its block, not token-typed, and with every
/// incoming value matching the PHI's result type.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3838
/// Shared verification for every call-like instruction (call, invoke, callbr):
/// callee/argument type agreement, attribute legality, operand bundles, and
/// debug-location requirements for inlinable calls.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // Callee (when statically resolvable) drives the intrinsic- and
  // declaration-dependent checks below.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      // immarg operands must be compile-time immediates (int or FP constants).
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4144
4145void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4146 StringRef Context) {
4147 Check(!Attrs.contains(Attribute::InAlloca),
4148 Twine("inalloca attribute not allowed in ") + Context);
4149 Check(!Attrs.contains(Attribute::InReg),
4150 Twine("inreg attribute not allowed in ") + Context);
4151 Check(!Attrs.contains(Attribute::SwiftError),
4152 Twine("swifterror attribute not allowed in ") + Context);
4153 Check(!Attrs.contains(Attribute::Preallocated),
4154 Twine("preallocated attribute not allowed in ") + Context);
4155 Check(!Attrs.contains(Attribute::ByRef),
4156 Twine("byref attribute not allowed in ") + Context);
4157}
4158
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  // Identical types are trivially congruent.
  if (L == R)
    return true;
  // Otherwise both must be pointers in the same address space.
  if (!PL || !PR)
    return false;
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4170
4171static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4172 static const Attribute::AttrKind ABIAttrs[] = {
4173 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4174 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4175 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4176 Attribute::ByRef};
4177 AttrBuilder Copy(C);
4178 for (auto AK : ABIAttrs) {
4179 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4180 if (Attr.isValid())
4181 Copy.addAttribute(Attr);
4182 }
4183
4184 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4185 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4186 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4187 Attrs.hasParamAttr(I, Attribute::ByRef)))
4188 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4189 return Copy;
4190}
4191
/// Enforce the structural rules for a 'musttail' call site: matching
/// varargs-ness and calling convention with the caller, congruent
/// return/parameter types, the call (+ optional bitcast) immediately
/// preceding a ret of its result, and matching ABI-impacting attributes.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  // tailcc/swifttailcc relax the prototype-match requirement but restrict the
  // allowed ABI-impacting attributes on both caller and callee parameters.
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4278
4279void Verifier::visitCallInst(CallInst &CI) {
4280 visitCallBase(CI);
4281
4282 if (CI.isMustTailCall())
4283 verifyMustTailCall(CI);
4284}
4285
4286void Verifier::visitInvokeInst(InvokeInst &II) {
4287 visitCallBase(II);
4288
4289 // Verify that the first non-PHI instruction of the unwind destination is an
4290 // exception handling instruction.
4291 Check(
4292 II.getUnwindDest()->isEHPad(),
4293 "The unwind destination does not have an exception handling instruction!",
4294 &II);
4295
4296 visitTerminator(II);
4297}
4298
4299/// visitUnaryOperator - Check the argument to the unary operator.
4300///
4301void Verifier::visitUnaryOperator(UnaryOperator &U) {
4302 Check(U.getType() == U.getOperand(0)->getType(),
4303 "Unary operators must have same type for"
4304 "operands and result!",
4305 &U);
4306
4307 switch (U.getOpcode()) {
4308 // Check that floating-point arithmetic operators are only used with
4309 // floating-point operands.
4310 case Instruction::FNeg:
4311 Check(U.getType()->isFPOrFPVectorTy(),
4312 "FNeg operator only works with float types!", &U);
4313 break;
4314 default:
4315 llvm_unreachable("Unknown UnaryOperator opcode!");
4316 }
4317
4318 visitInstruction(U);
4319}
4320
4321/// visitBinaryOperator - Check that both arguments to the binary operator are
4322/// of the same type!
4323///
4324void Verifier::visitBinaryOperator(BinaryOperator &B) {
4325 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4326 "Both operands to a binary operator are not of the same type!", &B);
4327
4328 switch (B.getOpcode()) {
4329 // Check that integer arithmetic operators are only used with
4330 // integral operands.
4331 case Instruction::Add:
4332 case Instruction::Sub:
4333 case Instruction::Mul:
4334 case Instruction::SDiv:
4335 case Instruction::UDiv:
4336 case Instruction::SRem:
4337 case Instruction::URem:
4338 Check(B.getType()->isIntOrIntVectorTy(),
4339 "Integer arithmetic operators only work with integral types!", &B);
4340 Check(B.getType() == B.getOperand(0)->getType(),
4341 "Integer arithmetic operators must have same type "
4342 "for operands and result!",
4343 &B);
4344 break;
4345 // Check that floating-point arithmetic operators are only used with
4346 // floating-point operands.
4347 case Instruction::FAdd:
4348 case Instruction::FSub:
4349 case Instruction::FMul:
4350 case Instruction::FDiv:
4351 case Instruction::FRem:
4352 Check(B.getType()->isFPOrFPVectorTy(),
4353 "Floating-point arithmetic operators only work with "
4354 "floating-point types!",
4355 &B);
4356 Check(B.getType() == B.getOperand(0)->getType(),
4357 "Floating-point arithmetic operators must have same type "
4358 "for operands and result!",
4359 &B);
4360 break;
4361 // Check that logical operators are only used with integral operands.
4362 case Instruction::And:
4363 case Instruction::Or:
4364 case Instruction::Xor:
4365 Check(B.getType()->isIntOrIntVectorTy(),
4366 "Logical operators only work with integral types!", &B);
4367 Check(B.getType() == B.getOperand(0)->getType(),
4368 "Logical operators must have same type for operands and result!", &B);
4369 break;
4370 case Instruction::Shl:
4371 case Instruction::LShr:
4372 case Instruction::AShr:
4373 Check(B.getType()->isIntOrIntVectorTy(),
4374 "Shifts only work with integral types!", &B);
4375 Check(B.getType() == B.getOperand(0)->getType(),
4376 "Shift return type must be same as operands!", &B);
4377 break;
4378 default:
4379 llvm_unreachable("Unknown BinaryOperator opcode!");
4380 }
4381
4382 visitInstruction(B);
4383}
4384
4385void Verifier::visitICmpInst(ICmpInst &IC) {
4386 // Check that the operands are the same type
4387 Type *Op0Ty = IC.getOperand(0)->getType();
4388 Type *Op1Ty = IC.getOperand(1)->getType();
4389 Check(Op0Ty == Op1Ty,
4390 "Both operands to ICmp instruction are not of the same type!", &IC);
4391 // Check that the operands are the right type
4392 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4393 "Invalid operand types for ICmp instruction", &IC);
4394 // Check that the predicate is valid.
4395 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4396
4397 visitInstruction(IC);
4398}
4399
4400void Verifier::visitFCmpInst(FCmpInst &FC) {
4401 // Check that the operands are the same type
4402 Type *Op0Ty = FC.getOperand(0)->getType();
4403 Type *Op1Ty = FC.getOperand(1)->getType();
4404 Check(Op0Ty == Op1Ty,
4405 "Both operands to FCmp instruction are not of the same type!", &FC);
4406 // Check that the operands are the right type
4407 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4408 &FC);
4409 // Check that the predicate is valid.
4410 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4411
4412 visitInstruction(FC);
4413}
4414
// Verify an extractelement instruction's operands — presumably via
// ExtractElementInst::isValidOperands (the Check(...) head, file line 4416,
// is elided from this excerpt; only the failure message is visible).
4415 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4417 "Invalid extractelement operands!", &EI);
4418 visitInstruction(EI);
4419}
4420
4421void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4422 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4423 IE.getOperand(2)),
4424 "Invalid insertelement operands!", &IE);
4425 visitInstruction(IE);
4426}
4427
// Verify a shufflevector instruction's two input vectors and shuffle mask —
// presumably via ShuffleVectorInst::isValidOperands (the Check(...) head,
// file line 4429, is elided from this excerpt).
4428 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4430 SV.getShuffleMask()),
4431 "Invalid shufflevector operands!", &SV);
4432 visitInstruction(SV);
4433}
4434
// Verify a getelementptr: pointer-typed base (scalar or vector of pointers),
// sized source element type, integer indices, result type matching the
// indexed type, matching address space, and consistent vector widths for
// vector GEPs.
// NOTE(review): the gep_type_iterator loop header (file line 4483) is elided
// from this excerpt.
4435 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4436 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4437
4438 Check(isa<PointerType>(TargetTy),
4439 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4440 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4441
// Scalable vectors have no fixed layout, so structs containing them cannot
// be indexed into.
4442 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4443 Check(!STy->isScalableTy(),
4444 "getelementptr cannot target structure that contains scalable vector"
4445 "type",
4446 &GEP);
4447 }
4448
// Every index must be an integer (or integer vector), and the index list
// must name a valid type within the source element type.
4449 SmallVector<Value *, 16> Idxs(GEP.indices());
4450 Check(
4451 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4452 "GEP indexes must be integers", &GEP);
4453 Type *ElTy =
4454 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4455 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4456
4457 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4458
4459 Check(PtrTy && GEP.getResultElementType() == ElTy,
4460 "GEP is not of right type for indices!", &GEP, ElTy);
4461
4462 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4463 // Additional checks for vector GEPs.
4464 ElementCount GEPWidth = GEPVTy->getElementCount();
4465 if (GEP.getPointerOperandType()->isVectorTy())
4466 Check(
4467 GEPWidth ==
4468 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4469 "Vector GEP result width doesn't match operand's", &GEP);
// Any vector index must have the same element count as the GEP result.
4470 for (Value *Idx : Idxs) {
4471 Type *IndexTy = Idx->getType();
4472 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4473 ElementCount IndexWidth = IndexVTy->getElementCount();
4474 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4475 }
4476 Check(IndexTy->isIntOrIntVectorTy(),
4477 "All GEP indices should be of integer type");
4478 }
4479 }
4480
4481 // Check that GEP does not index into a vector with non-byte-addressable
4482 // elements.
4484 GTI != GTE; ++GTI) {
4485 if (GTI.isVector()) {
4486 Type *ElemTy = GTI.getIndexedType();
4487 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4488 "GEP into vector with non-byte-addressable element type", &GEP);
4489 }
4490 }
4491
// The result pointer must live in the same address space as its type says.
4492 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4493 "GEP address space doesn't match type", &GEP);
4494
4495 visitInstruction(GEP);
4496}
4497
4498static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4499 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4500}
4501
4502/// Verify !range and !absolute_symbol metadata. These have the same
4503/// restrictions, except !absolute_symbol allows the full set.
// Operands come in (low, high) pairs; at least one pair is required.
4504 void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4505 Type *Ty, RangeLikeMetadataKind Kind) {
4506 unsigned NumOperands = Range->getNumOperands();
4507 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4508 unsigned NumRanges = NumOperands / 2;
4509 Check(NumRanges >= 1, "It should have at least one range!", Range);
4510
4511 ConstantRange LastRange(1, true); // Dummy initial value
4512 for (unsigned i = 0; i < NumRanges; ++i) {
// Each bound must be a ConstantInt, both of the same width.
4513 ConstantInt *Low =
4514 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4515 Check(Low, "The lower limit must be an integer!", Low);
4516 ConstantInt *High =
4517 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4518 Check(High, "The upper limit must be an integer!", High);
4519
4520 Check(High->getType() == Low->getType(), "Range pair types must match!",
4521 &I);
4522
// !noalias.addrspace ranges are always i32; other kinds must match the
// (scalar) type of the annotated instruction.
4523 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4524 Check(High->getType()->isIntegerTy(32),
4525 "noalias.addrspace type must be i32!", &I);
4526 } else {
4527 Check(High->getType() == Ty->getScalarType(),
4528 "Range types must match instruction type!", &I);
4529 }
4530
4531 APInt HighV = High->getValue();
4532 APInt LowV = Low->getValue();
4533
4534 // ConstantRange asserts if the ranges are the same except for the min/max
4535 // value. Leave the cases it tolerates for the empty range error below.
4536 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4537 "The upper and lower limits cannot be the same value", &I);
4538
// Only !absolute_symbol may use the full set; no kind may be empty.
4539 ConstantRange CurRange(LowV, HighV);
4540 Check(!CurRange.isEmptySet() &&
4541 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4542 !CurRange.isFullSet()),
4543 "Range must not be empty!", Range);
// Consecutive pairs must be disjoint, ascending, and non-adjacent
// (adjacent pairs should have been merged into one).
4544 if (i != 0) {
4545 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4546 "Intervals are overlapping", Range);
4547 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4548 Range);
4549 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4550 Range);
4551 }
4552 LastRange = ConstantRange(LowV, HighV);
4553 }
// With 3+ pairs the list can wrap around the type's value space, so the
// first and last pairs must also be checked against each other.
4554 if (NumRanges > 2) {
4555 APInt FirstLow =
4556 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4557 APInt FirstHigh =
4558 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4559 ConstantRange FirstRange(FirstLow, FirstHigh);
4560 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4561 "Intervals are overlapping", Range);
4562 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4563 Range);
4564 }
4565}
4566
4567void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4568 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4569 "precondition violation");
4570 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4571}
4572
4573void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4574 Type *Ty) {
4575 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4576 "precondition violation");
4577 verifyRangeLikeMetadata(I, Range, Ty,
4578 RangeLikeMetadataKind::NoaliasAddrspace);
4579}
4580
4581void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4582 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4583 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4584 Check(!(Size & (Size - 1)),
4585 "atomic memory access' operand must have a power-of-two size", Ty, I);
4586}
4587
// Verify a load: pointer operand, supported alignment, sized result type,
// and — for atomic loads — legal ordering and a legal atomic access size.
// NOTE(review): file lines 4589 (definition of PTy), 4602 (the FP/vector
// half of the atomic type predicate), and 4609 (the sync-scope condition)
// are elided from this excerpt.
4588 void Verifier::visitLoadInst(LoadInst &LI) {
4590 Check(PTy, "Load operand must be a pointer.", &LI);
4591 Type *ElTy = LI.getType();
4592 if (MaybeAlign A = LI.getAlign()) {
4593 Check(A->value() <= Value::MaximumAlignment,
4594 "huge alignment values are unsupported", &LI);
4595 }
4596 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4597 if (LI.isAtomic()) {
// Loads may not have release semantics.
4598 Check(LI.getOrdering() != AtomicOrdering::Release &&
4599 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4600 "Load cannot have Release ordering", &LI);
4601 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4603 "atomic load operand must have integer, pointer, floating point, "
4604 "or vector type!",
4605 ElTy, &LI);
4606
4607 checkAtomicMemAccessSize(ElTy, &LI);
4608 } else {
4610 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4611 }
4612
4613 visitInstruction(LI);
4614}
4615
// Verify a store: pointer destination, supported alignment, sized value
// type, and — for atomic stores — legal ordering and access size.
// NOTE(review): file line 4630 (the FP/vector half of the atomic type
// predicate) is elided from this excerpt.
4616 void Verifier::visitStoreInst(StoreInst &SI) {
4617 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4618 Check(PTy, "Store operand must be a pointer.", &SI);
4619 Type *ElTy = SI.getOperand(0)->getType();
4620 if (MaybeAlign A = SI.getAlign()) {
4621 Check(A->value() <= Value::MaximumAlignment,
4622 "huge alignment values are unsupported", &SI);
4623 }
4624 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4625 if (SI.isAtomic()) {
// Stores may not have acquire semantics.
4626 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4627 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4628 "Store cannot have Acquire ordering", &SI);
4629 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4631 "atomic store operand must have integer, pointer, floating point, "
4632 "or vector type!",
4633 ElTy, &SI);
4634 checkAtomicMemAccessSize(ElTy, &SI);
4635 } else {
// Only atomic stores may carry a non-default synchronization scope.
4636 Check(SI.getSyncScopeID() == SyncScope::System,
4637 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4638 }
4639 visitInstruction(SI);
4640}
4641
4642/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4643void Verifier::verifySwiftErrorCall(CallBase &Call,
4644 const Value *SwiftErrorVal) {
4645 for (const auto &I : llvm::enumerate(Call.args())) {
4646 if (I.value() == SwiftErrorVal) {
4647 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4648 "swifterror value when used in a callsite should be marked "
4649 "with swifterror attribute",
4650 SwiftErrorVal, Call);
4651 }
4652 }
4653}
4654
// Restrict the uses of a swifterror value to loads, stores (as the pointer
// operand), and swifterror call arguments.
// NOTE(review): file line 4659 (the load/store/call half of the use-kind
// predicate) is elided from this excerpt.
4655 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4656 // Check that swifterror value is only used by loads, stores, or as
4657 // a swifterror argument.
4658 for (const User *U : SwiftErrorVal->users()) {
4660 isa<InvokeInst>(U),
4661 "swifterror value can only be loaded and stored from, or "
4662 "as a swifterror argument!",
4663 SwiftErrorVal, U);
4664 // If it is used by a store, check it is the second operand.
4665 if (auto StoreI = dyn_cast<StoreInst>(U))
4666 Check(StoreI->getOperand(1) == SwiftErrorVal,
4667 "swifterror value should be the second operand when used "
4668 "by stores",
4669 SwiftErrorVal, U);
// Call sites passing the value must mark the parameter swifterror.
4670 if (auto *Call = dyn_cast<CallBase>(U))
4671 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4672 }
4673}
4674
// Verify an alloca: sized allocated type, integer array size, supported
// alignment, swifterror constraints, and AMDGPU address-space rules.
// NOTE(review): file lines 4681, 4683, 4692, and 4698 (the Check(...) heads
// for the target-extension-type, array-size, swifterror-array, and AMDGPU
// address-space conditions) are elided from this excerpt.
4675 void Verifier::visitAllocaInst(AllocaInst &AI) {
4676 Type *Ty = AI.getAllocatedType();
4677 SmallPtrSet<Type*, 4> Visited;
4678 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4679 // Check if it's a target extension type that disallows being used on the
4680 // stack.
4682 "Alloca has illegal target extension type", &AI);
4684 "Alloca array size must have integer type", &AI);
4685 if (MaybeAlign A = AI.getAlign()) {
4686 Check(A->value() <= Value::MaximumAlignment,
4687 "huge alignment values are unsupported", &AI);
4688 }
4689
// swifterror allocas must be single pointer slots, and all their uses are
// validated recursively.
4690 if (AI.isSwiftError()) {
4691 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4693 "swifterror alloca must not be array allocation", &AI);
4694 verifySwiftErrorValue(&AI);
4695 }
4696
4697 if (TT.isAMDGPU()) {
4699 "alloca on amdgpu must be in addrspace(5)", &AI);
4700 }
4701
4702 visitInstruction(AI);
4703}
4704
4705void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4706 Type *ElTy = CXI.getOperand(1)->getType();
4707 Check(ElTy->isIntOrPtrTy(),
4708 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4709 checkAtomicMemAccessSize(ElTy, &CXI);
4710 visitInstruction(CXI);
4711}
4712
// Verify an atomicrmw: ordering stronger than unordered, operand type legal
// for the specific operation, and a legal atomic access size.
// NOTE(review): file lines 4725 (the Check(...) head of the FP-operation
// type condition) and 4737 (the operation-validity condition) are elided
// from this excerpt.
4713 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4714 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4715 "atomicrmw instructions cannot be unordered.", &RMWI);
4716 auto Op = RMWI.getOperation();
4717 Type *ElTy = RMWI.getOperand(1)->getType();
// xchg accepts integers, floats, and pointers; FP operations accept FP
// (or fixed FP vector) types; everything else is integer-only.
4718 if (Op == AtomicRMWInst::Xchg) {
4719 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4720 ElTy->isPointerTy(),
4721 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4722 " operand must have integer or floating point type!",
4723 &RMWI, ElTy);
4724 } else if (AtomicRMWInst::isFPOperation(Op)) {
4726 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4727 " operand must have floating-point or fixed vector of floating-point "
4728 "type!",
4729 &RMWI, ElTy);
4730 } else {
4731 Check(ElTy->isIntegerTy(),
4732 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4733 " operand must have integer type!",
4734 &RMWI, ElTy);
4735 }
4736 checkAtomicMemAccessSize(ElTy, &RMWI);
4738 "Invalid binary operation!", &RMWI);
4739 visitInstruction(RMWI);
4740}
4741
4742void Verifier::visitFenceInst(FenceInst &FI) {
4743 const AtomicOrdering Ordering = FI.getOrdering();
4744 Check(Ordering == AtomicOrdering::Acquire ||
4745 Ordering == AtomicOrdering::Release ||
4746 Ordering == AtomicOrdering::AcquireRelease ||
4747 Ordering == AtomicOrdering::SequentiallyConsistent,
4748 "fence instructions may only have acquire, release, acq_rel, or "
4749 "seq_cst ordering.",
4750 &FI);
4751 visitInstruction(FI);
4752}
4753
// Verify an extractvalue: the type named by the aggregate operand and index
// list must equal the instruction's result type. The Check(...) head with
// the getIndexedType call (file line 4755) is elided from this excerpt.
4754 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4756 EVI.getIndices()) == EVI.getType(),
4757 "Invalid ExtractValueInst operands!", &EVI);
4758
4759 visitInstruction(EVI);
4760}
4761
// Verify an insertvalue: the type named by the aggregate operand and index
// list must equal the type of the value being inserted (operand 1). The
// Check(...) head with the getIndexedType call (file line 4763) is elided
// from this excerpt.
4762 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4764 IVI.getIndices()) ==
4765 IVI.getOperand(1)->getType(),
4766 "Invalid InsertValueInst operands!", &IVI);
4767
4768 visitInstruction(IVI);
4769}
4770
4771static Value *getParentPad(Value *EHPad) {
4772 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4773 return FPI->getParentPad();
4774
4775 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4776}
4777
// Verify that every predecessor of an EH pad's block reaches it through a
// legal unwind edge: landingpads only via invoke unwind edges, catchpads
// only via their catchswitch, and other pads via terminators whose
// originating pad chain legally exits into this pad's parent.
4778 void Verifier::visitEHPadPredecessors(Instruction &I) {
4779 assert(I.isEHPad());
4780
4781 BasicBlock *BB = I.getParent();
4782 Function *F = BB->getParent();
4783
4784 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4785
4786 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4787 // The landingpad instruction defines its parent as a landing pad block. The
4788 // landing pad block may be branched to only by the unwind edge of an
4789 // invoke.
4790 for (BasicBlock *PredBB : predecessors(BB)) {
4791 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4792 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4793 "Block containing LandingPadInst must be jumped to "
4794 "only by the unwind edge of an invoke.",
4795 LPI);
4796 }
4797 return;
4798 }
4799 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4800 if (!pred_empty(BB))
4801 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4802 "Block containg CatchPadInst must be jumped to "
4803 "only by its catchswitch.",
4804 CPI);
4805 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4806 "Catchswitch cannot unwind to one of its catchpads",
4807 CPI->getCatchSwitch(), CPI);
4808 return;
4809 }
4810
4811 // Verify that each pred has a legal terminator with a legal to/from EH
4812 // pad relationship.
4813 Instruction *ToPad = &I;
4814 Value *ToPadParent = getParentPad(ToPad);
4815 for (BasicBlock *PredBB : predecessors(BB)) {
4816 Instruction *TI = PredBB->getTerminator();
4817 Value *FromPad;
// Determine the pad the edge unwinds out of: the invoke's funclet bundle
// (or token none), a cleanupret's cleanup pad, or the catchswitch itself.
4818 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4819 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4820 "EH pad must be jumped to via an unwind edge", ToPad, II);
// Nounwind intrinsic calls that never lower to real calls cannot unwind,
// so their funclet membership is irrelevant here.
4821 auto *CalledFn =
4822 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4823 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4824 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4825 continue;
4826 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4827 FromPad = Bundle->Inputs[0];
4828 else
4829 FromPad = ConstantTokenNone::get(II->getContext());
4830 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4831 FromPad = CRI->getOperand(0);
4832 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4833 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4834 FromPad = CSI;
4835 } else {
4836 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4837 }
4838
4839 // The edge may exit from zero or more nested pads.
// Walk up the parent-pad chain from FromPad; it must reach ToPadParent
// without passing through ToPad itself, token none, or a cycle.
4840 SmallPtrSet<Value *, 8> Seen;
4841 for (;; FromPad = getParentPad(FromPad)) {
4842 Check(FromPad != ToPad,
4843 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4844 if (FromPad == ToPadParent) {
4845 // This is a legal unwind edge.
4846 break;
4847 }
4848 Check(!isa<ConstantTokenNone>(FromPad),
4849 "A single unwind edge may only enter one EH pad", TI);
4850 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4851 FromPad);
4852
4853 // This will be diagnosed on the corresponding instruction already. We
4854 // need the extra check here to make sure getParentPad() works.
4855 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4856 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4857 }
4858 }
4859}
4860
// Verify a landingpad: it needs a clause or cleanup flag, a personality on
// the function, a consistent result type across the function, first-non-PHI
// placement, and well-typed catch/filter clauses.
// NOTE(review): file line 4893 (the Check(...) head of the filter-clause
// array-type condition) is elided from this excerpt.
4861 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4862 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4863 // isn't a cleanup.
4864 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4865 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4866
4867 visitEHPadPredecessors(LPI);
4868
// All landingpads within one function must produce the same type; the
// first one seen fixes it.
4869 if (!LandingPadResultTy)
4870 LandingPadResultTy = LPI.getType();
4871 else
4872 Check(LandingPadResultTy == LPI.getType(),
4873 "The landingpad instruction should have a consistent result type "
4874 "inside a function.",
4875 &LPI);
4876
4877 Function *F = LPI.getParent()->getParent();
4878 Check(F->hasPersonalityFn(),
4879 "LandingPadInst needs to be in a function with a personality.", &LPI);
4880
4881 // The landingpad instruction must be the first non-PHI instruction in the
4882 // block.
4883 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4884 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4885
4886 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4887 Constant *Clause = LPI.getClause(i);
4888 if (LPI.isCatch(i)) {
4889 Check(isa<PointerType>(Clause->getType()),
4890 "Catch operand does not have pointer type!", &LPI);
4891 } else {
4892 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4894 "Filter operand is not an array of constants!", &LPI);
4895 }
4896 }
4897
4898 visitInstruction(LPI);
4899}
4900
// Verify a resume: the function needs a personality, and the resumed value
// must have the same type as the function's landingpads.
// NOTE(review): file line 4902 (the Check(...) head of the personality
// condition) is elided from this excerpt.
4901 void Verifier::visitResumeInst(ResumeInst &RI) {
4903 "ResumeInst needs to be in a function with a personality.", &RI);
4904
// The resume operand participates in the same function-wide result-type
// agreement as landingpad instructions.
4905 if (!LandingPadResultTy)
4906 LandingPadResultTy = RI.getValue()->getType();
4907 else
4908 Check(LandingPadResultTy == RI.getValue()->getType(),
4909 "The resume instruction should have a consistent result type "
4910 "inside a function.",
4911 &RI);
4912
4913 visitTerminator(RI);
4914}
4915
// Verify a catchpad: personality on the function, a catchswitch parent,
// and first-non-PHI placement in its block.
// NOTE(review): file line 4923 (the Check(...) head of the parent-is-a-
// catchswitch condition) is elided from this excerpt.
4916 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4917 BasicBlock *BB = CPI.getParent();
4918
4919 Function *F = BB->getParent();
4920 Check(F->hasPersonalityFn(),
4921 "CatchPadInst needs to be in a function with a personality.", &CPI);
4922
4924 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4925 CPI.getParentPad());
4926
4927 // The catchpad instruction must be the first non-PHI instruction in the
4928 // block.
4929 Check(&*BB->getFirstNonPHIIt() == &CPI,
4930 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4931
4932 visitEHPadPredecessors(CPI);
4933 visitFuncletPadInst(CPI);
4934}
4935
4936void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4937 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4938 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4939 CatchReturn.getOperand(0));
4940
4941 visitTerminator(CatchReturn);
4942}
4943
4944void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4945 BasicBlock *BB = CPI.getParent();
4946
4947 Function *F = BB->getParent();
4948 Check(F->hasPersonalityFn(),
4949 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4950
4951 // The cleanuppad instruction must be the first non-PHI instruction in the
4952 // block.
4953 Check(&*BB->getFirstNonPHIIt() == &CPI,
4954 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4955
4956 auto *ParentPad = CPI.getParentPad();
4957 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4958 "CleanupPadInst has an invalid parent.", &CPI);
4959
4960 visitEHPadPredecessors(CPI);
4961 visitFuncletPadInst(CPI);
4962}
4963
// Verify a funclet pad (catchpad/cleanuppad): all unwind edges that exit
// FPI must agree on a single unwind destination, and — when FPI's parent is
// a catchswitch — that destination must match the catchswitch's. Nested
// cleanup pads are searched recursively via a worklist because a cleanup's
// unwind destination is only discoverable through its uses.
4964 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4965 User *FirstUser = nullptr;
4966 Value *FirstUnwindPad = nullptr;
4967 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4968 SmallPtrSet<FuncletPadInst *, 8> Seen;
4969
4970 while (!Worklist.empty()) {
4971 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4972 Check(Seen.insert(CurrentPad).second,
4973 "FuncletPadInst must not be nested within itself", CurrentPad);
4974 Value *UnresolvedAncestorPad = nullptr;
// Each user of the pad either determines an unwind destination or is
// skipped (non-unwinding calls, catchret, nested cleanup pads).
4975 for (User *U : CurrentPad->users()) {
4976 BasicBlock *UnwindDest;
4977 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4978 UnwindDest = CRI->getUnwindDest();
4979 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4980 // We allow catchswitch unwind to caller to nest
4981 // within an outer pad that unwinds somewhere else,
4982 // because catchswitch doesn't have a nounwind variant.
4983 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4984 if (CSI->unwindsToCaller())
4985 continue;
4986 UnwindDest = CSI->getUnwindDest();
4987 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4988 UnwindDest = II->getUnwindDest();
4989 } else if (isa<CallInst>(U)) {
4990 // Calls which don't unwind may be found inside funclet
4991 // pads that unwind somewhere else. We don't *require*
4992 // such calls to be annotated nounwind.
4993 continue;
4994 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4995 // The unwind dest for a cleanup can only be found by
4996 // recursive search. Add it to the worklist, and we'll
4997 // search for its first use that determines where it unwinds.
4998 Worklist.push_back(CPI);
4999 continue;
5000 } else {
5001 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
5002 continue;
5003 }
5004
5005 Value *UnwindPad;
5006 bool ExitsFPI;
5007 if (UnwindDest) {
5008 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
5009 if (!cast<Instruction>(UnwindPad)->isEHPad())
5010 continue;
5011 Value *UnwindParent = getParentPad(UnwindPad);
5012 // Ignore unwind edges that don't exit CurrentPad.
5013 if (UnwindParent == CurrentPad)
5014 continue;
5015 // Determine whether the original funclet pad is exited,
5016 // and if we are scanning nested pads determine how many
5017 // of them are exited so we can stop searching their
5018 // children.
5019 Value *ExitedPad = CurrentPad;
5020 ExitsFPI = false;
5021 do {
5022 if (ExitedPad == &FPI) {
5023 ExitsFPI = true;
5024 // Now we can resolve any ancestors of CurrentPad up to
5025 // FPI, but not including FPI since we need to make sure
5026 // to check all direct users of FPI for consistency.
5027 UnresolvedAncestorPad = &FPI;
5028 break;
5029 }
5030 Value *ExitedParent = getParentPad(ExitedPad);
5031 if (ExitedParent == UnwindParent) {
5032 // ExitedPad is the ancestor-most pad which this unwind
5033 // edge exits, so we can resolve up to it, meaning that
5034 // ExitedParent is the first ancestor still unresolved.
5035 UnresolvedAncestorPad = ExitedParent;
5036 break;
5037 }
5038 ExitedPad = ExitedParent;
5039 } while (!isa<ConstantTokenNone>(ExitedPad));
5040 } else {
5041 // Unwinding to caller exits all pads.
5042 UnwindPad = ConstantTokenNone::get(FPI.getContext());
5043 ExitsFPI = true;
5044 UnresolvedAncestorPad = &FPI;
5045 }
5046
5047 if (ExitsFPI) {
5048 // This unwind edge exits FPI. Make sure it agrees with other
5049 // such edges.
5050 if (FirstUser) {
5051 Check(UnwindPad == FirstUnwindPad,
5052 "Unwind edges out of a funclet "
5053 "pad must have the same unwind "
5054 "dest",
5055 &FPI, U, FirstUser);
5056 } else {
5057 FirstUser = U;
5058 FirstUnwindPad = UnwindPad;
5059 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
5060 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
5061 getParentPad(UnwindPad) == getParentPad(&FPI))
5062 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
5063 }
5064 }
5065 // Make sure we visit all uses of FPI, but for nested pads stop as
5066 // soon as we know where they unwind to.
5067 if (CurrentPad != &FPI)
5068 break;
5069 }
5070 if (UnresolvedAncestorPad) {
5071 if (CurrentPad == UnresolvedAncestorPad) {
5072 // When CurrentPad is FPI itself, we don't mark it as resolved even if
5073 // we've found an unwind edge that exits it, because we need to verify
5074 // all direct uses of FPI.
5075 assert(CurrentPad == &FPI);
5076 continue;
5077 }
5078 // Pop off the worklist any nested pads that we've found an unwind
5079 // destination for. The pads on the worklist are the uncles,
5080 // great-uncles, etc. of CurrentPad. We've found an unwind destination
5081 // for all ancestors of CurrentPad up to but not including
5082 // UnresolvedAncestorPad.
5083 Value *ResolvedPad = CurrentPad;
5084 while (!Worklist.empty()) {
5085 Value *UnclePad = Worklist.back();
5086 Value *AncestorPad = getParentPad(UnclePad);
5087 // Walk ResolvedPad up the ancestor list until we either find the
5088 // uncle's parent or the last resolved ancestor.
5089 while (ResolvedPad != AncestorPad) {
5090 Value *ResolvedParent = getParentPad(ResolvedPad);
5091 if (ResolvedParent == UnresolvedAncestorPad) {
5092 break;
5093 }
5094 ResolvedPad = ResolvedParent;
5095 }
5096 // If the resolved ancestor search didn't find the uncle's parent,
5097 // then the uncle is not yet resolved.
5098 if (ResolvedPad != AncestorPad)
5099 break;
5100 // This uncle is resolved, so pop it from the worklist.
5101 Worklist.pop_back();
5102 }
5103 }
5104 }
5105
// If FPI's parent is a catchswitch, the unwind destination chosen above
// must agree with the catchswitch's own unwind destination.
5106 if (FirstUnwindPad) {
5107 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
5108 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
5109 Value *SwitchUnwindPad;
5110 if (SwitchUnwindDest)
5111 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
5112 else
5113 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
5114 Check(SwitchUnwindPad == FirstUnwindPad,
5115 "Unwind edges out of a catch must have the same unwind dest as "
5116 "the parent catchswitch",
5117 &FPI, FirstUser, CatchSwitch);
5118 }
5119 }
5120
5121 visitInstruction(FPI);
5122}
5123
5124void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5125 BasicBlock *BB = CatchSwitch.getParent();
5126
5127 Function *F = BB->getParent();
5128 Check(F->hasPersonalityFn(),
5129 "CatchSwitchInst needs to be in a function with a personality.",
5130 &CatchSwitch);
5131
5132 // The catchswitch instruction must be the first non-PHI instruction in the
5133 // block.
5134 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5135 "CatchSwitchInst not the first non-PHI instruction in the block.",
5136 &CatchSwitch);
5137
5138 auto *ParentPad = CatchSwitch.getParentPad();
5139 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5140 "CatchSwitchInst has an invalid parent.", ParentPad);
5141
5142 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5143 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5144 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5145 "CatchSwitchInst must unwind to an EH block which is not a "
5146 "landingpad.",
5147 &CatchSwitch);
5148
5149 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5150 if (getParentPad(&*I) == ParentPad)
5151 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5152 }
5153
5154 Check(CatchSwitch.getNumHandlers() != 0,
5155 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5156
5157 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5158 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5159 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5160 }
5161
5162 visitEHPadPredecessors(CatchSwitch);
5163 visitTerminator(CatchSwitch);
5164}
5165
5166void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5168 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5169 CRI.getOperand(0));
5170
5171 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5172 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5173 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5174 "CleanupReturnInst must unwind to an EH block which is not a "
5175 "landingpad.",
5176 &CRI);
5177 }
5178
5179 visitTerminator(CRI);
5180}
5181
5182void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5183 Instruction *Op = cast<Instruction>(I.getOperand(i));
5184 // If the we have an invalid invoke, don't try to compute the dominance.
5185 // We already reject it in the invoke specific checks and the dominance
5186 // computation doesn't handle multiple edges.
5187 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5188 if (II->getNormalDest() == II->getUnwindDest())
5189 return;
5190 }
5191
5192 // Quick check whether the def has already been encountered in the same block.
5193 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5194 // uses are defined to happen on the incoming edge, not at the instruction.
5195 //
5196 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5197 // wrapping an SSA value, assert that we've already encountered it. See
5198 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5199 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5200 return;
5201
5202 const Use &U = I.getOperandUse(i);
5203 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5204}
5205
5206void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5207 Check(I.getType()->isPointerTy(),
5208 "dereferenceable, dereferenceable_or_null "
5209 "apply only to pointer types",
5210 &I);
5212 "dereferenceable, dereferenceable_or_null apply only to load"
5213 " and inttoptr instructions, use attributes for calls or invokes",
5214 &I);
5215 Check(MD->getNumOperands() == 1,
5216 "dereferenceable, dereferenceable_or_null "
5217 "take one operand!",
5218 &I);
5219 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5220 Check(CI && CI->getType()->isIntegerTy(64),
5221 "dereferenceable, "
5222 "dereferenceable_or_null metadata value must be an i64!",
5223 &I);
5224}
5225
5226void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5227 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5228 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5229 &I);
5230 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5231}
5232
5233void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5234 auto GetBranchingTerminatorNumOperands = [&]() {
5235 unsigned ExpectedNumOperands = 0;
5236 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5237 ExpectedNumOperands = BI->getNumSuccessors();
5238 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5239 ExpectedNumOperands = SI->getNumSuccessors();
5240 else if (isa<CallInst>(&I))
5241 ExpectedNumOperands = 1;
5242 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5243 ExpectedNumOperands = IBI->getNumDestinations();
5244 else if (isa<SelectInst>(&I))
5245 ExpectedNumOperands = 2;
5246 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5247 ExpectedNumOperands = CI->getNumSuccessors();
5248 return ExpectedNumOperands;
5249 };
5250 Check(MD->getNumOperands() >= 1,
5251 "!prof annotations should have at least 1 operand", MD);
5252 // Check first operand.
5253 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5255 "expected string with name of the !prof annotation", MD);
5256 MDString *MDS = cast<MDString>(MD->getOperand(0));
5257 StringRef ProfName = MDS->getString();
5258
5260 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5261 "'unknown' !prof should only appear on instructions on which "
5262 "'branch_weights' would",
5263 MD);
5264 verifyUnknownProfileMetadata(MD);
5265 return;
5266 }
5267
5268 Check(MD->getNumOperands() >= 2,
5269 "!prof annotations should have no less than 2 operands", MD);
5270
5271 // Check consistency of !prof branch_weights metadata.
5272 if (ProfName == MDProfLabels::BranchWeights) {
5273 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5274 if (isa<InvokeInst>(&I)) {
5275 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5276 "Wrong number of InvokeInst branch_weights operands", MD);
5277 } else {
5278 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5279 if (ExpectedNumOperands == 0)
5280 CheckFailed("!prof branch_weights are not allowed for this instruction",
5281 MD);
5282
5283 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5284 MD);
5285 }
5286 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5287 ++i) {
5288 auto &MDO = MD->getOperand(i);
5289 Check(MDO, "second operand should not be null", MD);
5291 "!prof brunch_weights operand is not a const int");
5292 }
5293 } else if (ProfName == MDProfLabels::ValueProfile) {
5294 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5295 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5296 Check(KindInt, "VP !prof missing kind argument", MD);
5297
5298 auto Kind = KindInt->getZExtValue();
5299 Check(Kind >= InstrProfValueKind::IPVK_First &&
5300 Kind <= InstrProfValueKind::IPVK_Last,
5301 "Invalid VP !prof kind", MD);
5302 Check(MD->getNumOperands() % 2 == 1,
5303 "VP !prof should have an even number "
5304 "of arguments after 'VP'",
5305 MD);
5306 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5307 Kind == InstrProfValueKind::IPVK_MemOPSize)
5309 "VP !prof indirect call or memop size expected to be applied to "
5310 "CallBase instructions only",
5311 MD);
5312 } else {
5313 CheckFailed("expected either branch_weights or VP profile name", MD);
5314 }
5315}
5316
5317void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5318 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5319 // DIAssignID metadata must be attached to either an alloca or some form of
5320 // store/memory-writing instruction.
5321 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5322 // possible store intrinsics.
5323 bool ExpectedInstTy =
5325 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5326 I, MD);
5327 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5328 // only be found as DbgAssignIntrinsic operands.
5329 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5330 for (auto *User : AsValue->users()) {
5332 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5333 MD, User);
5334 // All of the dbg.assign intrinsics should be in the same function as I.
5335 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5336 CheckDI(DAI->getFunction() == I.getFunction(),
5337 "dbg.assign not in same function as inst", DAI, &I);
5338 }
5339 }
5340 for (DbgVariableRecord *DVR :
5341 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5342 CheckDI(DVR->isDbgAssign(),
5343 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5344 CheckDI(DVR->getFunction() == I.getFunction(),
5345 "DVRAssign not in same function as inst", DVR, &I);
5346 }
5347}
5348
5349void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5351 "!mmra metadata attached to unexpected instruction kind", I, MD);
5352
5353 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5354 // list of tags such as !2 in the following example:
5355 // !0 = !{!"a", !"b"}
5356 // !1 = !{!"c", !"d"}
5357 // !2 = !{!0, !1}
5358 if (MMRAMetadata::isTagMD(MD))
5359 return;
5360
5361 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5362 for (const MDOperand &MDOp : MD->operands())
5363 Check(MMRAMetadata::isTagMD(MDOp.get()),
5364 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5365}
5366
5367void Verifier::visitCallStackMetadata(MDNode *MD) {
5368 // Call stack metadata should consist of a list of at least 1 constant int
5369 // (representing a hash of the location).
5370 Check(MD->getNumOperands() >= 1,
5371 "call stack metadata should have at least 1 operand", MD);
5372
5373 for (const auto &Op : MD->operands())
5375 "call stack metadata operand should be constant integer", Op);
5376}
5377
5378void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5379 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5380 Check(MD->getNumOperands() >= 1,
5381 "!memprof annotations should have at least 1 metadata operand "
5382 "(MemInfoBlock)",
5383 MD);
5384
5385 // Check each MIB
5386 for (auto &MIBOp : MD->operands()) {
5387 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5388 // The first operand of an MIB should be the call stack metadata.
5389 // There rest of the operands should be MDString tags, and there should be
5390 // at least one.
5391 Check(MIB->getNumOperands() >= 2,
5392 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5393
5394 // Check call stack metadata (first operand).
5395 Check(MIB->getOperand(0) != nullptr,
5396 "!memprof MemInfoBlock first operand should not be null", MIB);
5397 Check(isa<MDNode>(MIB->getOperand(0)),
5398 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5399 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5400 visitCallStackMetadata(StackMD);
5401
5402 // The second MIB operand should be MDString.
5404 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5405
5406 // Any remaining should be MDNode that are pairs of integers
5407 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5408 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5409 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5410 MIB);
5411 Check(OpNode->getNumOperands() == 2,
5412 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5413 "operands",
5414 MIB);
5415 // Check that all of Op's operands are ConstantInt.
5416 Check(llvm::all_of(OpNode->operands(),
5417 [](const MDOperand &Op) {
5418 return mdconst::hasa<ConstantInt>(Op);
5419 }),
5420 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5421 "ConstantInt operands",
5422 MIB);
5423 }
5424 }
5425}
5426
5427void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5428 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5429 // Verify the partial callstack annotated from memprof profiles. This callsite
5430 // is a part of a profiled allocation callstack.
5431 visitCallStackMetadata(MD);
5432}
5433
5434static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5435 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5436 return isa<ConstantInt>(VAL->getValue());
5437 return false;
5438}
5439
5440void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5441 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5442 &I);
5443 for (Metadata *Op : MD->operands()) {
5445 "The callee_type metadata must be a list of type metadata nodes", Op);
5446 auto *TypeMD = cast<MDNode>(Op);
5447 Check(TypeMD->getNumOperands() == 2,
5448 "Well-formed generalized type metadata must contain exactly two "
5449 "operands",
5450 Op);
5451 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5452 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5453 "The first operand of type metadata for functions must be zero", Op);
5454 Check(TypeMD->hasGeneralizedMDString(),
5455 "Only generalized type metadata can be part of the callee_type "
5456 "metadata list",
5457 Op);
5458 }
5459}
5460
5461void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5462 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5463 Check(Annotation->getNumOperands() >= 1,
5464 "annotation must have at least one operand");
5465 for (const MDOperand &Op : Annotation->operands()) {
5466 bool TupleOfStrings =
5467 isa<MDTuple>(Op.get()) &&
5468 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5469 return isa<MDString>(Annotation.get());
5470 });
5471 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5472 "operands must be a string or a tuple of strings");
5473 }
5474}
5475
5476void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5477 unsigned NumOps = MD->getNumOperands();
5478 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5479 MD);
5480 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5481 "first scope operand must be self-referential or string", MD);
5482 if (NumOps == 3)
5484 "third scope operand must be string (if used)", MD);
5485
5486 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5487 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5488
5489 unsigned NumDomainOps = Domain->getNumOperands();
5490 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5491 "domain must have one or two operands", Domain);
5492 Check(Domain->getOperand(0).get() == Domain ||
5493 isa<MDString>(Domain->getOperand(0)),
5494 "first domain operand must be self-referential or string", Domain);
5495 if (NumDomainOps == 2)
5496 Check(isa<MDString>(Domain->getOperand(1)),
5497 "second domain operand must be string (if used)", Domain);
5498}
5499
5500void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5501 for (const MDOperand &Op : MD->operands()) {
5502 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5503 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5504 visitAliasScopeMetadata(OpMD);
5505 }
5506}
5507
5508void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5509 auto IsValidAccessScope = [](const MDNode *MD) {
5510 return MD->getNumOperands() == 0 && MD->isDistinct();
5511 };
5512
5513 // It must be either an access scope itself...
5514 if (IsValidAccessScope(MD))
5515 return;
5516
5517 // ...or a list of access scopes.
5518 for (const MDOperand &Op : MD->operands()) {
5519 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5520 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5521 Check(IsValidAccessScope(OpMD),
5522 "Access scope list contains invalid access scope", MD);
5523 }
5524}
5525
5526void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5527 static const char *ValidArgs[] = {"address_is_null", "address",
5528 "read_provenance", "provenance"};
5529
5530 auto *SI = dyn_cast<StoreInst>(&I);
5531 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5532 Check(SI->getValueOperand()->getType()->isPointerTy(),
5533 "!captures metadata can only be applied to store with value operand of "
5534 "pointer type",
5535 &I);
5536 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5537 &I);
5538
5539 for (Metadata *Op : Captures->operands()) {
5540 auto *Str = dyn_cast<MDString>(Op);
5541 Check(Str, "!captures metadata must be a list of strings", &I);
5542 Check(is_contained(ValidArgs, Str->getString()),
5543 "invalid entry in !captures metadata", &I, Str);
5544 }
5545}
5546
5547void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5548 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5549 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5550 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5552 "expected integer constant", MD);
5553}
5554
5555/// verifyInstruction - Verify that an instruction is well formed.
5556///
5557void Verifier::visitInstruction(Instruction &I) {
5558 BasicBlock *BB = I.getParent();
5559 Check(BB, "Instruction not embedded in basic block!", &I);
5560
5561 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5562 for (User *U : I.users()) {
5563 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5564 "Only PHI nodes may reference their own value!", &I);
5565 }
5566 }
5567
5568 // Check that void typed values don't have names
5569 Check(!I.getType()->isVoidTy() || !I.hasName(),
5570 "Instruction has a name, but provides a void value!", &I);
5571
5572 // Check that the return value of the instruction is either void or a legal
5573 // value type.
5574 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5575 "Instruction returns a non-scalar type!", &I);
5576
5577 // Check that the instruction doesn't produce metadata. Calls are already
5578 // checked against the callee type.
5579 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5580 "Invalid use of metadata!", &I);
5581
5582 // Check that all uses of the instruction, if they are instructions
5583 // themselves, actually have parent basic blocks. If the use is not an
5584 // instruction, it is an error!
5585 for (Use &U : I.uses()) {
5586 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5587 Check(Used->getParent() != nullptr,
5588 "Instruction referencing"
5589 " instruction not embedded in a basic block!",
5590 &I, Used);
5591 else {
5592 CheckFailed("Use of instruction is not an instruction!", U);
5593 return;
5594 }
5595 }
5596
5597 // Get a pointer to the call base of the instruction if it is some form of
5598 // call.
5599 const CallBase *CBI = dyn_cast<CallBase>(&I);
5600
5601 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5602 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5603
5604 // Check to make sure that only first-class-values are operands to
5605 // instructions.
5606 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5607 Check(false, "Instruction operands must be first-class values!", &I);
5608 }
5609
5610 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5611 // This code checks whether the function is used as the operand of a
5612 // clang_arc_attachedcall operand bundle.
5613 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5614 int Idx) {
5615 return CBI && CBI->isOperandBundleOfType(
5617 };
5618
5619 // Check to make sure that the "address of" an intrinsic function is never
5620 // taken. Ignore cases where the address of the intrinsic function is used
5621 // as the argument of operand bundle "clang.arc.attachedcall" as those
5622 // cases are handled in verifyAttachedCallBundle.
5623 Check((!F->isIntrinsic() ||
5624 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5625 IsAttachedCallOperand(F, CBI, i)),
5626 "Cannot take the address of an intrinsic!", &I);
5627 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5628 F->getIntrinsicID() == Intrinsic::donothing ||
5629 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5630 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5631 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5632 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5633 F->getIntrinsicID() == Intrinsic::coro_resume ||
5634 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5635 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5636 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5637 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5638 F->getIntrinsicID() ==
5639 Intrinsic::experimental_patchpoint_void ||
5640 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5641 F->getIntrinsicID() == Intrinsic::fake_use ||
5642 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5643 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5644 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5645 IsAttachedCallOperand(F, CBI, i),
5646 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5647 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5648 "wasm.(re)throw",
5649 &I);
5650 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5651 &M, F, F->getParent());
5652 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5653 Check(OpBB->getParent() == BB->getParent(),
5654 "Referring to a basic block in another function!", &I);
5655 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5656 Check(OpArg->getParent() == BB->getParent(),
5657 "Referring to an argument in another function!", &I);
5658 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5659 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5660 &M, GV, GV->getParent());
5661 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5662 Check(OpInst->getFunction() == BB->getParent(),
5663 "Referring to an instruction in another function!", &I);
5664 verifyDominatesUse(I, i);
5665 } else if (isa<InlineAsm>(I.getOperand(i))) {
5666 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5667 "Cannot take the address of an inline asm!", &I);
5668 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5669 visitConstantExprsRecursively(C);
5670 }
5671 }
5672
5673 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5674 Check(I.getType()->isFPOrFPVectorTy(),
5675 "fpmath requires a floating point result!", &I);
5676 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5677 if (ConstantFP *CFP0 =
5679 const APFloat &Accuracy = CFP0->getValueAPF();
5680 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5681 "fpmath accuracy must have float type", &I);
5682 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5683 "fpmath accuracy not a positive number!", &I);
5684 } else {
5685 Check(false, "invalid fpmath accuracy!", &I);
5686 }
5687 }
5688
5689 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5691 "Ranges are only for loads, calls and invokes!", &I);
5692 visitRangeMetadata(I, Range, I.getType());
5693 }
5694
5695 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5698 "noalias.addrspace are only for memory operations!", &I);
5699 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5700 }
5701
5702 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5704 "invariant.group metadata is only for loads and stores", &I);
5705 }
5706
5707 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5708 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5709 &I);
5711 "nonnull applies only to load instructions, use attributes"
5712 " for calls or invokes",
5713 &I);
5714 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5715 }
5716
5717 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5718 visitDereferenceableMetadata(I, MD);
5719
5720 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5721 visitDereferenceableMetadata(I, MD);
5722
5723 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5724 visitNofreeMetadata(I, MD);
5725
5726 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5727 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5728
5729 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5730 visitAliasScopeListMetadata(MD);
5731 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5732 visitAliasScopeListMetadata(MD);
5733
5734 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5735 visitAccessGroupMetadata(MD);
5736
5737 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5738 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5739 &I);
5741 "align applies only to load instructions, "
5742 "use attributes for calls or invokes",
5743 &I);
5744 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5745 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5746 Check(CI && CI->getType()->isIntegerTy(64),
5747 "align metadata value must be an i64!", &I);
5748 uint64_t Align = CI->getZExtValue();
5749 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5750 &I);
5751 Check(Align <= Value::MaximumAlignment,
5752 "alignment is larger that implementation defined limit", &I);
5753 }
5754
5755 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5756 visitProfMetadata(I, MD);
5757
5758 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5759 visitMemProfMetadata(I, MD);
5760
5761 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5762 visitCallsiteMetadata(I, MD);
5763
5764 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5765 visitCalleeTypeMetadata(I, MD);
5766
5767 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5768 visitDIAssignIDMetadata(I, MD);
5769
5770 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5771 visitMMRAMetadata(I, MMRA);
5772
5773 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5774 visitAnnotationMetadata(Annotation);
5775
5776 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5777 visitCapturesMetadata(I, Captures);
5778
5779 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5780 visitAllocTokenMetadata(I, MD);
5781
5782 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5783 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5784 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5785
5786 if (auto *DL = dyn_cast<DILocation>(N)) {
5787 if (DL->getAtomGroup()) {
5788 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5789 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5790 "Instructions enabled",
5791 DL, DL->getScope()->getSubprogram());
5792 }
5793 }
5794 }
5795
5797 I.getAllMetadata(MDs);
5798 for (auto Attachment : MDs) {
5799 unsigned Kind = Attachment.first;
5800 auto AllowLocs =
5801 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5802 ? AreDebugLocsAllowed::Yes
5803 : AreDebugLocsAllowed::No;
5804 visitMDNode(*Attachment.second, AllowLocs);
5805 }
5806
5807 InstsInThisBlock.insert(&I);
5808}
5809
5810/// Allow intrinsics to be verified in different ways.
5811void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5813 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5814 IF);
5815
5816 // Verify that the intrinsic prototype lines up with what the .td files
5817 // describe.
5818 FunctionType *IFTy = IF->getFunctionType();
5819 bool IsVarArg = IFTy->isVarArg();
5820
5824
5825 // Walk the descriptors to extract overloaded types.
5830 "Intrinsic has incorrect return type!", IF);
5832 "Intrinsic has incorrect argument type!", IF);
5833
5834 // Verify if the intrinsic call matches the vararg property.
5835 if (IsVarArg)
5837 "Intrinsic was not defined with variable arguments!", IF);
5838 else
5840 "Callsite was not defined with variable arguments!", IF);
5841
5842 // All descriptors should be absorbed by now.
5843 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5844
5845 // Now that we have the intrinsic ID and the actual argument types (and we
5846 // know they are legal for the intrinsic!) get the intrinsic name through the
5847 // usual means. This allows us to verify the mangling of argument types into
5848 // the name.
5849 const std::string ExpectedName =
5850 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5851 Check(ExpectedName == IF->getName(),
5852 "Intrinsic name not mangled correctly for type arguments! "
5853 "Should be: " +
5854 ExpectedName,
5855 IF);
5856
5857 // If the intrinsic takes MDNode arguments, verify that they are either global
5858 // or are local to *this* function.
5859 for (Value *V : Call.args()) {
5860 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5861 visitMetadataAsValue(*MD, Call.getCaller());
5862 if (auto *Const = dyn_cast<Constant>(V))
5863 Check(!Const->getType()->isX86_AMXTy(),
5864 "const x86_amx is not allowed in argument!");
5865 }
5866
5867 switch (ID) {
5868 default:
5869 break;
5870 case Intrinsic::assume: {
5871 if (Call.hasOperandBundles()) {
5873 Check(Cond && Cond->isOne(),
5874 "assume with operand bundles must have i1 true condition", Call);
5875 }
5876 for (auto &Elem : Call.bundle_op_infos()) {
5877 unsigned ArgCount = Elem.End - Elem.Begin;
5878 // Separate storage assumptions are special insofar as they're the only
5879 // operand bundles allowed on assumes that aren't parameter attributes.
5880 if (Elem.Tag->getKey() == "separate_storage") {
5881 Check(ArgCount == 2,
5882 "separate_storage assumptions should have 2 arguments", Call);
5883 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5884 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5885 "arguments to separate_storage assumptions should be pointers",
5886 Call);
5887 continue;
5888 }
5889 Check(Elem.Tag->getKey() == "ignore" ||
5890 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5891 "tags must be valid attribute names", Call);
5892 Attribute::AttrKind Kind =
5893 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5894 if (Kind == Attribute::Alignment) {
5895 Check(ArgCount <= 3 && ArgCount >= 2,
5896 "alignment assumptions should have 2 or 3 arguments", Call);
5897 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5898 "first argument should be a pointer", Call);
5899 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5900 "second argument should be an integer", Call);
5901 if (ArgCount == 3)
5902 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5903 "third argument should be an integer if present", Call);
5904 continue;
5905 }
5906 if (Kind == Attribute::Dereferenceable) {
5907 Check(ArgCount == 2,
5908 "dereferenceable assumptions should have 2 arguments", Call);
5909 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5910 "first argument should be a pointer", Call);
5911 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5912 "second argument should be an integer", Call);
5913 continue;
5914 }
5915 Check(ArgCount <= 2, "too many arguments", Call);
5916 if (Kind == Attribute::None)
5917 break;
5918 if (Attribute::isIntAttrKind(Kind)) {
5919 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5920 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5921 "the second argument should be a constant integral value", Call);
5922 } else if (Attribute::canUseAsParamAttr(Kind)) {
5923 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5924 } else if (Attribute::canUseAsFnAttr(Kind)) {
5925 Check((ArgCount) == 0, "this attribute has no argument", Call);
5926 }
5927 }
5928 break;
5929 }
5930 case Intrinsic::ucmp:
5931 case Intrinsic::scmp: {
5932 Type *SrcTy = Call.getOperand(0)->getType();
5933 Type *DestTy = Call.getType();
5934
5935 Check(DestTy->getScalarSizeInBits() >= 2,
5936 "result type must be at least 2 bits wide", Call);
5937
5938 bool IsDestTypeVector = DestTy->isVectorTy();
5939 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5940 "ucmp/scmp argument and result types must both be either vector or "
5941 "scalar types",
5942 Call);
5943 if (IsDestTypeVector) {
5944 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5945 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5946 Check(SrcVecLen == DestVecLen,
5947 "return type and arguments must have the same number of "
5948 "elements",
5949 Call);
5950 }
5951 break;
5952 }
5953 case Intrinsic::coro_id: {
5954 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5955 if (isa<ConstantPointerNull>(InfoArg))
5956 break;
5957 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5958 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5959 "info argument of llvm.coro.id must refer to an initialized "
5960 "constant");
5961 Constant *Init = GV->getInitializer();
5963 "info argument of llvm.coro.id must refer to either a struct or "
5964 "an array");
5965 break;
5966 }
5967 case Intrinsic::is_fpclass: {
5968 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5969 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5970 "unsupported bits for llvm.is.fpclass test mask");
5971 break;
5972 }
5973 case Intrinsic::fptrunc_round: {
5974 // Check the rounding mode
5975 Metadata *MD = nullptr;
5977 if (MAV)
5978 MD = MAV->getMetadata();
5979
5980 Check(MD != nullptr, "missing rounding mode argument", Call);
5981
5982 Check(isa<MDString>(MD),
5983 ("invalid value for llvm.fptrunc.round metadata operand"
5984 " (the operand should be a string)"),
5985 MD);
5986
5987 std::optional<RoundingMode> RoundMode =
5988 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5989 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5990 "unsupported rounding mode argument", Call);
5991 break;
5992 }
5993 case Intrinsic::convert_to_arbitrary_fp: {
5994 // Check that vector element counts are consistent.
5995 Type *ValueTy = Call.getArgOperand(0)->getType();
5996 Type *IntTy = Call.getType();
5997
5998 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
5999 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6000 Check(IntVecTy,
6001 "if floating-point operand is a vector, integer operand must also "
6002 "be a vector",
6003 Call);
6004 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6005 "floating-point and integer vector operands must have the same "
6006 "element count",
6007 Call);
6008 }
6009
6010 // Check interpretation metadata (argoperand 1).
6011 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6012 Check(InterpMAV, "missing interpretation metadata operand", Call);
6013 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6014 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6015 StringRef Interp = InterpStr->getString();
6016
6017 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6018 Call);
6019
6020 // Valid interpretation strings: mini-float format names.
6022 "unsupported interpretation metadata string", Call);
6023
6024 // Check rounding mode metadata (argoperand 2).
6025 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6026 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6027 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6028 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6029
6030 std::optional<RoundingMode> RM =
6031 convertStrToRoundingMode(RoundingStr->getString());
6032 Check(RM && *RM != RoundingMode::Dynamic,
6033 "unsupported rounding mode argument", Call);
6034 break;
6035 }
6036 case Intrinsic::convert_from_arbitrary_fp: {
6037 // Check that vector element counts are consistent.
6038 Type *IntTy = Call.getArgOperand(0)->getType();
6039 Type *ValueTy = Call.getType();
6040
6041 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6042 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6043 Check(IntVecTy,
6044 "if floating-point operand is a vector, integer operand must also "
6045 "be a vector",
6046 Call);
6047 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6048 "floating-point and integer vector operands must have the same "
6049 "element count",
6050 Call);
6051 }
6052
6053 // Check interpretation metadata (argoperand 1).
6054 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6055 Check(InterpMAV, "missing interpretation metadata operand", Call);
6056 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6057 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6058 StringRef Interp = InterpStr->getString();
6059
6060 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6061 Call);
6062
6063 // Valid interpretation strings: mini-float format names.
6065 "unsupported interpretation metadata string", Call);
6066 break;
6067 }
6068#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6069#include "llvm/IR/VPIntrinsics.def"
6070#undef BEGIN_REGISTER_VP_INTRINSIC
6071 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6072 break;
6073#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6074 case Intrinsic::INTRINSIC:
6075#include "llvm/IR/ConstrainedOps.def"
6076#undef INSTRUCTION
6077 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6078 break;
6079 case Intrinsic::dbg_declare: // llvm.dbg.declare
6080 case Intrinsic::dbg_value: // llvm.dbg.value
6081 case Intrinsic::dbg_assign: // llvm.dbg.assign
6082 case Intrinsic::dbg_label: // llvm.dbg.label
6083 // We no longer interpret debug intrinsics (the old variable-location
6084 // design). They're meaningless as far as LLVM is concerned; we could make
6085 // it an error for them to appear, but it's possible we'll have users
6086 // converting back to intrinsics for the foreseeable future (such as DXIL),
6087 // so tolerate their existence.
6088 break;
6089 case Intrinsic::memcpy:
6090 case Intrinsic::memcpy_inline:
6091 case Intrinsic::memmove:
6092 case Intrinsic::memset:
6093 case Intrinsic::memset_inline:
6094 break;
6095 case Intrinsic::experimental_memset_pattern: {
6096 const auto Memset = cast<MemSetPatternInst>(&Call);
6097 Check(Memset->getValue()->getType()->isSized(),
6098 "unsized types cannot be used as memset patterns", Call);
6099 break;
6100 }
6101 case Intrinsic::memcpy_element_unordered_atomic:
6102 case Intrinsic::memmove_element_unordered_atomic:
6103 case Intrinsic::memset_element_unordered_atomic: {
6104 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6105
6106 ConstantInt *ElementSizeCI =
6107 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6108 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6109 Check(ElementSizeVal.isPowerOf2(),
6110 "element size of the element-wise atomic memory intrinsic "
6111 "must be a power of 2",
6112 Call);
6113
6114 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6115 return Alignment && ElementSizeVal.ule(Alignment->value());
6116 };
6117 Check(IsValidAlignment(AMI->getDestAlign()),
6118 "incorrect alignment of the destination argument", Call);
6119 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6120 Check(IsValidAlignment(AMT->getSourceAlign()),
6121 "incorrect alignment of the source argument", Call);
6122 }
6123 break;
6124 }
6125 case Intrinsic::call_preallocated_setup: {
6126 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6127 bool FoundCall = false;
6128 for (User *U : Call.users()) {
6129 auto *UseCall = dyn_cast<CallBase>(U);
6130 Check(UseCall != nullptr,
6131 "Uses of llvm.call.preallocated.setup must be calls");
6132 Intrinsic::ID IID = UseCall->getIntrinsicID();
6133 if (IID == Intrinsic::call_preallocated_arg) {
6134 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6135 Check(AllocArgIndex != nullptr,
6136 "llvm.call.preallocated.alloc arg index must be a constant");
6137 auto AllocArgIndexInt = AllocArgIndex->getValue();
6138 Check(AllocArgIndexInt.sge(0) &&
6139 AllocArgIndexInt.slt(NumArgs->getValue()),
6140 "llvm.call.preallocated.alloc arg index must be between 0 and "
6141 "corresponding "
6142 "llvm.call.preallocated.setup's argument count");
6143 } else if (IID == Intrinsic::call_preallocated_teardown) {
6144 // nothing to do
6145 } else {
6146 Check(!FoundCall, "Can have at most one call corresponding to a "
6147 "llvm.call.preallocated.setup");
6148 FoundCall = true;
6149 size_t NumPreallocatedArgs = 0;
6150 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6151 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6152 ++NumPreallocatedArgs;
6153 }
6154 }
6155 Check(NumPreallocatedArgs != 0,
6156 "cannot use preallocated intrinsics on a call without "
6157 "preallocated arguments");
6158 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6159 "llvm.call.preallocated.setup arg size must be equal to number "
6160 "of preallocated arguments "
6161 "at call site",
6162 Call, *UseCall);
6163 // getOperandBundle() cannot be called if more than one of the operand
6164 // bundle exists. There is already a check elsewhere for this, so skip
6165 // here if we see more than one.
6166 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6167 1) {
6168 return;
6169 }
6170 auto PreallocatedBundle =
6171 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6172 Check(PreallocatedBundle,
6173 "Use of llvm.call.preallocated.setup outside intrinsics "
6174 "must be in \"preallocated\" operand bundle");
6175 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6176 "preallocated bundle must have token from corresponding "
6177 "llvm.call.preallocated.setup");
6178 }
6179 }
6180 break;
6181 }
6182 case Intrinsic::call_preallocated_arg: {
6183 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6184 Check(Token &&
6185 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6186 "llvm.call.preallocated.arg token argument must be a "
6187 "llvm.call.preallocated.setup");
6188 Check(Call.hasFnAttr(Attribute::Preallocated),
6189 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6190 "call site attribute");
6191 break;
6192 }
6193 case Intrinsic::call_preallocated_teardown: {
6194 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6195 Check(Token &&
6196 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6197 "llvm.call.preallocated.teardown token argument must be a "
6198 "llvm.call.preallocated.setup");
6199 break;
6200 }
6201 case Intrinsic::gcroot:
6202 case Intrinsic::gcwrite:
6203 case Intrinsic::gcread:
6204 if (ID == Intrinsic::gcroot) {
6205 AllocaInst *AI =
6207 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6209 "llvm.gcroot parameter #2 must be a constant.", Call);
6210 if (!AI->getAllocatedType()->isPointerTy()) {
6212 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6213 "or argument #2 must be a non-null constant.",
6214 Call);
6215 }
6216 }
6217
6218 Check(Call.getParent()->getParent()->hasGC(),
6219 "Enclosing function does not use GC.", Call);
6220 break;
6221 case Intrinsic::init_trampoline:
6223 "llvm.init_trampoline parameter #2 must resolve to a function.",
6224 Call);
6225 break;
6226 case Intrinsic::prefetch:
6227 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6228 "rw argument to llvm.prefetch must be 0-1", Call);
6229 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6230 "locality argument to llvm.prefetch must be 0-3", Call);
6231 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6232 "cache type argument to llvm.prefetch must be 0-1", Call);
6233 break;
6234 case Intrinsic::reloc_none: {
6236 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6237 "llvm.reloc.none argument must be a metadata string", &Call);
6238 break;
6239 }
6240 case Intrinsic::stackprotector:
6242 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6243 break;
6244 case Intrinsic::localescape: {
6245 BasicBlock *BB = Call.getParent();
6246 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6247 Call);
6248 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6249 Call);
6250 for (Value *Arg : Call.args()) {
6251 if (isa<ConstantPointerNull>(Arg))
6252 continue; // Null values are allowed as placeholders.
6253 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6254 Check(AI && AI->isStaticAlloca(),
6255 "llvm.localescape only accepts static allocas", Call);
6256 }
6257 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6258 SawFrameEscape = true;
6259 break;
6260 }
6261 case Intrinsic::localrecover: {
6263 Function *Fn = dyn_cast<Function>(FnArg);
6264 Check(Fn && !Fn->isDeclaration(),
6265 "llvm.localrecover first "
6266 "argument must be function defined in this module",
6267 Call);
6268 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6269 auto &Entry = FrameEscapeInfo[Fn];
6270 Entry.second = unsigned(
6271 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6272 break;
6273 }
6274
6275 case Intrinsic::experimental_gc_statepoint:
6276 if (auto *CI = dyn_cast<CallInst>(&Call))
6277 Check(!CI->isInlineAsm(),
6278 "gc.statepoint support for inline assembly unimplemented", CI);
6279 Check(Call.getParent()->getParent()->hasGC(),
6280 "Enclosing function does not use GC.", Call);
6281
6282 verifyStatepoint(Call);
6283 break;
6284 case Intrinsic::experimental_gc_result: {
6285 Check(Call.getParent()->getParent()->hasGC(),
6286 "Enclosing function does not use GC.", Call);
6287
6288 auto *Statepoint = Call.getArgOperand(0);
6289 if (isa<UndefValue>(Statepoint))
6290 break;
6291
6292 // Are we tied to a statepoint properly?
6293 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6294 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6295 Intrinsic::experimental_gc_statepoint,
6296 "gc.result operand #1 must be from a statepoint", Call,
6297 Call.getArgOperand(0));
6298
6299 // Check that result type matches wrapped callee.
6300 auto *TargetFuncType =
6301 cast<FunctionType>(StatepointCall->getParamElementType(2));
6302 Check(Call.getType() == TargetFuncType->getReturnType(),
6303 "gc.result result type does not match wrapped callee", Call);
6304 break;
6305 }
6306 case Intrinsic::experimental_gc_relocate: {
6307 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6308
6310 "gc.relocate must return a pointer or a vector of pointers", Call);
6311
6312 // Check that this relocate is correctly tied to the statepoint
6313
6314 // This is case for relocate on the unwinding path of an invoke statepoint
6315 if (LandingPadInst *LandingPad =
6317
6318 const BasicBlock *InvokeBB =
6319 LandingPad->getParent()->getUniquePredecessor();
6320
6321 // Landingpad relocates should have only one predecessor with invoke
6322 // statepoint terminator
6323 Check(InvokeBB, "safepoints should have unique landingpads",
6324 LandingPad->getParent());
6325 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6326 InvokeBB);
6328 "gc relocate should be linked to a statepoint", InvokeBB);
6329 } else {
6330 // In all other cases relocate should be tied to the statepoint directly.
6331 // This covers relocates on a normal return path of invoke statepoint and
6332 // relocates of a call statepoint.
6333 auto *Token = Call.getArgOperand(0);
6335 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6336 }
6337
6338 // Verify rest of the relocate arguments.
6339 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6340
6341 // Both the base and derived must be piped through the safepoint.
6344 "gc.relocate operand #2 must be integer offset", Call);
6345
6346 Value *Derived = Call.getArgOperand(2);
6347 Check(isa<ConstantInt>(Derived),
6348 "gc.relocate operand #3 must be integer offset", Call);
6349
6350 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6351 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6352
6353 // Check the bounds
6354 if (isa<UndefValue>(StatepointCall))
6355 break;
6356 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6357 .getOperandBundle(LLVMContext::OB_gc_live)) {
6358 Check(BaseIndex < Opt->Inputs.size(),
6359 "gc.relocate: statepoint base index out of bounds", Call);
6360 Check(DerivedIndex < Opt->Inputs.size(),
6361 "gc.relocate: statepoint derived index out of bounds", Call);
6362 }
6363
6364 // Relocated value must be either a pointer type or vector-of-pointer type,
6365 // but gc_relocate does not need to return the same pointer type as the
6366 // relocated pointer. It can be casted to the correct type later if it's
6367 // desired. However, they must have the same address space and 'vectorness'
6368 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6369 auto *ResultType = Call.getType();
6370 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6371 auto *BaseType = Relocate.getBasePtr()->getType();
6372
6373 Check(BaseType->isPtrOrPtrVectorTy(),
6374 "gc.relocate: relocated value must be a pointer", Call);
6375 Check(DerivedType->isPtrOrPtrVectorTy(),
6376 "gc.relocate: relocated value must be a pointer", Call);
6377
6378 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6379 "gc.relocate: vector relocates to vector and pointer to pointer",
6380 Call);
6381 Check(
6382 ResultType->getPointerAddressSpace() ==
6383 DerivedType->getPointerAddressSpace(),
6384 "gc.relocate: relocating a pointer shouldn't change its address space",
6385 Call);
6386
6387 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6388 Check(GC, "gc.relocate: calling function must have GCStrategy",
6389 Call.getFunction());
6390 if (GC) {
6391 auto isGCPtr = [&GC](Type *PTy) {
6392 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6393 };
6394 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6395 Check(isGCPtr(BaseType),
6396 "gc.relocate: relocated value must be a gc pointer", Call);
6397 Check(isGCPtr(DerivedType),
6398 "gc.relocate: relocated value must be a gc pointer", Call);
6399 }
6400 break;
6401 }
6402 case Intrinsic::experimental_patchpoint: {
6403 if (Call.getCallingConv() == CallingConv::AnyReg) {
6405 "patchpoint: invalid return type used with anyregcc", Call);
6406 }
6407 break;
6408 }
6409 case Intrinsic::eh_exceptioncode:
6410 case Intrinsic::eh_exceptionpointer: {
6412 "eh.exceptionpointer argument must be a catchpad", Call);
6413 break;
6414 }
6415 case Intrinsic::get_active_lane_mask: {
6417 "get_active_lane_mask: must return a "
6418 "vector",
6419 Call);
6420 auto *ElemTy = Call.getType()->getScalarType();
6421 Check(ElemTy->isIntegerTy(1),
6422 "get_active_lane_mask: element type is not "
6423 "i1",
6424 Call);
6425 break;
6426 }
6427 case Intrinsic::experimental_get_vector_length: {
6428 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6429 Check(!VF->isNegative() && !VF->isZero(),
6430 "get_vector_length: VF must be positive", Call);
6431 break;
6432 }
6433 case Intrinsic::masked_load: {
6434 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6435 Call);
6436
6438 Value *PassThru = Call.getArgOperand(2);
6439 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6440 Call);
6441 Check(PassThru->getType() == Call.getType(),
6442 "masked_load: pass through and return type must match", Call);
6443 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6444 cast<VectorType>(Call.getType())->getElementCount(),
6445 "masked_load: vector mask must be same length as return", Call);
6446 break;
6447 }
6448 case Intrinsic::masked_store: {
6449 Value *Val = Call.getArgOperand(0);
6451 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6452 Call);
6453 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6454 cast<VectorType>(Val->getType())->getElementCount(),
6455 "masked_store: vector mask must be same length as value", Call);
6456 break;
6457 }
6458
6459 case Intrinsic::experimental_guard: {
6460 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6462 "experimental_guard must have exactly one "
6463 "\"deopt\" operand bundle");
6464 break;
6465 }
6466
6467 case Intrinsic::experimental_deoptimize: {
6468 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6469 Call);
6471 "experimental_deoptimize must have exactly one "
6472 "\"deopt\" operand bundle");
6474 "experimental_deoptimize return type must match caller return type");
6475
6476 if (isa<CallInst>(Call)) {
6478 Check(RI,
6479 "calls to experimental_deoptimize must be followed by a return");
6480
6481 if (!Call.getType()->isVoidTy() && RI)
6482 Check(RI->getReturnValue() == &Call,
6483 "calls to experimental_deoptimize must be followed by a return "
6484 "of the value computed by experimental_deoptimize");
6485 }
6486
6487 break;
6488 }
6489 case Intrinsic::vastart: {
6491 "va_start called in a non-varargs function");
6492 break;
6493 }
6494 case Intrinsic::get_dynamic_area_offset: {
6495 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6496 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6497 IntTy->getBitWidth(),
6498 "get_dynamic_area_offset result type must be scalar integer matching "
6499 "alloca address space width",
6500 Call);
6501 break;
6502 }
6503 case Intrinsic::vector_reduce_and:
6504 case Intrinsic::vector_reduce_or:
6505 case Intrinsic::vector_reduce_xor:
6506 case Intrinsic::vector_reduce_add:
6507 case Intrinsic::vector_reduce_mul:
6508 case Intrinsic::vector_reduce_smax:
6509 case Intrinsic::vector_reduce_smin:
6510 case Intrinsic::vector_reduce_umax:
6511 case Intrinsic::vector_reduce_umin: {
6512 Type *ArgTy = Call.getArgOperand(0)->getType();
6513 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6514 "Intrinsic has incorrect argument type!");
6515 break;
6516 }
6517 case Intrinsic::vector_reduce_fmax:
6518 case Intrinsic::vector_reduce_fmin: {
6519 Type *ArgTy = Call.getArgOperand(0)->getType();
6520 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6521 "Intrinsic has incorrect argument type!");
6522 break;
6523 }
6524 case Intrinsic::vector_reduce_fadd:
6525 case Intrinsic::vector_reduce_fmul: {
6526 // Unlike the other reductions, the first argument is a start value. The
6527 // second argument is the vector to be reduced.
6528 Type *ArgTy = Call.getArgOperand(1)->getType();
6529 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6530 "Intrinsic has incorrect argument type!");
6531 break;
6532 }
6533 case Intrinsic::smul_fix:
6534 case Intrinsic::smul_fix_sat:
6535 case Intrinsic::umul_fix:
6536 case Intrinsic::umul_fix_sat:
6537 case Intrinsic::sdiv_fix:
6538 case Intrinsic::sdiv_fix_sat:
6539 case Intrinsic::udiv_fix:
6540 case Intrinsic::udiv_fix_sat: {
6541 Value *Op1 = Call.getArgOperand(0);
6542 Value *Op2 = Call.getArgOperand(1);
6544 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6545 "vector of ints");
6547 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6548 "vector of ints");
6549
6550 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6551 Check(Op3->getType()->isIntegerTy(),
6552 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6553 Check(Op3->getBitWidth() <= 32,
6554 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6555
6556 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6557 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6558 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6559 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6560 "the operands");
6561 } else {
6562 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6563 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6564 "to the width of the operands");
6565 }
6566 break;
6567 }
6568 case Intrinsic::lrint:
6569 case Intrinsic::llrint:
6570 case Intrinsic::lround:
6571 case Intrinsic::llround: {
6572 Type *ValTy = Call.getArgOperand(0)->getType();
6573 Type *ResultTy = Call.getType();
6574 auto *VTy = dyn_cast<VectorType>(ValTy);
6575 auto *RTy = dyn_cast<VectorType>(ResultTy);
6576 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6577 ExpectedName + ": argument must be floating-point or vector "
6578 "of floating-points, and result must be integer or "
6579 "vector of integers",
6580 &Call);
6581 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6582 ExpectedName + ": argument and result disagree on vector use", &Call);
6583 if (VTy) {
6584 Check(VTy->getElementCount() == RTy->getElementCount(),
6585 ExpectedName + ": argument must be same length as result", &Call);
6586 }
6587 break;
6588 }
6589 case Intrinsic::bswap: {
6590 Type *Ty = Call.getType();
6591 unsigned Size = Ty->getScalarSizeInBits();
6592 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6593 break;
6594 }
6595 case Intrinsic::invariant_start: {
6596 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6597 Check(InvariantSize &&
6598 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6599 "invariant_start parameter must be -1, 0 or a positive number",
6600 &Call);
6601 break;
6602 }
6603 case Intrinsic::matrix_multiply:
6604 case Intrinsic::matrix_transpose:
6605 case Intrinsic::matrix_column_major_load:
6606 case Intrinsic::matrix_column_major_store: {
6608 ConstantInt *Stride = nullptr;
6609 ConstantInt *NumRows;
6610 ConstantInt *NumColumns;
6611 VectorType *ResultTy;
6612 Type *Op0ElemTy = nullptr;
6613 Type *Op1ElemTy = nullptr;
6614 switch (ID) {
6615 case Intrinsic::matrix_multiply: {
6616 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6617 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6618 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6620 ->getNumElements() ==
6621 NumRows->getZExtValue() * N->getZExtValue(),
6622 "First argument of a matrix operation does not match specified "
6623 "shape!");
6625 ->getNumElements() ==
6626 N->getZExtValue() * NumColumns->getZExtValue(),
6627 "Second argument of a matrix operation does not match specified "
6628 "shape!");
6629
6630 ResultTy = cast<VectorType>(Call.getType());
6631 Op0ElemTy =
6632 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6633 Op1ElemTy =
6634 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6635 break;
6636 }
6637 case Intrinsic::matrix_transpose:
6638 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6639 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6640 ResultTy = cast<VectorType>(Call.getType());
6641 Op0ElemTy =
6642 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6643 break;
6644 case Intrinsic::matrix_column_major_load: {
6646 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6647 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6648 ResultTy = cast<VectorType>(Call.getType());
6649 break;
6650 }
6651 case Intrinsic::matrix_column_major_store: {
6653 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6654 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6655 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6656 Op0ElemTy =
6657 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6658 break;
6659 }
6660 default:
6661 llvm_unreachable("unexpected intrinsic");
6662 }
6663
6664 Check(ResultTy->getElementType()->isIntegerTy() ||
6665 ResultTy->getElementType()->isFloatingPointTy(),
6666 "Result type must be an integer or floating-point type!", IF);
6667
6668 if (Op0ElemTy)
6669 Check(ResultTy->getElementType() == Op0ElemTy,
6670 "Vector element type mismatch of the result and first operand "
6671 "vector!",
6672 IF);
6673
6674 if (Op1ElemTy)
6675 Check(ResultTy->getElementType() == Op1ElemTy,
6676 "Vector element type mismatch of the result and second operand "
6677 "vector!",
6678 IF);
6679
6681 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6682 "Result of a matrix operation does not fit in the returned vector!");
6683
6684 if (Stride) {
6685 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6686 IF);
6687 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6688 "Stride must be greater or equal than the number of rows!", IF);
6689 }
6690
6691 break;
6692 }
6693 case Intrinsic::vector_splice_left:
6694 case Intrinsic::vector_splice_right: {
6696 uint64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6697 uint64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6698 if (VecTy->isScalableTy() && Call.getParent() &&
6699 Call.getParent()->getParent()) {
6700 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6701 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6702 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6703 }
6704 if (ID == Intrinsic::vector_splice_left)
6705 Check(Idx < KnownMinNumElements,
6706 "The splice index exceeds the range [0, VL-1] where VL is the "
6707 "known minimum number of elements in the vector. For scalable "
6708 "vectors the minimum number of elements is determined from "
6709 "vscale_range.",
6710 &Call);
6711 else
6712 Check(Idx <= KnownMinNumElements,
6713 "The splice index exceeds the range [0, VL] where VL is the "
6714 "known minimum number of elements in the vector. For scalable "
6715 "vectors the minimum number of elements is determined from "
6716 "vscale_range.",
6717 &Call);
6718 break;
6719 }
6720 case Intrinsic::stepvector: {
6722 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6723 VecTy->getScalarSizeInBits() >= 8,
6724 "stepvector only supported for vectors of integers "
6725 "with a bitwidth of at least 8.",
6726 &Call);
6727 break;
6728 }
6729 case Intrinsic::experimental_vector_match: {
6730 Value *Op1 = Call.getArgOperand(0);
6731 Value *Op2 = Call.getArgOperand(1);
6733
6734 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6735 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6736 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6737
6738 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6740 "Second operand must be a fixed length vector.", &Call);
6741 Check(Op1Ty->getElementType()->isIntegerTy(),
6742 "First operand must be a vector of integers.", &Call);
6743 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6744 "First two operands must have the same element type.", &Call);
6745 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6746 "First operand and mask must have the same number of elements.",
6747 &Call);
6748 Check(MaskTy->getElementType()->isIntegerTy(1),
6749 "Mask must be a vector of i1's.", &Call);
6750 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6751 &Call);
6752 break;
6753 }
6754 case Intrinsic::vector_insert: {
6755 Value *Vec = Call.getArgOperand(0);
6756 Value *SubVec = Call.getArgOperand(1);
6757 Value *Idx = Call.getArgOperand(2);
6758 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6759
6760 VectorType *VecTy = cast<VectorType>(Vec->getType());
6761 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6762
6763 ElementCount VecEC = VecTy->getElementCount();
6764 ElementCount SubVecEC = SubVecTy->getElementCount();
6765 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6766 "vector_insert parameters must have the same element "
6767 "type.",
6768 &Call);
6769 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6770 "vector_insert index must be a constant multiple of "
6771 "the subvector's known minimum vector length.");
6772
6773 // If this insertion is not the 'mixed' case where a fixed vector is
6774 // inserted into a scalable vector, ensure that the insertion of the
6775 // subvector does not overrun the parent vector.
6776 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6777 Check(IdxN < VecEC.getKnownMinValue() &&
6778 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6779 "subvector operand of vector_insert would overrun the "
6780 "vector being inserted into.");
6781 }
6782 break;
6783 }
6784 case Intrinsic::vector_extract: {
6785 Value *Vec = Call.getArgOperand(0);
6786 Value *Idx = Call.getArgOperand(1);
6787 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6788
6789 VectorType *ResultTy = cast<VectorType>(Call.getType());
6790 VectorType *VecTy = cast<VectorType>(Vec->getType());
6791
6792 ElementCount VecEC = VecTy->getElementCount();
6793 ElementCount ResultEC = ResultTy->getElementCount();
6794
6795 Check(ResultTy->getElementType() == VecTy->getElementType(),
6796 "vector_extract result must have the same element "
6797 "type as the input vector.",
6798 &Call);
6799 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6800 "vector_extract index must be a constant multiple of "
6801 "the result type's known minimum vector length.");
6802
6803 // If this extraction is not the 'mixed' case where a fixed vector is
6804 // extracted from a scalable vector, ensure that the extraction does not
6805 // overrun the parent vector.
6806 if (VecEC.isScalable() == ResultEC.isScalable()) {
6807 Check(IdxN < VecEC.getKnownMinValue() &&
6808 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6809 "vector_extract would overrun.");
6810 }
6811 break;
6812 }
6813 case Intrinsic::vector_partial_reduce_fadd:
6814 case Intrinsic::vector_partial_reduce_add: {
6817
6818 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6819 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6820
6821 Check((VecWidth % AccWidth) == 0,
6822 "Invalid vector widths for partial "
6823 "reduction. The width of the input vector "
6824 "must be a positive integer multiple of "
6825 "the width of the accumulator vector.");
6826 break;
6827 }
6828 case Intrinsic::experimental_noalias_scope_decl: {
6829 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6830 break;
6831 }
6832 case Intrinsic::preserve_array_access_index:
6833 case Intrinsic::preserve_struct_access_index:
6834 case Intrinsic::aarch64_ldaxr:
6835 case Intrinsic::aarch64_ldxr:
6836 case Intrinsic::arm_ldaex:
6837 case Intrinsic::arm_ldrex: {
6838 Type *ElemTy = Call.getParamElementType(0);
6839 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6840 &Call);
6841 break;
6842 }
6843 case Intrinsic::aarch64_stlxr:
6844 case Intrinsic::aarch64_stxr:
6845 case Intrinsic::arm_stlex:
6846 case Intrinsic::arm_strex: {
6847 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6848 Check(ElemTy,
6849 "Intrinsic requires elementtype attribute on second argument.",
6850 &Call);
6851 break;
6852 }
6853 case Intrinsic::aarch64_prefetch: {
6854 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6855 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6856 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6857 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6858 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6859 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6860 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6861 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6862 break;
6863 }
6864 case Intrinsic::aarch64_range_prefetch: {
6865 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6866 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6867 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6868 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6869 Call);
6870 break;
6871 }
6872 case Intrinsic::callbr_landingpad: {
6873 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6874 Check(CBR, "intrinstic requires callbr operand", &Call);
6875 if (!CBR)
6876 break;
6877
6878 const BasicBlock *LandingPadBB = Call.getParent();
6879 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6880 if (!PredBB) {
6881 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6882 break;
6883 }
6884 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6885 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6886 &Call);
6887 break;
6888 }
6889 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6890 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6891 "block in indirect destination list",
6892 &Call);
6893 const Instruction &First = *LandingPadBB->begin();
6894 Check(&First == &Call, "No other instructions may proceed intrinsic",
6895 &Call);
6896 break;
6897 }
6898 case Intrinsic::amdgcn_cs_chain: {
6899 auto CallerCC = Call.getCaller()->getCallingConv();
6900 switch (CallerCC) {
6901 case CallingConv::AMDGPU_CS:
6902 case CallingConv::AMDGPU_CS_Chain:
6903 case CallingConv::AMDGPU_CS_ChainPreserve:
6904 case CallingConv::AMDGPU_ES:
6905 case CallingConv::AMDGPU_GS:
6906 case CallingConv::AMDGPU_HS:
6907 case CallingConv::AMDGPU_LS:
6908 case CallingConv::AMDGPU_VS:
6909 break;
6910 default:
6911 CheckFailed("Intrinsic cannot be called from functions with this "
6912 "calling convention",
6913 &Call);
6914 break;
6915 }
6916
6917 Check(Call.paramHasAttr(2, Attribute::InReg),
6918 "SGPR arguments must have the `inreg` attribute", &Call);
6919 Check(!Call.paramHasAttr(3, Attribute::InReg),
6920 "VGPR arguments must not have the `inreg` attribute", &Call);
6921
6922 auto *Next = Call.getNextNode();
6923 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6924 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6925 Intrinsic::amdgcn_unreachable;
6926 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6927 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6928 break;
6929 }
6930 case Intrinsic::amdgcn_init_exec_from_input: {
6931 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6932 Check(Arg && Arg->hasInRegAttr(),
6933 "only inreg arguments to the parent function are valid as inputs to "
6934 "this intrinsic",
6935 &Call);
6936 break;
6937 }
6938 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6939 auto CallerCC = Call.getCaller()->getCallingConv();
6940 switch (CallerCC) {
6941 case CallingConv::AMDGPU_CS_Chain:
6942 case CallingConv::AMDGPU_CS_ChainPreserve:
6943 break;
6944 default:
6945 CheckFailed("Intrinsic can only be used from functions with the "
6946 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6947 "calling conventions",
6948 &Call);
6949 break;
6950 }
6951
6952 unsigned InactiveIdx = 1;
6953 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6954 "Value for inactive lanes must not have the `inreg` attribute",
6955 &Call);
6956 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6957 "Value for inactive lanes must be a function argument", &Call);
6958 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6959 "Value for inactive lanes must be a VGPR function argument", &Call);
6960 break;
6961 }
6962 case Intrinsic::amdgcn_call_whole_wave: {
6964 Check(F, "Indirect whole wave calls are not allowed", &Call);
6965
6966 CallingConv::ID CC = F->getCallingConv();
6967 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6968 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6969 &Call);
6970
6971 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6972
6973 Check(Call.arg_size() == F->arg_size(),
6974 "Call argument count must match callee argument count", &Call);
6975
6976 // The first argument of the call is the callee, and the first argument of
6977 // the callee is the active mask. The rest of the arguments must match.
6978 Check(F->arg_begin()->getType()->isIntegerTy(1),
6979 "Callee must have i1 as its first argument", &Call);
6980 for (auto [CallArg, FuncArg] :
6981 drop_begin(zip_equal(Call.args(), F->args()))) {
6982 Check(CallArg->getType() == FuncArg.getType(),
6983 "Argument types must match", &Call);
6984
6985 // Check that inreg attributes match between call site and function
6986 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6987 FuncArg.hasInRegAttr(),
6988 "Argument inreg attributes must match", &Call);
6989 }
6990 break;
6991 }
6992 case Intrinsic::amdgcn_s_prefetch_data: {
6993 Check(
6996 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6997 break;
6998 }
6999 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7000 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7001 Value *Src0 = Call.getArgOperand(0);
7002 Value *Src1 = Call.getArgOperand(1);
7003
7004 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7005 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7006 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7007 Call.getArgOperand(3));
7008 Check(BLGP <= 4, "invalid value for blgp format", Call,
7009 Call.getArgOperand(4));
7010
7011 // AMDGPU::MFMAScaleFormats values
7012 auto getFormatNumRegs = [](unsigned FormatVal) {
7013 switch (FormatVal) {
7014 case 0:
7015 case 1:
7016 return 8u;
7017 case 2:
7018 case 3:
7019 return 6u;
7020 case 4:
7021 return 4u;
7022 default:
7023 llvm_unreachable("invalid format value");
7024 }
7025 };
7026
7027 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7028 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7029 return false;
7030 unsigned NumElts = Ty->getNumElements();
7031 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7032 };
7033
7034 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7035 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7036 Check(isValidSrcASrcBVector(Src0Ty),
7037 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7038 Check(isValidSrcASrcBVector(Src1Ty),
7039 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7040
7041 // Permit excess registers for the format.
7042 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7043 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7044 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7045 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7046 break;
7047 }
7048 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7049 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7050 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7051 Value *Src0 = Call.getArgOperand(1);
7052 Value *Src1 = Call.getArgOperand(3);
7053
7054 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7055 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7056 Check(FmtA <= 4, "invalid value for matrix format", Call,
7057 Call.getArgOperand(0));
7058 Check(FmtB <= 4, "invalid value for matrix format", Call,
7059 Call.getArgOperand(2));
7060
7061 // AMDGPU::MatrixFMT values
7062 auto getFormatNumRegs = [](unsigned FormatVal) {
7063 switch (FormatVal) {
7064 case 0:
7065 case 1:
7066 return 16u;
7067 case 2:
7068 case 3:
7069 return 12u;
7070 case 4:
7071 return 8u;
7072 default:
7073 llvm_unreachable("invalid format value");
7074 }
7075 };
7076
7077 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7078 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7079 return false;
7080 unsigned NumElts = Ty->getNumElements();
7081 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7082 };
7083
7084 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7085 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7086 Check(isValidSrcASrcBVector(Src0Ty),
7087 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7088 Check(isValidSrcASrcBVector(Src1Ty),
7089 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7090
7091 // Permit excess registers for the format.
7092 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7093 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7094 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7095 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7096 break;
7097 }
7098 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7099 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7100 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7101 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7102 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7103 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7104 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7105 Value *PtrArg = Call.getArgOperand(0);
7106 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7108 "cooperative atomic intrinsics require a generic or global pointer",
7109 &Call, PtrArg);
7110
7111 // Last argument must be a MD string
7113 MDNode *MD = cast<MDNode>(Op->getMetadata());
7114 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7115 "cooperative atomic intrinsics require that the last argument is a "
7116 "metadata string",
7117 &Call, Op);
7118 break;
7119 }
7120 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7121 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7122 Value *V = Call.getArgOperand(0);
7123 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7124 Check(RegCount % 8 == 0,
7125 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7126 break;
7127 }
7128 case Intrinsic::experimental_convergence_entry:
7129 case Intrinsic::experimental_convergence_anchor:
7130 break;
7131 case Intrinsic::experimental_convergence_loop:
7132 break;
7133 case Intrinsic::ptrmask: {
7134 Type *Ty0 = Call.getArgOperand(0)->getType();
7135 Type *Ty1 = Call.getArgOperand(1)->getType();
7137 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7138 "of pointers",
7139 &Call);
7140 Check(
7141 Ty0->isVectorTy() == Ty1->isVectorTy(),
7142 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7143 &Call);
7144 if (Ty0->isVectorTy())
7145 Check(cast<VectorType>(Ty0)->getElementCount() ==
7146 cast<VectorType>(Ty1)->getElementCount(),
7147 "llvm.ptrmask intrinsic arguments must have the same number of "
7148 "elements",
7149 &Call);
7150 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7151 "llvm.ptrmask intrinsic second argument bitwidth must match "
7152 "pointer index type size of first argument",
7153 &Call);
7154 break;
7155 }
7156 case Intrinsic::thread_pointer: {
7158 DL.getDefaultGlobalsAddressSpace(),
7159 "llvm.thread.pointer intrinsic return type must be for the globals "
7160 "address space",
7161 &Call);
7162 break;
7163 }
7164 case Intrinsic::threadlocal_address: {
7165 const Value &Arg0 = *Call.getArgOperand(0);
7166 Check(isa<GlobalValue>(Arg0),
7167 "llvm.threadlocal.address first argument must be a GlobalValue");
7168 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7169 "llvm.threadlocal.address operand isThreadLocal() must be true");
7170 break;
7171 }
7172 case Intrinsic::lifetime_start:
7173 case Intrinsic::lifetime_end: {
7174 Value *Ptr = Call.getArgOperand(0);
7176 "llvm.lifetime.start/end can only be used on alloca or poison",
7177 &Call);
7178 break;
7179 }
7180 };
7181
7182 // Verify that there aren't any unmediated control transfers between funclets.
7184 Function *F = Call.getParent()->getParent();
7185 if (F->hasPersonalityFn() &&
7186 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7187 // Run EH funclet coloring on-demand and cache results for other intrinsic
7188 // calls in this function
7189 if (BlockEHFuncletColors.empty())
7190 BlockEHFuncletColors = colorEHFunclets(*F);
7191
7192 // Check for catch-/cleanup-pad in first funclet block
7193 bool InEHFunclet = false;
7194 BasicBlock *CallBB = Call.getParent();
7195 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7196 assert(CV.size() > 0 && "Uncolored block");
7197 for (BasicBlock *ColorFirstBB : CV)
7198 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7199 It != ColorFirstBB->end())
7201 InEHFunclet = true;
7202
7203 // Check for funclet operand bundle
7204 bool HasToken = false;
7205 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7207 HasToken = true;
7208
7209 // This would cause silent code truncation in WinEHPrepare
7210 if (InEHFunclet)
7211 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7212 }
7213 }
7214}
7215
/// Carefully grab the subprogram from a local scope.
///
/// This carefully grabs the subprogram from a local scope, avoiding the
/// built-in assertions that would typically fire.
  // Null scopes have no subprogram to report.
  if (!LocalScope)
    return nullptr;

  // The scope may itself already be the subprogram we are after.
  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Lexical blocks point at an enclosing scope; recurse through the raw
  // (unverified) scope operand rather than the asserting accessor.
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}
7234
/// Verify a single #dbg_label record: checks its label operand and that the
/// label's scope agrees with the scope of the attached !dbg location.
void Verifier::visit(DbgLabelRecord &DLR) {
      "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  // BB and F are only used as context operands in the diagnostics below.
  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  // Resolve both scope chains to their subprograms; bail out on broken
  // chains, which are diagnosed elsewhere.
  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
7262
/// Verify a single #dbg_value/#dbg_declare/#dbg_assign variable record:
/// its kind, location operand, variable, expression, assign-specific
/// operands, and the agreement between the variable's and the location's
/// subprograms.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the known record kinds are valid.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  // #dbg_assign records carry extra operands: an assign ID, an address, and
  // an address expression.
  if (DVR.isDbgAssign()) {
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

            "invalid #dbg_assign address expression", &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  // The record must carry a well-formed DILocation.
  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  verifyFnArgs(DVR);
}
7354
7355void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7356 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7357 auto *RetTy = cast<VectorType>(VPCast->getType());
7358 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7359 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7360 "VP cast intrinsic first argument and result vector lengths must be "
7361 "equal",
7362 *VPCast);
7363
7364 switch (VPCast->getIntrinsicID()) {
7365 default:
7366 llvm_unreachable("Unknown VP cast intrinsic");
7367 case Intrinsic::vp_trunc:
7368 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7369 "llvm.vp.trunc intrinsic first argument and result element type "
7370 "must be integer",
7371 *VPCast);
7372 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7373 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7374 "larger than the bit size of the return type",
7375 *VPCast);
7376 break;
7377 case Intrinsic::vp_zext:
7378 case Intrinsic::vp_sext:
7379 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7380 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7381 "element type must be integer",
7382 *VPCast);
7383 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7384 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7385 "argument must be smaller than the bit size of the return type",
7386 *VPCast);
7387 break;
7388 case Intrinsic::vp_fptoui:
7389 case Intrinsic::vp_fptosi:
7390 case Intrinsic::vp_lrint:
7391 case Intrinsic::vp_llrint:
7392 Check(
7393 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7394 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7395 "type must be floating-point and result element type must be integer",
7396 *VPCast);
7397 break;
7398 case Intrinsic::vp_uitofp:
7399 case Intrinsic::vp_sitofp:
7400 Check(
7401 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7402 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7403 "type must be integer and result element type must be floating-point",
7404 *VPCast);
7405 break;
7406 case Intrinsic::vp_fptrunc:
7407 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7408 "llvm.vp.fptrunc intrinsic first argument and result element type "
7409 "must be floating-point",
7410 *VPCast);
7411 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7412 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7413 "larger than the bit size of the return type",
7414 *VPCast);
7415 break;
7416 case Intrinsic::vp_fpext:
7417 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7418 "llvm.vp.fpext intrinsic first argument and result element type "
7419 "must be floating-point",
7420 *VPCast);
7421 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7422 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7423 "smaller than the bit size of the return type",
7424 *VPCast);
7425 break;
7426 case Intrinsic::vp_ptrtoint:
7427 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7428 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7429 "pointer and result element type must be integer",
7430 *VPCast);
7431 break;
7432 case Intrinsic::vp_inttoptr:
7433 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7434 "llvm.vp.inttoptr intrinsic first argument element type must be "
7435 "integer and result element type must be pointer",
7436 *VPCast);
7437 break;
7438 }
7439 }
7440
7441 switch (VPI.getIntrinsicID()) {
7442 case Intrinsic::vp_fcmp: {
7443 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7445 "invalid predicate for VP FP comparison intrinsic", &VPI);
7446 break;
7447 }
7448 case Intrinsic::vp_icmp: {
7449 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7451 "invalid predicate for VP integer comparison intrinsic", &VPI);
7452 break;
7453 }
7454 case Intrinsic::vp_is_fpclass: {
7455 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7456 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7457 "unsupported bits for llvm.vp.is.fpclass test mask");
7458 break;
7459 }
7460 case Intrinsic::experimental_vp_splice: {
7461 VectorType *VecTy = cast<VectorType>(VPI.getType());
7462 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7463 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7464 if (VPI.getParent() && VPI.getParent()->getParent()) {
7465 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7466 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7467 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7468 }
7469 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7470 (Idx >= 0 && Idx < KnownMinNumElements),
7471 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7472 "known minimum number of elements in the vector. For scalable "
7473 "vectors the minimum number of elements is determined from "
7474 "vscale_range.",
7475 &VPI);
7476 break;
7477 }
7478 }
7479}
7480
7481void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7482 unsigned NumOperands = FPI.getNonMetadataArgCount();
7483 bool HasRoundingMD =
7485
7486 // Add the expected number of metadata operands.
7487 NumOperands += (1 + HasRoundingMD);
7488
7489 // Compare intrinsics carry an extra predicate metadata operand.
7491 NumOperands += 1;
7492 Check((FPI.arg_size() == NumOperands),
7493 "invalid arguments for constrained FP intrinsic", &FPI);
7494
7495 switch (FPI.getIntrinsicID()) {
7496 case Intrinsic::experimental_constrained_lrint:
7497 case Intrinsic::experimental_constrained_llrint: {
7498 Type *ValTy = FPI.getArgOperand(0)->getType();
7499 Type *ResultTy = FPI.getType();
7500 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7501 "Intrinsic does not support vectors", &FPI);
7502 break;
7503 }
7504
7505 case Intrinsic::experimental_constrained_lround:
7506 case Intrinsic::experimental_constrained_llround: {
7507 Type *ValTy = FPI.getArgOperand(0)->getType();
7508 Type *ResultTy = FPI.getType();
7509 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7510 "Intrinsic does not support vectors", &FPI);
7511 break;
7512 }
7513
7514 case Intrinsic::experimental_constrained_fcmp:
7515 case Intrinsic::experimental_constrained_fcmps: {
7516 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7518 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7519 break;
7520 }
7521
7522 case Intrinsic::experimental_constrained_fptosi:
7523 case Intrinsic::experimental_constrained_fptoui: {
7524 Value *Operand = FPI.getArgOperand(0);
7525 ElementCount SrcEC;
7526 Check(Operand->getType()->isFPOrFPVectorTy(),
7527 "Intrinsic first argument must be floating point", &FPI);
7528 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7529 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7530 }
7531
7532 Operand = &FPI;
7533 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7534 "Intrinsic first argument and result disagree on vector use", &FPI);
7535 Check(Operand->getType()->isIntOrIntVectorTy(),
7536 "Intrinsic result must be an integer", &FPI);
7537 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7538 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7539 "Intrinsic first argument and result vector lengths must be equal",
7540 &FPI);
7541 }
7542 break;
7543 }
7544
7545 case Intrinsic::experimental_constrained_sitofp:
7546 case Intrinsic::experimental_constrained_uitofp: {
7547 Value *Operand = FPI.getArgOperand(0);
7548 ElementCount SrcEC;
7549 Check(Operand->getType()->isIntOrIntVectorTy(),
7550 "Intrinsic first argument must be integer", &FPI);
7551 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7552 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7553 }
7554
7555 Operand = &FPI;
7556 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7557 "Intrinsic first argument and result disagree on vector use", &FPI);
7558 Check(Operand->getType()->isFPOrFPVectorTy(),
7559 "Intrinsic result must be a floating point", &FPI);
7560 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7561 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7562 "Intrinsic first argument and result vector lengths must be equal",
7563 &FPI);
7564 }
7565 break;
7566 }
7567
7568 case Intrinsic::experimental_constrained_fptrunc:
7569 case Intrinsic::experimental_constrained_fpext: {
7570 Value *Operand = FPI.getArgOperand(0);
7571 Type *OperandTy = Operand->getType();
7572 Value *Result = &FPI;
7573 Type *ResultTy = Result->getType();
7574 Check(OperandTy->isFPOrFPVectorTy(),
7575 "Intrinsic first argument must be FP or FP vector", &FPI);
7576 Check(ResultTy->isFPOrFPVectorTy(),
7577 "Intrinsic result must be FP or FP vector", &FPI);
7578 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7579 "Intrinsic first argument and result disagree on vector use", &FPI);
7580 if (OperandTy->isVectorTy()) {
7581 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7582 cast<VectorType>(ResultTy)->getElementCount(),
7583 "Intrinsic first argument and result vector lengths must be equal",
7584 &FPI);
7585 }
7586 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7587 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7588 "Intrinsic first argument's type must be larger than result type",
7589 &FPI);
7590 } else {
7591 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7592 "Intrinsic first argument's type must be smaller than result type",
7593 &FPI);
7594 }
7595 break;
7596 }
7597
7598 default:
7599 break;
7600 }
7601
7602 // If a non-metadata argument is passed in a metadata slot then the
7603 // error will be caught earlier when the incorrect argument doesn't
7604 // match the specification in the intrinsic call table. Thus, no
7605 // argument type check is needed here.
7606
7607 Check(FPI.getExceptionBehavior().has_value(),
7608 "invalid exception behavior argument", &FPI);
7609 if (HasRoundingMD) {
7610 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7611 &FPI);
7612 }
7613}
7614
7615void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7616 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7617 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7618
7619 // We don't know whether this intrinsic verified correctly.
7620 if (!V || !E || !E->isValid())
7621 return;
7622
7623 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7624 auto Fragment = E->getFragmentInfo();
7625 if (!Fragment)
7626 return;
7627
7628 // The frontend helps out GDB by emitting the members of local anonymous
7629 // unions as artificial local variables with shared storage. When SROA splits
7630 // the storage for artificial local variables that are smaller than the entire
7631 // union, the overhang piece will be outside of the allotted space for the
7632 // variable and this check fails.
7633 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7634 if (V->isArtificial())
7635 return;
7636
7637 verifyFragmentExpression(*V, *Fragment, &DVR);
7638}
7639
7640template <typename ValueOrMetadata>
7641void Verifier::verifyFragmentExpression(const DIVariable &V,
7643 ValueOrMetadata *Desc) {
7644 // If there's no size, the type is broken, but that should be checked
7645 // elsewhere.
7646 auto VarSize = V.getSizeInBits();
7647 if (!VarSize)
7648 return;
7649
7650 unsigned FragSize = Fragment.SizeInBits;
7651 unsigned FragOffset = Fragment.OffsetInBits;
7652 CheckDI(FragSize + FragOffset <= *VarSize,
7653 "fragment is larger than or outside of variable", Desc, &V);
7654 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7655}
7656
7657void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7658 // This function does not take the scope of noninlined function arguments into
7659 // account. Don't run it if current function is nodebug, because it may
7660 // contain inlined debug intrinsics.
7661 if (!HasDebugInfo)
7662 return;
7663
7664 // For performance reasons only check non-inlined ones.
7665 if (DVR.getDebugLoc()->getInlinedAt())
7666 return;
7667
7668 DILocalVariable *Var = DVR.getVariable();
7669 CheckDI(Var, "#dbg record without variable");
7670
7671 unsigned ArgNo = Var->getArg();
7672 if (!ArgNo)
7673 return;
7674
7675 // Verify there are no duplicate function argument debug info entries.
7676 // These will cause hard-to-debug assertions in the DWARF backend.
7677 if (DebugFnArgs.size() < ArgNo)
7678 DebugFnArgs.resize(ArgNo, nullptr);
7679
7680 auto *Prev = DebugFnArgs[ArgNo - 1];
7681 DebugFnArgs[ArgNo - 1] = Var;
7682 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7683 Prev, Var);
7684}
7685
7686void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7687 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7688
7689 // We don't know whether this intrinsic verified correctly.
7690 if (!E || !E->isValid())
7691 return;
7692
7694 Value *VarValue = DVR.getVariableLocationOp(0);
7695 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7696 return;
7697 // We allow EntryValues for swift async arguments, as they have an
7698 // ABI-guarantee to be turned into a specific register.
7699 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7700 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7701 return;
7702 }
7703
7704 CheckDI(!E->isEntryValue(),
7705 "Entry values are only allowed in MIR unless they target a "
7706 "swiftasync Argument",
7707 &DVR);
7708}
7709
7710void Verifier::verifyCompileUnits() {
7711 // When more than one Module is imported into the same context, such as during
7712 // an LTO build before linking the modules, ODR type uniquing may cause types
7713 // to point to a different CU. This check does not make sense in this case.
7714 if (M.getContext().isODRUniquingDebugTypes())
7715 return;
7716 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7717 SmallPtrSet<const Metadata *, 2> Listed;
7718 if (CUs)
7719 Listed.insert_range(CUs->operands());
7720 for (const auto *CU : CUVisited)
7721 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7722 CUVisited.clear();
7723}
7724
7725void Verifier::verifyDeoptimizeCallingConvs() {
7726 if (DeoptimizeDeclarations.empty())
7727 return;
7728
7729 const Function *First = DeoptimizeDeclarations[0];
7730 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7731 Check(First->getCallingConv() == F->getCallingConv(),
7732 "All llvm.experimental.deoptimize declarations must have the same "
7733 "calling convention",
7734 First, F);
7735 }
7736}
7737
7738void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7739 const OperandBundleUse &BU) {
7740 FunctionType *FTy = Call.getFunctionType();
7741
7742 Check((FTy->getReturnType()->isPointerTy() ||
7743 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7744 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7745 "function returning a pointer or a non-returning function that has a "
7746 "void return type",
7747 Call);
7748
7749 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7750 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7751 "an argument",
7752 Call);
7753
7754 auto *Fn = cast<Function>(BU.Inputs.front());
7755 Intrinsic::ID IID = Fn->getIntrinsicID();
7756
7757 if (IID) {
7758 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7759 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7760 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7761 "invalid function argument", Call);
7762 } else {
7763 StringRef FnName = Fn->getName();
7764 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7765 FnName == "objc_claimAutoreleasedReturnValue" ||
7766 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7767 "invalid function argument", Call);
7768 }
7769}
7770
7771void Verifier::verifyNoAliasScopeDecl() {
7772 if (NoAliasScopeDecls.empty())
7773 return;
7774
7775 // only a single scope must be declared at a time.
7776 for (auto *II : NoAliasScopeDecls) {
7777 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7778 "Not a llvm.experimental.noalias.scope.decl ?");
7779 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7781 Check(ScopeListMV != nullptr,
7782 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7783 "argument",
7784 II);
7785
7786 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7787 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7788 Check(ScopeListMD->getNumOperands() == 1,
7789 "!id.scope.list must point to a list with a single scope", II);
7790 visitAliasScopeListMetadata(ScopeListMD);
7791 }
7792
7793 // Only check the domination rule when requested. Once all passes have been
7794 // adapted this option can go away.
7796 return;
7797
7798 // Now sort the intrinsics based on the scope MDNode so that declarations of
7799 // the same scopes are next to each other.
7800 auto GetScope = [](IntrinsicInst *II) {
7801 const auto *ScopeListMV = cast<MetadataAsValue>(
7803 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7804 };
7805
7806 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7807 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7808 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7809 return GetScope(Lhs) < GetScope(Rhs);
7810 };
7811
7812 llvm::sort(NoAliasScopeDecls, Compare);
7813
7814 // Go over the intrinsics and check that for the same scope, they are not
7815 // dominating each other.
7816 auto ItCurrent = NoAliasScopeDecls.begin();
7817 while (ItCurrent != NoAliasScopeDecls.end()) {
7818 auto CurScope = GetScope(*ItCurrent);
7819 auto ItNext = ItCurrent;
7820 do {
7821 ++ItNext;
7822 } while (ItNext != NoAliasScopeDecls.end() &&
7823 GetScope(*ItNext) == CurScope);
7824
7825 // [ItCurrent, ItNext) represents the declarations for the same scope.
7826 // Ensure they are not dominating each other.. but only if it is not too
7827 // expensive.
7828 if (ItNext - ItCurrent < 32)
7829 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7830 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7831 if (I != J)
7832 Check(!DT.dominates(I, J),
7833 "llvm.experimental.noalias.scope.decl dominates another one "
7834 "with the same scope",
7835 I);
7836 ItCurrent = ItNext;
7837 }
7838}
7839
7840//===----------------------------------------------------------------------===//
7841// Implement the public interfaces to this file...
7842//===----------------------------------------------------------------------===//
7843
7845 Function &F = const_cast<Function &>(f);
7846
7847 // Don't use a raw_null_ostream. Printing IR is expensive.
7848 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7849
7850 // Note that this function's return value is inverted from what you would
7851 // expect of a function called "verify".
7852 return !V.verify(F);
7853}
7854
7856 bool *BrokenDebugInfo) {
7857 // Don't use a raw_null_ostream. Printing IR is expensive.
7858 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7859
7860 bool Broken = false;
7861 for (const Function &F : M)
7862 Broken |= !V.verify(F);
7863
7864 Broken |= !V.verify();
7865 if (BrokenDebugInfo)
7866 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7867 // Note that this function's return value is inverted from what you would
7868 // expect of a function called "verify".
7869 return Broken;
7870}
7871
7872namespace {
7873
7874struct VerifierLegacyPass : public FunctionPass {
7875 static char ID;
7876
7877 std::unique_ptr<Verifier> V;
7878 bool FatalErrors = true;
7879
7880 VerifierLegacyPass() : FunctionPass(ID) {
7882 }
7883 explicit VerifierLegacyPass(bool FatalErrors)
7884 : FunctionPass(ID),
7885 FatalErrors(FatalErrors) {
7887 }
7888
7889 bool doInitialization(Module &M) override {
7890 V = std::make_unique<Verifier>(
7891 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7892 return false;
7893 }
7894
7895 bool runOnFunction(Function &F) override {
7896 if (!V->verify(F) && FatalErrors) {
7897 errs() << "in function " << F.getName() << '\n';
7898 report_fatal_error("Broken function found, compilation aborted!");
7899 }
7900 return false;
7901 }
7902
7903 bool doFinalization(Module &M) override {
7904 bool HasErrors = false;
7905 for (Function &F : M)
7906 if (F.isDeclaration())
7907 HasErrors |= !V->verify(F);
7908
7909 HasErrors |= !V->verify();
7910 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7911 report_fatal_error("Broken module found, compilation aborted!");
7912 return false;
7913 }
7914
7915 void getAnalysisUsage(AnalysisUsage &AU) const override {
7916 AU.setPreservesAll();
7917 }
7918};
7919
7920} // end anonymous namespace
7921
7922/// Helper to issue failure from the TBAA verification
7923template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7924 if (Diagnostic)
7925 return Diagnostic->CheckFailed(Args...);
7926}
7927
/// Check a TBAA-specific condition \p C; on failure, report the remaining
/// arguments via TBAAVerifier::CheckFailed and make the enclosing function
/// return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7935
7936/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7937/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7938/// struct-type node describing an aggregate data structure (like a struct).
7939TBAAVerifier::TBAABaseNodeSummary
7940TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7941 bool IsNewFormat) {
7942 if (BaseNode->getNumOperands() < 2) {
7943 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7944 return {true, ~0u};
7945 }
7946
7947 auto Itr = TBAABaseNodes.find(BaseNode);
7948 if (Itr != TBAABaseNodes.end())
7949 return Itr->second;
7950
7951 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7952 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7953 (void)InsertResult;
7954 assert(InsertResult.second && "We just checked!");
7955 return Result;
7956}
7957
7958TBAAVerifier::TBAABaseNodeSummary
7959TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7960 const MDNode *BaseNode, bool IsNewFormat) {
7961 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7962
7963 if (BaseNode->getNumOperands() == 2) {
7964 // Scalar nodes can only be accessed at offset 0.
7965 return isValidScalarTBAANode(BaseNode)
7966 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7967 : InvalidNode;
7968 }
7969
7970 if (IsNewFormat) {
7971 if (BaseNode->getNumOperands() % 3 != 0) {
7972 CheckFailed("Access tag nodes must have the number of operands that is a "
7973 "multiple of 3!", BaseNode);
7974 return InvalidNode;
7975 }
7976 } else {
7977 if (BaseNode->getNumOperands() % 2 != 1) {
7978 CheckFailed("Struct tag nodes must have an odd number of operands!",
7979 BaseNode);
7980 return InvalidNode;
7981 }
7982 }
7983
7984 // Check the type size field.
7985 if (IsNewFormat) {
7986 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7987 BaseNode->getOperand(1));
7988 if (!TypeSizeNode) {
7989 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7990 return InvalidNode;
7991 }
7992 }
7993
7994 // Check the type name field. In the new format it can be anything.
7995 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7996 CheckFailed("Struct tag nodes have a string as their first operand",
7997 BaseNode);
7998 return InvalidNode;
7999 }
8000
8001 bool Failed = false;
8002
8003 std::optional<APInt> PrevOffset;
8004 unsigned BitWidth = ~0u;
8005
8006 // We've already checked that BaseNode is not a degenerate root node with one
8007 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8008 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8009 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8010 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8011 Idx += NumOpsPerField) {
8012 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8013 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8014 if (!isa<MDNode>(FieldTy)) {
8015 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8016 Failed = true;
8017 continue;
8018 }
8019
8020 auto *OffsetEntryCI =
8022 if (!OffsetEntryCI) {
8023 CheckFailed("Offset entries must be constants!", I, BaseNode);
8024 Failed = true;
8025 continue;
8026 }
8027
8028 if (BitWidth == ~0u)
8029 BitWidth = OffsetEntryCI->getBitWidth();
8030
8031 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8032 CheckFailed(
8033 "Bitwidth between the offsets and struct type entries must match", I,
8034 BaseNode);
8035 Failed = true;
8036 continue;
8037 }
8038
8039 // NB! As far as I can tell, we generate a non-strictly increasing offset
8040 // sequence only from structs that have zero size bit fields. When
8041 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8042 // pick the field lexically the latest in struct type metadata node. This
8043 // mirrors the actual behavior of the alias analysis implementation.
8044 bool IsAscending =
8045 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8046
8047 if (!IsAscending) {
8048 CheckFailed("Offsets must be increasing!", I, BaseNode);
8049 Failed = true;
8050 }
8051
8052 PrevOffset = OffsetEntryCI->getValue();
8053
8054 if (IsNewFormat) {
8055 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8056 BaseNode->getOperand(Idx + 2));
8057 if (!MemberSizeNode) {
8058 CheckFailed("Member size entries must be constants!", I, BaseNode);
8059 Failed = true;
8060 continue;
8061 }
8062 }
8063 }
8064
8065 return Failed ? InvalidNode
8066 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8067}
8068
8069static bool IsRootTBAANode(const MDNode *MD) {
8070 return MD->getNumOperands() < 2;
8071}
8072
8073static bool IsScalarTBAANodeImpl(const MDNode *MD,
8075 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8076 return false;
8077
8078 if (!isa<MDString>(MD->getOperand(0)))
8079 return false;
8080
8081 if (MD->getNumOperands() == 3) {
8083 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8084 return false;
8085 }
8086
8087 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8088 return Parent && Visited.insert(Parent).second &&
8089 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8090}
8091
8092bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8093 auto ResultIt = TBAAScalarNodes.find(MD);
8094 if (ResultIt != TBAAScalarNodes.end())
8095 return ResultIt->second;
8096
8097 SmallPtrSet<const MDNode *, 4> Visited;
8098 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8099 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8100 (void)InsertResult;
8101 assert(InsertResult.second && "Just checked!");
8102
8103 return Result;
8104}
8105
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Struct-type nodes store (field, offset[, size]) groups; fields are sorted
  // by ascending offset, so scan for the first field that starts past Offset
  // and step back one group.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      // Offset falls before the very first field: there is no containing
      // field, which means the access tag is malformed.
      if (Idx == FirstFieldOpNo) {
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // The previous field contains Offset; rebase Offset to be relative to
      // that field's start before descending into it.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset is at or past the start of the last field; descend into it.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8149
8151 if (!Type || Type->getNumOperands() < 3)
8152 return false;
8153
8154 // In the new format type nodes shall have a reference to the parent type as
8155 // its first operand.
8156 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8157}
8158
8160 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8161 MD);
8162
8163 if (I)
8167 "This instruction shall not have a TBAA access tag!", I);
8168
8169 bool IsStructPathTBAA =
8170 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8171
8172 CheckTBAA(IsStructPathTBAA,
8173 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8174 I);
8175
8176 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8177 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8178
8179 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8180
8181 if (IsNewFormat) {
8182 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8183 "Access tag metadata must have either 4 or 5 operands", I, MD);
8184 } else {
8185 CheckTBAA(MD->getNumOperands() < 5,
8186 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8187 }
8188
8189 // Check the access size field.
8190 if (IsNewFormat) {
8191 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8192 MD->getOperand(3));
8193 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8194 }
8195
8196 // Check the immutability flag.
8197 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8198 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8199 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8200 MD->getOperand(ImmutabilityFlagOpNo));
8201 CheckTBAA(IsImmutableCI,
8202 "Immutability tag on struct tag metadata must be a constant", I,
8203 MD);
8204 CheckTBAA(
8205 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8206 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8207 MD);
8208 }
8209
8210 CheckTBAA(BaseNode && AccessType,
8211 "Malformed struct tag metadata: base and access-type "
8212 "should be non-null and point to Metadata nodes",
8213 I, MD, BaseNode, AccessType);
8214
8215 if (!IsNewFormat) {
8216 CheckTBAA(isValidScalarTBAANode(AccessType),
8217 "Access type node must be a valid scalar type", I, MD,
8218 AccessType);
8219 }
8220
8222 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8223
8224 APInt Offset = OffsetCI->getValue();
8225 bool SeenAccessTypeInPath = false;
8226
8227 SmallPtrSet<MDNode *, 4> StructPath;
8228
8229 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8230 BaseNode =
8231 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8232 if (!StructPath.insert(BaseNode).second) {
8233 CheckFailed("Cycle detected in struct path", I, MD);
8234 return false;
8235 }
8236
8237 bool Invalid;
8238 unsigned BaseNodeBitWidth;
8239 std::tie(Invalid, BaseNodeBitWidth) =
8240 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8241
8242 // If the base node is invalid in itself, then we've already printed all the
8243 // errors we wanted to print.
8244 if (Invalid)
8245 return false;
8246
8247 SeenAccessTypeInPath |= BaseNode == AccessType;
8248
8249 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8250 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8251 MD, &Offset);
8252
8253 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8254 (BaseNodeBitWidth == 0 && Offset == 0) ||
8255 (IsNewFormat && BaseNodeBitWidth == ~0u),
8256 "Access bit-width not the same as description bit-width", I, MD,
8257 BaseNodeBitWidth, Offset.getBitWidth());
8258
8259 if (IsNewFormat && SeenAccessTypeInPath)
8260 break;
8261 }
8262
8263 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8264 MD);
8265 return true;
8266}
8267
// Pass identification token and registration for the legacy pass manager.
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8270
8272 return new VerifierLegacyPass(FatalErrors);
8273}
8274
// Unique identity token for VerifierAnalysis in the new pass manager.
AnalysisKey VerifierAnalysis::Key;
8282
8287
8289 auto Res = AM.getResult<VerifierAnalysis>(M);
8290 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8291 report_fatal_error("Broken module found, compilation aborted!");
8292
8293 return PreservedAnalyses::all();
8294}
8295
8297 auto res = AM.getResult<VerifierAnalysis>(F);
8298 if (res.IRBroken && FatalErrors)
8299 report_fatal_error("Broken function found, compilation aborted!");
8300
8301 return PreservedAnalyses::all();
8302}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:683
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:724
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:6080
bool isFiniteNonZero() const
Definition APFloat.h:1522
bool isNegative() const
Definition APFloat.h:1512
const fltSemantics & getSemantics() const
Definition APFloat.h:1520
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1571
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:674
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:624
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast from unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:712
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:819
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and llvm::setLoopEstimatedTripCount.
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained intrinsic rounding mode metadata.
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:307
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:300
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:289
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:316
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144