LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
85#include "llvm/IR/GlobalAlias.h"
86#include "llvm/IR/GlobalValue.h"
88#include "llvm/IR/InlineAsm.h"
89#include "llvm/IR/InstVisitor.h"
90#include "llvm/IR/InstrTypes.h"
91#include "llvm/IR/Instruction.h"
94#include "llvm/IR/Intrinsics.h"
95#include "llvm/IR/IntrinsicsAArch64.h"
96#include "llvm/IR/IntrinsicsAMDGPU.h"
97#include "llvm/IR/IntrinsicsARM.h"
98#include "llvm/IR/IntrinsicsNVPTX.h"
99#include "llvm/IR/IntrinsicsWebAssembly.h"
100#include "llvm/IR/LLVMContext.h"
102#include "llvm/IR/Metadata.h"
103#include "llvm/IR/Module.h"
105#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
118#include "llvm/Support/Casting.h"
122#include "llvm/Support/ModRef.h"
125#include <algorithm>
126#include <cassert>
127#include <cstdint>
128#include <memory>
129#include <optional>
130#include <string>
131#include <utility>
132
133using namespace llvm;
134
136 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
137 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
138 "scopes are not dominating"));
139
142 const Module &M;
144 const Triple &TT;
147
148 /// Track the brokenness of the module while recursively visiting.
149 bool Broken = false;
150 /// Broken debug info can be "recovered" from by stripping the debug info.
151 bool BrokenDebugInfo = false;
152 /// Whether to treat broken debug info as an error.
154
156 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
157 Context(M.getContext()) {}
158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
187 switch (Type) {
189 *OS << "value";
190 break;
192 *OS << "declare";
193 break;
195 *OS << "declare_value";
196 break;
198 *OS << "assign";
199 break;
201 *OS << "end";
202 break;
204 *OS << "any";
205 break;
206 };
207 }
208
209 void Write(const Metadata *MD) {
210 if (!MD)
211 return;
212 MD->print(*OS, MST, &M);
213 *OS << '\n';
214 }
215
  // Typed MD-tuple wrapper: unwrap and defer to the Metadata printer (which
  // tolerates a null underlying tuple).
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
219
220 void Write(const NamedMDNode *NMD) {
221 if (!NMD)
222 return;
223 NMD->print(*OS, MST);
224 *OS << '\n';
225 }
226
227 void Write(Type *T) {
228 if (!T)
229 return;
230 *OS << ' ' << *T;
231 }
232
233 void Write(const Comdat *C) {
234 if (!C)
235 return;
236 *OS << *C;
237 }
238
239 void Write(const APInt *AI) {
240 if (!AI)
241 return;
242 *OS << *AI << '\n';
243 }
244
245 void Write(const unsigned i) { *OS << i << '\n'; }
246
247 // NOLINTNEXTLINE(readability-identifier-naming)
248 void Write(const Attribute *A) {
249 if (!A)
250 return;
251 *OS << A->getAsString() << '\n';
252 }
253
254 // NOLINTNEXTLINE(readability-identifier-naming)
255 void Write(const AttributeSet *AS) {
256 if (!AS)
257 return;
258 *OS << AS->getAsString() << '\n';
259 }
260
261 // NOLINTNEXTLINE(readability-identifier-naming)
262 void Write(const AttributeList *AL) {
263 if (!AL)
264 return;
265 AL->print(*OS);
266 }
267
268 void Write(Printable P) { *OS << P << '\n'; }
269
270 template <typename T> void Write(ArrayRef<T> Vs) {
271 for (const T &V : Vs)
272 Write(V);
273 }
274
275 template <typename T1, typename... Ts>
276 void WriteTs(const T1 &V1, const Ts &... Vs) {
277 Write(V1);
278 WriteTs(Vs...);
279 }
280
  // Zero-argument terminator for the variadic printer above.
  template <typename... Ts> void WriteTs() {}
282
283public:
284 /// A check failed, so printout out the condition and the message.
285 ///
286 /// This provides a nice place to put a breakpoint if you want to see why
287 /// something is not correct.
288 void CheckFailed(const Twine &Message) {
289 if (OS)
290 *OS << Message << '\n';
291 Broken = true;
292 }
293
294 /// A check failed (with values to print).
295 ///
296 /// This calls the Message-only version so that the above is easier to set a
297 /// breakpoint on.
298 template <typename T1, typename... Ts>
299 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
300 CheckFailed(Message);
301 if (OS)
302 WriteTs(V1, Vs...);
303 }
304
305 /// A debug info check failed.
  // Record a broken-debug-info condition, printing the message if a stream
  // was supplied. Unlike CheckFailed, this sets BrokenDebugInfo rather than
  // Broken, so callers may recover by stripping debug info.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // NOTE(review): upstream also marks the module Broken here when
    // TreatBrokenDebugInfoAsError is set; that line appears elided in this
    // extraction — confirm against the original source.
    BrokenDebugInfo = true;
  }
312
313 /// A debug info check failed (with values to print).
314 template <typename T1, typename... Ts>
315 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
316 const Ts &... Vs) {
317 DebugInfoCheckFailed(Message);
318 if (OS)
319 WriteTs(V1, Vs...);
320 }
321};
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
338
339 /// Keep track which DISubprogram is attached to which function.
341
342 /// Track all DICompileUnits visited.
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
  // Construct a verifier bound to a single module. OS may be null to
  // suppress diagnostic output; broken-debug-info handling is selected by
  // the caller via ShouldTreatBrokenDebugInfoAsError.
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
399
  // True if any debug-info check failed (recoverable by stripping debug info).
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
402 bool verify(const Function &F) {
403 llvm::TimeTraceScope timeScope("Verifier");
404 assert(F.getParent() == &M &&
405 "An instance of this class only works with a specific module!");
406
407 // First ensure the function is well-enough formed to compute dominance
408 // information, and directly compute a dominance tree. We don't rely on the
409 // pass manager to provide this as it isolates us from a potentially
410 // out-of-date dominator tree and makes it significantly more complex to run
411 // this code outside of a pass manager.
412 // FIXME: It's really gross that we have to cast away constness here.
413 if (!F.empty())
414 DT.recalculate(const_cast<Function &>(F));
415
416 for (const BasicBlock &BB : F) {
417 if (!BB.empty() && BB.back().isTerminator())
418 continue;
419
420 if (OS) {
421 *OS << "Basic Block in function '" << F.getName()
422 << "' does not have terminator!\n";
423 BB.printAsOperand(*OS, true, MST);
424 *OS << "\n";
425 }
426 return false;
427 }
428
429 auto FailureCB = [this](const Twine &Message) {
430 this->CheckFailed(Message);
431 };
432 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
433
434 Broken = false;
435 // FIXME: We strip const here because the inst visitor strips const.
436 visit(const_cast<Function &>(F));
437 verifySiblingFuncletUnwinds();
438
439 if (ConvergenceVerifyHelper.sawTokens())
440 ConvergenceVerifyHelper.verify(DT);
441
442 InstsInThisBlock.clear();
443 DebugFnArgs.clear();
444 LandingPadResultTy = nullptr;
445 SawFrameEscape = false;
446 SiblingFuncletInfo.clear();
447 verifyNoAliasScopeDecl();
448 NoAliasScopeDecls.clear();
449
450 return !Broken;
451 }
452
453 /// Verify the module that this instance of \c Verifier was initialized with.
454 bool verify() {
455 Broken = false;
456
457 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
458 for (const Function &F : M)
459 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
460 DeoptimizeDeclarations.push_back(&F);
461
462 // Now that we've visited every function, verify that we never asked to
463 // recover a frame index that wasn't escaped.
464 verifyFrameRecoverIndices();
465 for (const GlobalVariable &GV : M.globals())
466 visitGlobalVariable(GV);
467
468 for (const GlobalAlias &GA : M.aliases())
469 visitGlobalAlias(GA);
470
471 for (const GlobalIFunc &GI : M.ifuncs())
472 visitGlobalIFunc(GI);
473
474 for (const NamedMDNode &NMD : M.named_metadata())
475 visitNamedMDNode(NMD);
476
477 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
478 visitComdat(SMEC.getValue());
479
480 visitModuleFlags();
481 visitModuleIdents();
482 visitModuleCommandLines();
483 visitModuleErrnoTBAA();
484
485 verifyCompileUnits();
486
487 verifyDeoptimizeCallingConvs();
488 DISubprogramAttachments.clear();
489 return !Broken;
490 }
491
492private:
493 /// Whether a metadata node is allowed to be, or contain, a DILocation.
494 enum class AreDebugLocsAllowed { No, Yes };
495
496 /// Metadata that should be treated as a range, with slightly different
497 /// requirements.
498 enum class RangeLikeMetadataKind {
499 Range, // MD_range
500 AbsoluteSymbol, // MD_absolute_symbol
501 NoaliasAddrspace // MD_noalias_addrspace
502 };
503
504 // Verification methods...
505 void visitGlobalValue(const GlobalValue &GV);
506 void visitGlobalVariable(const GlobalVariable &GV);
507 void visitGlobalAlias(const GlobalAlias &GA);
508 void visitGlobalIFunc(const GlobalIFunc &GI);
509 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
510 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
511 const GlobalAlias &A, const Constant &C);
512 void visitNamedMDNode(const NamedMDNode &NMD);
513 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
514 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
515 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
516 void visitDIArgList(const DIArgList &AL, Function *F);
517 void visitComdat(const Comdat &C);
518 void visitModuleIdents();
519 void visitModuleCommandLines();
520 void visitModuleErrnoTBAA();
521 void visitModuleFlags();
522 void visitModuleFlag(const MDNode *Op,
523 DenseMap<const MDString *, const MDNode *> &SeenIDs,
524 SmallVectorImpl<const MDNode *> &Requirements);
525 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
526 void visitFunction(const Function &F);
527 void visitBasicBlock(BasicBlock &BB);
528 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
529 RangeLikeMetadataKind Kind);
530 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
532 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
533 void visitNofreeMetadata(Instruction &I, MDNode *MD);
534 void visitProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallStackMetadata(MDNode *MD);
536 void visitMemProfMetadata(Instruction &I, MDNode *MD);
537 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
538 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
539 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
540 void visitMMRAMetadata(Instruction &I, MDNode *MD);
541 void visitAnnotationMetadata(MDNode *Annotation);
542 void visitAliasScopeMetadata(const MDNode *MD);
543 void visitAliasScopeListMetadata(const MDNode *MD);
544 void visitAccessGroupMetadata(const MDNode *MD);
545 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
546 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
547
548 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
549#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
550#include "llvm/IR/Metadata.def"
551 void visitDIScope(const DIScope &N);
552 void visitDIVariable(const DIVariable &N);
553 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
554 void visitDITemplateParameter(const DITemplateParameter &N);
555
556 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
557
558 void visit(DbgLabelRecord &DLR);
559 void visit(DbgVariableRecord &DVR);
560 // InstVisitor overrides...
561 using InstVisitor<Verifier>::visit;
562 void visitDbgRecords(Instruction &I);
563 void visit(Instruction &I);
564
565 void visitTruncInst(TruncInst &I);
566 void visitZExtInst(ZExtInst &I);
567 void visitSExtInst(SExtInst &I);
568 void visitFPTruncInst(FPTruncInst &I);
569 void visitFPExtInst(FPExtInst &I);
570 void visitFPToUIInst(FPToUIInst &I);
571 void visitFPToSIInst(FPToSIInst &I);
572 void visitUIToFPInst(UIToFPInst &I);
573 void visitSIToFPInst(SIToFPInst &I);
574 void visitIntToPtrInst(IntToPtrInst &I);
575 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
576 void visitPtrToAddrInst(PtrToAddrInst &I);
577 void visitPtrToIntInst(PtrToIntInst &I);
578 void visitBitCastInst(BitCastInst &I);
579 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
580 void visitPHINode(PHINode &PN);
581 void visitCallBase(CallBase &Call);
582 void visitUnaryOperator(UnaryOperator &U);
583 void visitBinaryOperator(BinaryOperator &B);
584 void visitICmpInst(ICmpInst &IC);
585 void visitFCmpInst(FCmpInst &FC);
586 void visitExtractElementInst(ExtractElementInst &EI);
587 void visitInsertElementInst(InsertElementInst &EI);
588 void visitShuffleVectorInst(ShuffleVectorInst &EI);
589 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
590 void visitCallInst(CallInst &CI);
591 void visitInvokeInst(InvokeInst &II);
592 void visitGetElementPtrInst(GetElementPtrInst &GEP);
593 void visitLoadInst(LoadInst &LI);
594 void visitStoreInst(StoreInst &SI);
595 void verifyDominatesUse(Instruction &I, unsigned i);
596 void visitInstruction(Instruction &I);
597 void visitTerminator(Instruction &I);
598 void visitBranchInst(BranchInst &BI);
599 void visitReturnInst(ReturnInst &RI);
600 void visitSwitchInst(SwitchInst &SI);
601 void visitIndirectBrInst(IndirectBrInst &BI);
602 void visitCallBrInst(CallBrInst &CBI);
603 void visitSelectInst(SelectInst &SI);
604 void visitUserOp1(Instruction &I);
605 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
606 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
607 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
608 void visitVPIntrinsic(VPIntrinsic &VPI);
609 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
610 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
611 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
612 void visitFenceInst(FenceInst &FI);
613 void visitAllocaInst(AllocaInst &AI);
614 void visitExtractValueInst(ExtractValueInst &EVI);
615 void visitInsertValueInst(InsertValueInst &IVI);
616 void visitEHPadPredecessors(Instruction &I);
617 void visitLandingPadInst(LandingPadInst &LPI);
618 void visitResumeInst(ResumeInst &RI);
619 void visitCatchPadInst(CatchPadInst &CPI);
620 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
621 void visitCleanupPadInst(CleanupPadInst &CPI);
622 void visitFuncletPadInst(FuncletPadInst &FPI);
623 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
624 void visitCleanupReturnInst(CleanupReturnInst &CRI);
625
626 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
627 void verifySwiftErrorValue(const Value *SwiftErrorVal);
628 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
629 void verifyMustTailCall(CallInst &CI);
630 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
631 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
632 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
633 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
634 const Value *V);
635 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
636 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
637 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
638 void verifyUnknownProfileMetadata(MDNode *MD);
639 void visitConstantExprsRecursively(const Constant *EntryC);
640 void visitConstantExpr(const ConstantExpr *CE);
641 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
642 void verifyInlineAsmCall(const CallBase &Call);
643 void verifyStatepoint(const CallBase &Call);
644 void verifyFrameRecoverIndices();
645 void verifySiblingFuncletUnwinds();
646
647 void verifyFragmentExpression(const DbgVariableRecord &I);
648 template <typename ValueOrMetadata>
649 void verifyFragmentExpression(const DIVariable &V,
651 ValueOrMetadata *Desc);
652 void verifyFnArgs(const DbgVariableRecord &DVR);
653 void verifyNotEntryValue(const DbgVariableRecord &I);
654
655 /// Module-level debug info verification...
656 void verifyCompileUnits();
657
658 /// Module-level verification that all @llvm.experimental.deoptimize
659 /// declarations share the same calling convention.
660 void verifyDeoptimizeCallingConvs();
661
662 void verifyAttachedCallBundle(const CallBase &Call,
663 const OperandBundleUse &BU);
664
665 /// Verify the llvm.experimental.noalias.scope.decl declarations
666 void verifyNoAliasScopeDecl();
667};
668
669} // end anonymous namespace
670
671/// We know that cond should be true, if not print an error message.
672#define Check(C, ...) \
673 do { \
674 if (!(C)) { \
675 CheckFailed(__VA_ARGS__); \
676 return; \
677 } \
678 } while (false)
679
680/// We know that a debug info condition should be true, if not print
681/// an error message.
682#define CheckDI(C, ...) \
683 do { \
684 if (!(C)) { \
685 DebugInfoCheckFailed(__VA_ARGS__); \
686 return; \
687 } \
688 } while (false)
689
690void Verifier::visitDbgRecords(Instruction &I) {
691 if (!I.DebugMarker)
692 return;
693 CheckDI(I.DebugMarker->MarkedInstr == &I,
694 "Instruction has invalid DebugMarker", &I);
695 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
696 "PHI Node must not have any attached DbgRecords", &I);
697 for (DbgRecord &DR : I.getDbgRecordRange()) {
698 CheckDI(DR.getMarker() == I.DebugMarker,
699 "DbgRecord had invalid DebugMarker", &I, &DR);
700 if (auto *Loc =
702 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
703 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
704 visit(*DVR);
705 // These have to appear after `visit` for consistency with existing
706 // intrinsic behaviour.
707 verifyFragmentExpression(*DVR);
708 verifyNotEntryValue(*DVR);
709 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
710 visit(*DLR);
711 }
712 }
713}
714
715void Verifier::visit(Instruction &I) {
716 visitDbgRecords(I);
717 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
718 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
720}
721
722// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
723static void forEachUser(const Value *User,
725 llvm::function_ref<bool(const Value *)> Callback) {
726 if (!Visited.insert(User).second)
727 return;
728
730 while (!WorkList.empty()) {
731 const Value *Cur = WorkList.pop_back_val();
732 if (!Visited.insert(Cur).second)
733 continue;
734 if (Callback(Cur))
735 append_range(WorkList, Cur->materialized_users());
736 }
737}
738
// Checks that apply to every GlobalValue: linkage/visibility/storage-class
// consistency, well-formed !associated and !implicit_ref attachments, and
// that every user of the global lives in this module.
// NOTE(review): several Check(...) predicate lines did not survive this
// extraction; dangling message-argument lines below mark the elisions —
// confirm each against the upstream source before editing.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    // !associated must be a single ValueAsMetadata operand naming a pointer
    // to some *other* global object.
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }

    // !implicit_ref: each node holds one pointer-typed ValueAsMetadata
    // referring to a different GlobalObject; declarations may not carry it.
    if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
      Check(!GO->isDeclaration(),
            "ref metadata must not be placed on a declaration", GO);

      // NOTE(review): the declaration of MDs (presumably a SmallVector of
      // MDNode*) appears elided from this view.
      GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
      for (const MDNode *MD : MDs) {
        Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
              &GV, MD);
        const Metadata *Op = MD->getOperand(0).get();
        const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
        Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
        if (VM) {
          Check(isa<PointerType>(VM->getValue()->getType()),
                "ref value must be pointer typed", GV, MD);

          const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
          Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
                "ref metadata must point to a GlobalObject", GO, Stripped);
          Check(Stripped != GO, "values should not reference themselves", GO,
                MD);
        }
      }
    }
  }

      "Only global variables can have appending linkage!", &GV);

  // Appending linkage additionally requires an array-typed global variable.
  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
        "dllexport GlobalValue must have default or protected visibility",
        &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
        "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Every (transitive) user must live in this module; instructions must be
  // properly parented as well. Returning false stops descending into users.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
854
// Checks specific to GlobalVariables: alignment bounds, initializer/type
// agreement, the llvm.global_ctors/llvm.global_dtors and
// llvm.used/llvm.compiler.used special globals, !dbg attachments, and type
// restrictions (no scalable types). Ends by delegating to visitGlobalValue.
// NOTE(review): some Check(...) predicate lines and a local SmallVector
// declaration did not survive this extraction; dangling argument lines below
// mark the elisions — confirm each against the upstream source.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
          "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  // llvm.global_ctors / llvm.global_dtors: array of { i32, ptr, ptr }.
  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  // llvm.used / llvm.compiler.used: array of named pointers.
  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initializer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
              Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  // NOTE(review): the declaration of MDs (a SmallVector of MDNode*) appears
  // elided from this view.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
      "Global @" + GV.getName() + " has illegal target extension type",
      GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
966
967void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
968 SmallPtrSet<const GlobalAlias*, 4> Visited;
969 Visited.insert(&GA);
970 visitAliaseeSubExpr(Visited, GA, C);
971}
972
// Recursively verify the constant expression forming an alias's aliasee,
// rejecting alias cycles and aliases to interposable aliases.
// NOTE(review): a few lines of this function appear elided in this excerpt
// (the available_externally guard and one enclosing condition); comments
// below cover only the visible code.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
            cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias must ultimately resolve to a definition the linker keeps.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Visited tracks aliases already seen on this chain; a repeat insert
      // means the aliasee graph contains a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  // Constant expressions inside the aliasee also need the generic checks.
  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk operands so aliases reached through constant expressions (e.g.
  // GEPs or casts) are validated as well.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1011
// Verify a GlobalAlias: linkage, a non-null type-matching aliasee, and the
// aliasee expression itself.
// NOTE(review): the head of the first Check (presumably the linkage
// predicate) appears elided in this excerpt -- confirm against upstream.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  // The alias is a drop-in replacement for the aliasee, so types must agree.
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively validate the aliasee expression (cycles, interposable
  // targets, nested constant expressions).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally apply the checks common to all global values.
  visitGlobalValue(GA);
}
1029
// Verify a GlobalIFunc: metadata attachments, linkage, and that the resolver
// is a function definition returning a pointer in the right address space.
// NOTE(review): the declaration of the MDs vector and two Check(...) heads
// (linkage and return-type predicates) appear elided in this excerpt.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // ifuncs may not carry !dbg or !prof attachments; everything else is
    // validated as ordinary global metadata.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver value's own type must be a pointer in the ifunc's
  // address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1064
1065void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1066 // There used to be various other llvm.dbg.* nodes, but we don't support
1067 // upgrading them and we want to reserve the namespace for future uses.
1068 if (NMD.getName().starts_with("llvm.dbg."))
1069 CheckDI(NMD.getName() == "llvm.dbg.cu",
1070 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1071 for (const MDNode *MD : NMD.operands()) {
1072 if (NMD.getName() == "llvm.dbg.cu")
1073 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1074
1075 if (!MD)
1076 continue;
1077
1078 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1079 }
1080}
1081
// Verify one MDNode (and, recursively, its operands): context, subclass
// dispatch, operand legality, and resolution state.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for this node's concrete subclass;
  // the leaf list is generated from Metadata.def.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata is never legal inside global metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  // NOTE(review): the condition naming this metadata and the extraction of
  // the Count operand appear elided in this excerpt -- presumably operand 0
  // is the name string and operand 1 the count; confirm against upstream.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1136
1137void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1138 Check(MD.getValue(), "Expected valid value", &MD);
1139 Check(!MD.getValue()->getType()->isMetadataTy(),
1140 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1141
1142 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1143 if (!L)
1144 return;
1145
1146 Check(F, "function-local metadata used outside a function", L);
1147
1148 // If this was an instruction, bb, or argument, verify that it is in the
1149 // function that we expect.
1150 Function *ActualF = nullptr;
1151 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1152 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1153 ActualF = I->getParent()->getParent();
1154 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1155 ActualF = BB->getParent();
1156 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1157 ActualF = A->getParent();
1158 assert(ActualF && "Unimplemented function local metadata case!");
1159
1160 Check(ActualF == F, "function-local metadata used in wrong function", L);
1161}
1162
1163void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1164 for (const ValueAsMetadata *VAM : AL.getArgs())
1165 visitValueAsMetadata(*VAM, F);
1166}
1167
1168void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1169 Metadata *MD = MDV.getMetadata();
1170 if (auto *N = dyn_cast<MDNode>(MD)) {
1171 visitMDNode(*N, AreDebugLocsAllowed::No);
1172 return;
1173 }
1174
1175 // Only visit each node once. Metadata can be mutually recursive, so this
1176 // avoids infinite recursion here, as well as being an optimization.
1177 if (!MDNodes.insert(MD).second)
1178 return;
1179
1180 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1181 visitValueAsMetadata(*V, F);
1182
1183 if (auto *AL = dyn_cast<DIArgList>(MD))
1184 visitDIArgList(*AL, F);
1185}
1186
1187static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1188static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1189static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1190static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1191
1192void Verifier::visitDILocation(const DILocation &N) {
1193 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1194 "location requires a valid scope", &N, N.getRawScope());
1195 if (auto *IA = N.getRawInlinedAt())
1196 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1197 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1198 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1199}
1200
1201void Verifier::visitGenericDINode(const GenericDINode &N) {
1202 CheckDI(N.getTag(), "invalid tag", &N);
1203}
1204
1205void Verifier::visitDIScope(const DIScope &N) {
1206 if (auto *F = N.getRawFile())
1207 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1208}
1209
// Verify a DISubrangeType: tag, optional base type, and the allowed forms
// of its bound/stride/bias operands.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // Base type is optional; when present it must be a DIType.
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI head for Size appears elided in this excerpt
  // (presumably !Size || isa<ConstantAsMetadata>(Size) -- confirm upstream).
          "SizeInBits must be a constant");
}
1241
// Verify a DISubrange: count and upperBound are mutually exclusive, and each
// bound operand must be a constant, DIVariable, or DIExpression.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // NOTE(review): the head of this CheckDI appears elided in this excerpt;
  // the visible part enforces that a constant count is >= -1 (-1 denotes an
  // empty array) -- confirm the full predicate upstream.
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1269
1270void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1272 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1273 "GenericSubrange can have any one of count or upperBound", &N);
1274 auto *CBound = N.getRawCountNode();
1275 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1276 "Count must be signed constant or DIVariable or DIExpression", &N);
1277 auto *LBound = N.getRawLowerBound();
1278 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1279 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1280 "LowerBound must be signed constant or DIVariable or DIExpression",
1281 &N);
1282 auto *UBound = N.getRawUpperBound();
1283 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1284 "UpperBound must be signed constant or DIVariable or DIExpression",
1285 &N);
1286 auto *Stride = N.getRawStride();
1287 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1288 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1289 "Stride must be signed constant or DIVariable or DIExpression", &N);
1290}
1291
1292void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1293 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1294}
1295
// Verify a DIBasicType: restricted tag set and a constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI head for Size appears elided in this excerpt
  // (presumably !Size || isa<ConstantAsMetadata>(Size) -- confirm upstream).
          "SizeInBits must be a constant");
}
1306
// Verify a DIFixedPointType: base-type checks plus fixed-point-specific
// encoding/kind/factor constraints.
// NOTE(review): the kind-check head and the two rational/non-rational
// predicates appear elided in this excerpt; comments cover visible code only.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Run the common basic-type checks first (tag set, constant size).
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  // Fixed-point types must use one of the two fixed encodings.
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1325
1326void Verifier::visitDIStringType(const DIStringType &N) {
1327 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1328 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1329 &N);
1330}
1331
// Verify a DIDerivedType: allowed tag set, tag-specific extraData forms,
// set-type base constraints, scope/base-type refs, address-space usage, and
// the allowed forms of the size operand.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // extraData has a tag-specific meaning; validate each form.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accepts: absent, a constant, a string, an ObjC property, or a
    // one-element tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // NOTE(review): the dyn_casts producing Enum/Subrange/Basic appear
      // elided in this excerpt -- confirm against upstream.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces only make sense on pointer-like types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI head for Size appears elided in this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1417
1418/// Detect mutually exclusive flags.
1419static bool hasConflictingReferenceFlags(unsigned Flags) {
1420 return ((Flags & DINode::FlagLValueReference) &&
1421 (Flags & DINode::FlagRValueReference)) ||
1422 ((Flags & DINode::FlagTypePassByValue) &&
1423 (Flags & DINode::FlagTypePassByReference));
1424}
1425
1426void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1427 auto *Params = dyn_cast<MDTuple>(&RawParams);
1428 CheckDI(Params, "invalid template params", &N, &RawParams);
1429 for (Metadata *Op : Params->operands()) {
1430 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1431 &N, Params, Op);
1432 }
1433}
1434
// Verify a DICompositeType: allowed tag set, operand kinds, vector shape,
// and the array-type-only fields (dataLocation/associated/allocated/rank).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  // NOTE(review): the head of this CheckDI appears elided in this excerpt
  // (presumably !hasConflictingReferenceFlags(N.getFlags())).
          "invalid reference flags", &N);
  // Legacy Blocks-runtime flag; rejected since it is no longer emitted.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vectors are modeled as a single subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The following four fields are Fortran array descriptors and are only
  // meaningful on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI head for Size appears elided in this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1509
// Verify a DISubroutineType: tag, a tuple of DIType refs for the signature,
// and non-conflicting reference flags.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    // Each entry (return type plus parameters) must be a type ref or null.
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  // NOTE(review): the head of this CheckDI appears elided in this excerpt
  // (presumably !hasConflictingReferenceFlags(N.getFlags())).
          "invalid reference flags", &N);
}
1521
1522void Verifier::visitDIFile(const DIFile &N) {
1523 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1524 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1525 if (Checksum) {
1526 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1527 "invalid checksum kind", &N);
1528 size_t Size;
1529 switch (Checksum->Kind) {
1530 case DIFile::CSK_MD5:
1531 Size = 32;
1532 break;
1533 case DIFile::CSK_SHA1:
1534 Size = 40;
1535 break;
1536 case DIFile::CSK_SHA256:
1537 Size = 64;
1538 break;
1539 }
1540 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1541 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1542 "invalid checksum", &N);
1543 }
1544}
1545
// Verify a DICompileUnit: distinctness, file, emission kind, and the typed
// contents of each of its list operands; records the CU for later checks.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      // NOTE(review): the dyn_cast producing Enum appears elided in this
      // excerpt -- confirm against upstream.
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    // Retained entries are types, or subprogram declarations (never
    // definitions).
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      // NOTE(review): the CheckDI head appears elided in this excerpt
      // (presumably Op && isa<DIGlobalVariableExpression>(Op)).
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Remember this CU so the verifier can later confirm it is listed in
  // !llvm.dbg.cu.
  CUVisited.insert(&N);
}
1599
// Verify a DISubprogram: operand kinds, retained-node ownership, and the
// differing invariants for definitions vs. declarations.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // visitRetainedNode dispatches by node kind; the False callback is the
      // fallthrough for unsupported kinds.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect =
          DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);

      // Every retained node must live in a local scope owned by this very
      // subprogram.
      auto *RetainedNode = cast<DINode>(Op);
      // NOTE(review): the argument of this dyn_cast_or_null appears elided
      // in this excerpt (presumably the retained node's scope).
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);
      CheckDI(
          RetainedNodeScope->getSubprogram() == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope);
    }
  }
  // NOTE(review): the head of this CheckDI appears elided in this excerpt
  // (presumably !hasConflictingReferenceFlags(N.getFlags())).
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  // Call-site debug info only makes sense on definitions.
  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1680
1681void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1682 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1683 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1684 "invalid local scope", &N, N.getRawScope());
1685 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1686 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1687}
1688
1689void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1690 visitDILexicalBlockBase(N);
1691
1692 CheckDI(N.getLine() || !N.getColumn(),
1693 "cannot have column info without line info", &N);
1694}
1695
1696void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1697 visitDILexicalBlockBase(N);
1698}
1699
1700void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1701 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1702 if (auto *S = N.getRawScope())
1703 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1704 if (auto *S = N.getRawDecl())
1705 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1706}
1707
1708void Verifier::visitDINamespace(const DINamespace &N) {
1709 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1710 if (auto *S = N.getRawScope())
1711 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1712}
1713
1714void Verifier::visitDIMacro(const DIMacro &N) {
1715 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1716 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1717 "invalid macinfo type", &N);
1718 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1719 if (!N.getValue().empty()) {
1720 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1721 }
1722}
1723
1724void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1725 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1726 "invalid macinfo type", &N);
1727 if (auto *F = N.getRawFile())
1728 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1729
1730 if (auto *Array = N.getRawElements()) {
1731 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1732 for (Metadata *Op : N.getElements()->operands()) {
1733 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1734 }
1735 }
1736}
1737
1738void Verifier::visitDIModule(const DIModule &N) {
1739 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1740 CheckDI(!N.getName().empty(), "anonymous module", &N);
1741}
1742
1743void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1744 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1745}
1746
1747void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1748 visitDITemplateParameter(N);
1749
1750 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1751 &N);
1752}
1753
1754void Verifier::visitDITemplateValueParameter(
1755 const DITemplateValueParameter &N) {
1756 visitDITemplateParameter(N);
1757
1758 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1759 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1760 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1761 "invalid tag", &N);
1762}
1763
1764void Verifier::visitDIVariable(const DIVariable &N) {
1765 if (auto *S = N.getRawScope())
1766 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1767 if (auto *F = N.getRawFile())
1768 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1769}
1770
// Verify a DIGlobalVariable: common variable checks, tag/type refs, a
// mandatory type on definitions, and the static-data-member declaration.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    // NOTE(review): the CheckDI head for Member appears elided in this
    // excerpt (presumably isa<DIDerivedType>(Member) -- confirm upstream).
            "invalid static data member declaration", &N, Member);
  }
}
1785
1786void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1787 // Checks common to all variables.
1788 visitDIVariable(N);
1789
1790 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1791 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1792 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1793 "local variable requires a valid scope", &N, N.getRawScope());
1794 if (auto Ty = N.getType())
1795 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1796}
1797
1798void Verifier::visitDIAssignID(const DIAssignID &N) {
1799 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1800 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1801}
1802
1803void Verifier::visitDILabel(const DILabel &N) {
1804 if (auto *S = N.getRawScope())
1805 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1806 if (auto *F = N.getRawFile())
1807 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1808
1809 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1810 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1811 "label requires a valid scope", &N, N.getRawScope());
1812}
1813
1814void Verifier::visitDIExpression(const DIExpression &N) {
1815 CheckDI(N.isValid(), "invalid expression", &N);
1816}
1817
1818void Verifier::visitDIGlobalVariableExpression(
1819 const DIGlobalVariableExpression &GVE) {
1820 CheckDI(GVE.getVariable(), "missing variable");
1821 if (auto *Var = GVE.getVariable())
1822 visitDIGlobalVariable(*Var);
1823 if (auto *Expr = GVE.getExpression()) {
1824 visitDIExpression(*Expr);
1825 if (auto Fragment = Expr->getFragmentInfo())
1826 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1827 }
1828}
1829
1830void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1831 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1832 if (auto *T = N.getRawType())
1833 CheckDI(isType(T), "invalid type ref", &N, T);
1834 if (auto *F = N.getRawFile())
1835 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1836}
1837
1838void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1839 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1840 N.getTag() == dwarf::DW_TAG_imported_declaration,
1841 "invalid tag", &N);
1842 if (auto *S = N.getRawScope())
1843 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1844 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1845 N.getRawEntity());
1846}
1847
1848void Verifier::visitComdat(const Comdat &C) {
1849 // In COFF the Module is invalid if the GlobalValue has private linkage.
1850 // Entities with private linkage don't have entries in the symbol table.
1851 if (TT.isOSBinFormatCOFF())
1852 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1853 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1854 GV);
1855}
1856
1857void Verifier::visitModuleIdents() {
1858 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1859 if (!Idents)
1860 return;
1861
1862 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1863 // Scan each llvm.ident entry and make sure that this requirement is met.
1864 for (const MDNode *N : Idents->operands()) {
1865 Check(N->getNumOperands() == 1,
1866 "incorrect number of operands in llvm.ident metadata", N);
1867 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1868 ("invalid value for llvm.ident metadata entry operand"
1869 "(the operand should be a string)"),
1870 N->getOperand(0));
1871 }
1872}
1873
1874void Verifier::visitModuleCommandLines() {
1875 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1876 if (!CommandLines)
1877 return;
1878
1879 // llvm.commandline takes a list of metadata entry. Each entry has only one
1880 // string. Scan each llvm.commandline entry and make sure that this
1881 // requirement is met.
1882 for (const MDNode *N : CommandLines->operands()) {
1883 Check(N->getNumOperands() == 1,
1884 "incorrect number of operands in llvm.commandline metadata", N);
1885 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1886 ("invalid value for llvm.commandline metadata entry operand"
1887 "(the operand should be a string)"),
1888 N->getOperand(0));
1889 }
1890}
1891
1892void Verifier::visitModuleErrnoTBAA() {
1893 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1894 if (!ErrnoTBAA)
1895 return;
1896
1897 Check(ErrnoTBAA->getNumOperands() >= 1,
1898 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1899
1900 for (const MDNode *N : ErrnoTBAA->operands())
1901 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1902}
1903
1904void Verifier::visitModuleFlags() {
1905 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1906 if (!Flags) return;
1907
1908 // Scan each flag, and track the flags and requirements.
1909 DenseMap<const MDString*, const MDNode*> SeenIDs;
1910 SmallVector<const MDNode*, 16> Requirements;
1911 uint64_t PAuthABIPlatform = -1;
1912 uint64_t PAuthABIVersion = -1;
1913 for (const MDNode *MDN : Flags->operands()) {
1914 visitModuleFlag(MDN, SeenIDs, Requirements);
1915 if (MDN->getNumOperands() != 3)
1916 continue;
1917 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1918 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1919 if (const auto *PAP =
1921 PAuthABIPlatform = PAP->getZExtValue();
1922 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1923 if (const auto *PAV =
1925 PAuthABIVersion = PAV->getZExtValue();
1926 }
1927 }
1928 }
1929
1930 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1931 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1932 "'aarch64-elf-pauthabi-version' module flags must be present");
1933
1934 // Validate that the requirements in the module are valid.
1935 for (const MDNode *Requirement : Requirements) {
1936 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1937 const Metadata *ReqValue = Requirement->getOperand(1);
1938
1939 const MDNode *Op = SeenIDs.lookup(Flag);
1940 if (!Op) {
1941 CheckFailed("invalid requirement on flag, flag is not present in module",
1942 Flag);
1943 continue;
1944 }
1945
1946 if (Op->getOperand(2) != ReqValue) {
1947 CheckFailed(("invalid requirement on flag, "
1948 "flag does not have the required value"),
1949 Flag);
1950 continue;
1951 }
1952 }
1953}
1954
1955void
1956Verifier::visitModuleFlag(const MDNode *Op,
1957 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1958 SmallVectorImpl<const MDNode *> &Requirements) {
1959 // Each module flag should have three arguments, the merge behavior (a
1960 // constant int), the flag ID (an MDString), and the value.
1961 Check(Op->getNumOperands() == 3,
1962 "incorrect number of operands in module flag", Op);
1963 Module::ModFlagBehavior MFB;
1964 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1966 "invalid behavior operand in module flag (expected constant integer)",
1967 Op->getOperand(0));
1968 Check(false,
1969 "invalid behavior operand in module flag (unexpected constant)",
1970 Op->getOperand(0));
1971 }
1972 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1973 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1974 Op->getOperand(1));
1975
1976 // Check the values for behaviors with additional requirements.
1977 switch (MFB) {
1978 case Module::Error:
1979 case Module::Warning:
1980 case Module::Override:
1981 // These behavior types accept any value.
1982 break;
1983
1984 case Module::Min: {
1985 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1986 Check(V && V->getValue().isNonNegative(),
1987 "invalid value for 'min' module flag (expected constant non-negative "
1988 "integer)",
1989 Op->getOperand(2));
1990 break;
1991 }
1992
1993 case Module::Max: {
1995 "invalid value for 'max' module flag (expected constant integer)",
1996 Op->getOperand(2));
1997 break;
1998 }
1999
2000 case Module::Require: {
2001 // The value should itself be an MDNode with two operands, a flag ID (an
2002 // MDString), and a value.
2003 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2004 Check(Value && Value->getNumOperands() == 2,
2005 "invalid value for 'require' module flag (expected metadata pair)",
2006 Op->getOperand(2));
2007 Check(isa<MDString>(Value->getOperand(0)),
2008 ("invalid value for 'require' module flag "
2009 "(first value operand should be a string)"),
2010 Value->getOperand(0));
2011
2012 // Append it to the list of requirements, to check once all module flags are
2013 // scanned.
2014 Requirements.push_back(Value);
2015 break;
2016 }
2017
2018 case Module::Append:
2019 case Module::AppendUnique: {
2020 // These behavior types require the operand be an MDNode.
2021 Check(isa<MDNode>(Op->getOperand(2)),
2022 "invalid value for 'append'-type module flag "
2023 "(expected a metadata node)",
2024 Op->getOperand(2));
2025 break;
2026 }
2027 }
2028
2029 // Unless this is a "requires" flag, check the ID is unique.
2030 if (MFB != Module::Require) {
2031 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2032 Check(Inserted,
2033 "module flag identifiers must be unique (or of 'require' type)", ID);
2034 }
2035
2036 if (ID->getString() == "wchar_size") {
2037 ConstantInt *Value
2039 Check(Value, "wchar_size metadata requires constant integer argument");
2040 }
2041
2042 if (ID->getString() == "Linker Options") {
2043 // If the llvm.linker.options named metadata exists, we assume that the
2044 // bitcode reader has upgraded the module flag. Otherwise the flag might
2045 // have been created by a client directly.
2046 Check(M.getNamedMetadata("llvm.linker.options"),
2047 "'Linker Options' named metadata no longer supported");
2048 }
2049
2050 if (ID->getString() == "SemanticInterposition") {
2051 ConstantInt *Value =
2053 Check(Value,
2054 "SemanticInterposition metadata requires constant integer argument");
2055 }
2056
2057 if (ID->getString() == "CG Profile") {
2058 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2059 visitModuleFlagCGProfileEntry(MDO);
2060 }
2061}
2062
2063void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2064 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2065 if (!FuncMDO)
2066 return;
2067 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2068 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2069 "expected a Function or null", FuncMDO);
2070 };
2071 auto Node = dyn_cast_or_null<MDNode>(MDO);
2072 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2073 CheckFunction(Node->getOperand(0));
2074 CheckFunction(Node->getOperand(1));
2075 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2076 Check(Count && Count->getType()->isIntegerTy(),
2077 "expected an integer constant", Node->getOperand(2));
2078}
2079
// Verify basic well-formedness of every attribute in the set: string
// attributes declared as "strbool" must hold "", "true", or "false", and
// enum/int attributes must carry an argument exactly when their kind
// requires one. The value V is printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    // The ATTRIBUTE_STRBOOL macro below is expanded once per string-boolean
    // attribute by the generated Attributes.inc, producing a chain of
    // value-format checks against A.
    if (A.isStringAttribute()) {
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Non-string attributes: the presence of an integer argument must agree
    // with whether the attribute kind is an int-attribute kind.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2105
2106// VerifyParameterAttrs - Check the given attributes for an argument or return
2107// value of the specified type. The value V is printed in error messages.
2108void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2109 const Value *V) {
2110 if (!Attrs.hasAttributes())
2111 return;
2112
2113 verifyAttributeTypes(Attrs, V);
2114
2115 for (Attribute Attr : Attrs)
2116 Check(Attr.isStringAttribute() ||
2117 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2118 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2119 V);
2120
2121 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2122 unsigned AttrCount =
2123 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2124 Check(AttrCount == 1,
2125 "Attribute 'immarg' is incompatible with other attributes except the "
2126 "'range' attribute",
2127 V);
2128 }
2129
2130 // Check for mutually incompatible attributes. Only inreg is compatible with
2131 // sret.
2132 unsigned AttrCount = 0;
2133 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2134 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2135 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2136 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2137 Attrs.hasAttribute(Attribute::InReg);
2138 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2139 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2140 Check(AttrCount <= 1,
2141 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2142 "'byref', and 'sret' are incompatible!",
2143 V);
2144
2145 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2146 Attrs.hasAttribute(Attribute::ReadOnly)),
2147 "Attributes "
2148 "'inalloca and readonly' are incompatible!",
2149 V);
2150
2151 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2152 Attrs.hasAttribute(Attribute::Returned)),
2153 "Attributes "
2154 "'sret and returned' are incompatible!",
2155 V);
2156
2157 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2158 Attrs.hasAttribute(Attribute::SExt)),
2159 "Attributes "
2160 "'zeroext and signext' are incompatible!",
2161 V);
2162
2163 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2164 Attrs.hasAttribute(Attribute::ReadOnly)),
2165 "Attributes "
2166 "'readnone and readonly' are incompatible!",
2167 V);
2168
2169 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2170 Attrs.hasAttribute(Attribute::WriteOnly)),
2171 "Attributes "
2172 "'readnone and writeonly' are incompatible!",
2173 V);
2174
2175 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2176 Attrs.hasAttribute(Attribute::WriteOnly)),
2177 "Attributes "
2178 "'readonly and writeonly' are incompatible!",
2179 V);
2180
2181 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2182 Attrs.hasAttribute(Attribute::AlwaysInline)),
2183 "Attributes "
2184 "'noinline and alwaysinline' are incompatible!",
2185 V);
2186
2187 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2188 Attrs.hasAttribute(Attribute::ReadNone)),
2189 "Attributes writable and readnone are incompatible!", V);
2190
2191 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2192 Attrs.hasAttribute(Attribute::ReadOnly)),
2193 "Attributes writable and readonly are incompatible!", V);
2194
2195 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2196 for (Attribute Attr : Attrs) {
2197 if (!Attr.isStringAttribute() &&
2198 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2199 CheckFailed("Attribute '" + Attr.getAsString() +
2200 "' applied to incompatible type!", V);
2201 return;
2202 }
2203 }
2204
2205 if (isa<PointerType>(Ty)) {
2206 if (Attrs.hasAttribute(Attribute::Alignment)) {
2207 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2208 Check(AttrAlign.value() <= Value::MaximumAlignment,
2209 "huge alignment values are unsupported", V);
2210 }
2211 if (Attrs.hasAttribute(Attribute::ByVal)) {
2212 Type *ByValTy = Attrs.getByValType();
2213 SmallPtrSet<Type *, 4> Visited;
2214 Check(ByValTy->isSized(&Visited),
2215 "Attribute 'byval' does not support unsized types!", V);
2216 // Check if it is or contains a target extension type that disallows being
2217 // used on the stack.
2219 "'byval' argument has illegal target extension type", V);
2220 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2221 "huge 'byval' arguments are unsupported", V);
2222 }
2223 if (Attrs.hasAttribute(Attribute::ByRef)) {
2224 SmallPtrSet<Type *, 4> Visited;
2225 Check(Attrs.getByRefType()->isSized(&Visited),
2226 "Attribute 'byref' does not support unsized types!", V);
2227 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2228 (1ULL << 32),
2229 "huge 'byref' arguments are unsupported", V);
2230 }
2231 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2232 SmallPtrSet<Type *, 4> Visited;
2233 Check(Attrs.getInAllocaType()->isSized(&Visited),
2234 "Attribute 'inalloca' does not support unsized types!", V);
2235 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2236 (1ULL << 32),
2237 "huge 'inalloca' arguments are unsupported", V);
2238 }
2239 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2240 SmallPtrSet<Type *, 4> Visited;
2241 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2242 "Attribute 'preallocated' does not support unsized types!", V);
2243 Check(
2244 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2245 (1ULL << 32),
2246 "huge 'preallocated' arguments are unsupported", V);
2247 }
2248 }
2249
2250 if (Attrs.hasAttribute(Attribute::Initializes)) {
2251 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2252 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2253 V);
2255 "Attribute 'initializes' does not support unordered ranges", V);
2256 }
2257
2258 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2259 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2260 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2261 V);
2262 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2263 "Invalid value for 'nofpclass' test mask", V);
2264 }
2265 if (Attrs.hasAttribute(Attribute::Range)) {
2266 const ConstantRange &CR =
2267 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2269 "Range bit width must match type bit width!", V);
2270 }
2271}
2272
2273void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2274 const Value *V) {
2275 if (Attrs.hasFnAttr(Attr)) {
2276 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2277 unsigned N;
2278 if (S.getAsInteger(10, N))
2279 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2280 }
2281}
2282
2283// Check parameter attributes against a function type.
2284// The value V is printed in error messages.
2285void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2286 const Value *V, bool IsIntrinsic,
2287 bool IsInlineAsm) {
2288 if (Attrs.isEmpty())
2289 return;
2290
2291 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2292 Check(Attrs.hasParentContext(Context),
2293 "Attribute list does not match Module context!", &Attrs, V);
2294 for (const auto &AttrSet : Attrs) {
2295 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2296 "Attribute set does not match Module context!", &AttrSet, V);
2297 for (const auto &A : AttrSet) {
2298 Check(A.hasParentContext(Context),
2299 "Attribute does not match Module context!", &A, V);
2300 }
2301 }
2302 }
2303
2304 bool SawNest = false;
2305 bool SawReturned = false;
2306 bool SawSRet = false;
2307 bool SawSwiftSelf = false;
2308 bool SawSwiftAsync = false;
2309 bool SawSwiftError = false;
2310
2311 // Verify return value attributes.
2312 AttributeSet RetAttrs = Attrs.getRetAttrs();
2313 for (Attribute RetAttr : RetAttrs)
2314 Check(RetAttr.isStringAttribute() ||
2315 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2316 "Attribute '" + RetAttr.getAsString() +
2317 "' does not apply to function return values",
2318 V);
2319
2320 unsigned MaxParameterWidth = 0;
2321 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2322 if (Ty->isVectorTy()) {
2323 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2324 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2325 if (Size > MaxParameterWidth)
2326 MaxParameterWidth = Size;
2327 }
2328 }
2329 };
2330 GetMaxParameterWidth(FT->getReturnType());
2331 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2332
2333 // Verify parameter attributes.
2334 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2335 Type *Ty = FT->getParamType(i);
2336 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2337
2338 if (!IsIntrinsic) {
2339 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2340 "immarg attribute only applies to intrinsics", V);
2341 if (!IsInlineAsm)
2342 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2343 "Attribute 'elementtype' can only be applied to intrinsics"
2344 " and inline asm.",
2345 V);
2346 }
2347
2348 verifyParameterAttrs(ArgAttrs, Ty, V);
2349 GetMaxParameterWidth(Ty);
2350
2351 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2352 Check(!SawNest, "More than one parameter has attribute nest!", V);
2353 SawNest = true;
2354 }
2355
2356 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2357 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2358 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2359 "Incompatible argument and return types for 'returned' attribute",
2360 V);
2361 SawReturned = true;
2362 }
2363
2364 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2365 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2366 Check(i == 0 || i == 1,
2367 "Attribute 'sret' is not on first or second parameter!", V);
2368 SawSRet = true;
2369 }
2370
2371 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2372 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2373 SawSwiftSelf = true;
2374 }
2375
2376 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2377 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2378 SawSwiftAsync = true;
2379 }
2380
2381 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2382 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2383 SawSwiftError = true;
2384 }
2385
2386 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2387 Check(i == FT->getNumParams() - 1,
2388 "inalloca isn't on the last parameter!", V);
2389 }
2390 }
2391
2392 if (!Attrs.hasFnAttrs())
2393 return;
2394
2395 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2396 for (Attribute FnAttr : Attrs.getFnAttrs())
2397 Check(FnAttr.isStringAttribute() ||
2398 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2399 "Attribute '" + FnAttr.getAsString() +
2400 "' does not apply to functions!",
2401 V);
2402
2403 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2404 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2405 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2406
2407 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2408 Check(Attrs.hasFnAttr(Attribute::NoInline),
2409 "Attribute 'optnone' requires 'noinline'!", V);
2410
2411 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2412 "Attributes 'optsize and optnone' are incompatible!", V);
2413
2414 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2415 "Attributes 'minsize and optnone' are incompatible!", V);
2416
2417 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2418 "Attributes 'optdebug and optnone' are incompatible!", V);
2419 }
2420
2421 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2422 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2423 "Attributes "
2424 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2425 V);
2426
2427 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2428 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2429 "Attributes 'optsize and optdebug' are incompatible!", V);
2430
2431 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2432 "Attributes 'minsize and optdebug' are incompatible!", V);
2433 }
2434
2435 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2436 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2437 "Attribute writable and memory without argmem: write are incompatible!",
2438 V);
2439
2440 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2441 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2442 "Attributes 'aarch64_pstate_sm_enabled and "
2443 "aarch64_pstate_sm_compatible' are incompatible!",
2444 V);
2445 }
2446
2447 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2448 Attrs.hasFnAttr("aarch64_inout_za") +
2449 Attrs.hasFnAttr("aarch64_out_za") +
2450 Attrs.hasFnAttr("aarch64_preserves_za") +
2451 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2452 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2453 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2454 "'aarch64_za_state_agnostic' are mutually exclusive",
2455 V);
2456
2457 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2458 Attrs.hasFnAttr("aarch64_in_zt0") +
2459 Attrs.hasFnAttr("aarch64_inout_zt0") +
2460 Attrs.hasFnAttr("aarch64_out_zt0") +
2461 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2462 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2463 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2464 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2465 "'aarch64_za_state_agnostic' are mutually exclusive",
2466 V);
2467
2468 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2469 const GlobalValue *GV = cast<GlobalValue>(V);
2471 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2472 }
2473
2474 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2475 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2476 if (ParamNo >= FT->getNumParams()) {
2477 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2478 return false;
2479 }
2480
2481 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2482 CheckFailed("'allocsize' " + Name +
2483 " argument must refer to an integer parameter",
2484 V);
2485 return false;
2486 }
2487
2488 return true;
2489 };
2490
2491 if (!CheckParam("element size", Args->first))
2492 return;
2493
2494 if (Args->second && !CheckParam("number of elements", *Args->second))
2495 return;
2496 }
2497
2498 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2499 AllocFnKind K = Attrs.getAllocKind();
2501 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2502 if (!is_contained(
2503 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2504 Type))
2505 CheckFailed(
2506 "'allockind()' requires exactly one of alloc, realloc, and free");
2507 if ((Type == AllocFnKind::Free) &&
2508 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2509 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2510 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2511 "or aligned modifiers.");
2512 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2513 if ((K & ZeroedUninit) == ZeroedUninit)
2514 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2515 }
2516
2517 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2518 StringRef S = A.getValueAsString();
2519 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2520 Function *Variant = M.getFunction(S);
2521 if (Variant) {
2522 Attribute Family = Attrs.getFnAttr("alloc-family");
2523 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2524 if (Family.isValid())
2525 Check(VariantFamily.isValid() &&
2526 VariantFamily.getValueAsString() == Family.getValueAsString(),
2527 "'alloc-variant-zeroed' must name a function belonging to the "
2528 "same 'alloc-family'");
2529
2530 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2531 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2532 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2533 "'alloc-variant-zeroed' must name a function with "
2534 "'allockind(\"zeroed\")'");
2535
2536 Check(FT == Variant->getFunctionType(),
2537 "'alloc-variant-zeroed' must name a function with the same "
2538 "signature");
2539
2540 if (const Function *F = dyn_cast<Function>(V))
2541 Check(F->getCallingConv() == Variant->getCallingConv(),
2542 "'alloc-variant-zeroed' must name a function with the same "
2543 "calling convention");
2544 }
2545 }
2546
2547 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2548 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2549 if (VScaleMin == 0)
2550 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2551 else if (!isPowerOf2_32(VScaleMin))
2552 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2553 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2554 if (VScaleMax && VScaleMin > VScaleMax)
2555 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2556 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2557 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2558 }
2559
2560 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2561 StringRef FP = FPAttr.getValueAsString();
2562 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2563 FP != "non-leaf-no-reserve")
2564 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2565 }
2566
2567 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2568 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2569 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2570 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2571 .getValueAsString()
2572 .empty(),
2573 "\"patchable-function-entry-section\" must not be empty");
2574 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2575
2576 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2577 StringRef S = A.getValueAsString();
2578 if (S != "none" && S != "all" && S != "non-leaf")
2579 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2580 }
2581
2582 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2583 StringRef S = A.getValueAsString();
2584 if (S != "a_key" && S != "b_key")
2585 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2586 V);
2587 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2588 CheckFailed(
2589 "'sign-return-address-key' present without `sign-return-address`");
2590 }
2591 }
2592
2593 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2594 StringRef S = A.getValueAsString();
2595 if (S != "" && S != "true" && S != "false")
2596 CheckFailed(
2597 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2598 }
2599
2600 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2601 StringRef S = A.getValueAsString();
2602 if (S != "" && S != "true" && S != "false")
2603 CheckFailed(
2604 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2605 }
2606
2607 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2608 StringRef S = A.getValueAsString();
2609 if (S != "" && S != "true" && S != "false")
2610 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2611 V);
2612 }
2613
2614 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2615 StringRef S = A.getValueAsString();
2616 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2617 if (!Info)
2618 CheckFailed("invalid name for a VFABI variant: " + S, V);
2619 }
2620
2621 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2622 StringRef S = A.getValueAsString();
2624 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2625 }
2626
2627 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2628 StringRef S = A.getValueAsString();
2630 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2631 V);
2632 }
2633
2634 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2635 StringRef S = A.getValueAsString();
2637 S.split(Args, ',');
2638 Check(Args.size() >= 5,
2639 "modular-format attribute requires at least 5 arguments", V);
2640 unsigned FirstArgIdx;
2641 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2642 "modular-format attribute first arg index is not an integer", V);
2643 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2644 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2645 "modular-format attribute first arg index is out of bounds", V);
2646 }
2647
2648 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2649 StringRef S = A.getValueAsString();
2650 if (!S.empty()) {
2651 for (auto FeatureFlag : split(S, ',')) {
2652 if (FeatureFlag.empty())
2653 CheckFailed(
2654 "target-features attribute should not contain an empty string");
2655 else
2656 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2657 "target feature '" + FeatureFlag +
2658 "' must start with a '+' or '-'",
2659 V);
2660 }
2661 }
2662 }
2663}
// Validate an 'unknown'-kind !prof attachment: exactly the tag plus one
// additional operand, which must be a non-empty string (PassName).
void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
  Check(MD->getNumOperands() == 2,
        "'unknown' !prof should have a single additional operand", MD);
  auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
  // NOTE(review): the dereference of PassName below relies on Check
  // returning early from this function on failure — confirm the macro's
  // behavior before reordering these statements.
  Check(PassName != nullptr,
        "'unknown' !prof should have an additional operand of type "
        "string");
  Check(!PassName->getString().empty(),
        "the 'unknown' !prof operand should not be an empty string");
}
2674
// Validate function-level metadata attachments. Only !prof and !kcfi_type
// receive checks here; other attachment kinds pass through untouched.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the equalsStr condition below appears truncated in
      // this copy of the file (its string argument is missing) — confirm
      // against upstream before relying on it.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      // NOTE(review): several Check(...) openings in this function appear
      // truncated in this copy (only their message arguments survive).
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2724
// Visit every constant reachable from \p EntryC, dispatching ConstantExpr
// and ConstantPtrAuth nodes to their dedicated checkers, without recursing
// (an explicit worklist is used instead of the call stack).
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have no operand graph to walk.
  if (EntryC->getNumOperands() == 0)
    return;

  // ConstantExprVisited de-duplicates work across calls as well as within
  // this one; a constant already seen needs no re-checking.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  // NOTE(review): the declaration of the Stack worklist appears dropped in
  // this copy of the file — confirm against upstream.
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2764
2765void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2766 if (CE->getOpcode() == Instruction::BitCast)
2767 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2768 CE->getType()),
2769 "Invalid bitcast", CE);
2770 else if (CE->getOpcode() == Instruction::PtrToAddr)
2771 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2772}
2773
// Validate the shape of a ptrauth constant: pointer-typed base of the same
// type as the constant, i32 key, i64 discriminator, plus address
// discriminator and deactivation symbol constraints.
// NOTE(review): several Check(...) openings below appear truncated in this
// copy of the file (only their message arguments survive) — confirm
// against upstream.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2798
2799bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2800 // There shouldn't be more attribute sets than there are parameters plus the
2801 // function and return value.
2802 return Attrs.getNumAttrSets() <= Params + 2;
2803}
2804
// Check the correspondence between an inline-asm call's constraint string
// and its arguments: indirect constraints require pointer-typed operands
// (with elementtype), direct constraints must not carry elementtype, and
// label constraints are only legal on callbr (one per indirect dest).
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

      // NOTE(review): the Check(...) opening for the elementtype test
      // appears truncated in this copy (only its message survives) —
      // confirm against upstream.
            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // ArgNo only advances for constraints that consume an argument; label
    // constraints and arg-less constraints were skipped above.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2846
2847/// Verify that statepoint intrinsic is well formed.
2848void Verifier::verifyStatepoint(const CallBase &Call) {
2849 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2850
2853 "gc.statepoint must read and write all memory to preserve "
2854 "reordering restrictions required by safepoint semantics",
2855 Call);
2856
2857 const int64_t NumPatchBytes =
2858 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2859 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2860 Check(NumPatchBytes >= 0,
2861 "gc.statepoint number of patchable bytes must be "
2862 "positive",
2863 Call);
2864
2865 Type *TargetElemType = Call.getParamElementType(2);
2866 Check(TargetElemType,
2867 "gc.statepoint callee argument must have elementtype attribute", Call);
2868 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2869 Check(TargetFuncType,
2870 "gc.statepoint callee elementtype must be function type", Call);
2871
2872 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2873 Check(NumCallArgs >= 0,
2874 "gc.statepoint number of arguments to underlying call "
2875 "must be positive",
2876 Call);
2877 const int NumParams = (int)TargetFuncType->getNumParams();
2878 if (TargetFuncType->isVarArg()) {
2879 Check(NumCallArgs >= NumParams,
2880 "gc.statepoint mismatch in number of vararg call args", Call);
2881
2882 // TODO: Remove this limitation
2883 Check(TargetFuncType->getReturnType()->isVoidTy(),
2884 "gc.statepoint doesn't support wrapping non-void "
2885 "vararg functions yet",
2886 Call);
2887 } else
2888 Check(NumCallArgs == NumParams,
2889 "gc.statepoint mismatch in number of call args", Call);
2890
2891 const uint64_t Flags
2892 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2893 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2894 "unknown flag used in gc.statepoint flags argument", Call);
2895
2896 // Verify that the types of the call parameter arguments match
2897 // the type of the wrapped callee.
2898 AttributeList Attrs = Call.getAttributes();
2899 for (int i = 0; i < NumParams; i++) {
2900 Type *ParamType = TargetFuncType->getParamType(i);
2901 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2902 Check(ArgType == ParamType,
2903 "gc.statepoint call argument does not match wrapped "
2904 "function type",
2905 Call);
2906
2907 if (TargetFuncType->isVarArg()) {
2908 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2909 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2910 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2911 }
2912 }
2913
2914 const int EndCallArgsInx = 4 + NumCallArgs;
2915
2916 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2917 Check(isa<ConstantInt>(NumTransitionArgsV),
2918 "gc.statepoint number of transition arguments "
2919 "must be constant integer",
2920 Call);
2921 const int NumTransitionArgs =
2922 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2923 Check(NumTransitionArgs == 0,
2924 "gc.statepoint w/inline transition bundle is deprecated", Call);
2925 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2926
2927 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2928 Check(isa<ConstantInt>(NumDeoptArgsV),
2929 "gc.statepoint number of deoptimization arguments "
2930 "must be constant integer",
2931 Call);
2932 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2933 Check(NumDeoptArgs == 0,
2934 "gc.statepoint w/inline deopt operands is deprecated", Call);
2935
2936 const int ExpectedNumArgs = 7 + NumCallArgs;
2937 Check(ExpectedNumArgs == (int)Call.arg_size(),
2938 "gc.statepoint too many arguments", Call);
2939
2940 // Check that the only uses of this gc.statepoint are gc.result or
2941 // gc.relocate calls which are tied to this statepoint and thus part
2942 // of the same statepoint sequence
2943 for (const User *U : Call.users()) {
2944 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2945 Check(UserCall, "illegal use of statepoint token", Call, U);
2946 if (!UserCall)
2947 continue;
2948 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2949 "gc.result or gc.relocate are the only value uses "
2950 "of a gc.statepoint",
2951 Call, U);
2952 if (isa<GCResultInst>(UserCall)) {
2953 Check(UserCall->getArgOperand(0) == &Call,
2954 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2955 } else if (isa<GCRelocateInst>(Call)) {
2956 Check(UserCall->getArgOperand(0) == &Call,
2957 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2958 }
2959 }
2960
2961 // Note: It is legal for a single derived pointer to be listed multiple
2962 // times. It's non-optimal, but it is legal. It can also happen after
2963 // insertion if we strip a bitcast away.
2964 // Note: It is really tempting to check that each base is relocated and
2965 // that a derived pointer is never reused as a base pointer. This turns
2966 // out to be problematic since optimizations run after safepoint insertion
2967 // can recognize equality properties that the insertion logic doesn't know
2968 // about. See example statepoint.ll in the verifier subdirectory
2969}
2970
2971void Verifier::verifyFrameRecoverIndices() {
2972 for (auto &Counts : FrameEscapeInfo) {
2973 Function *F = Counts.first;
2974 unsigned EscapedObjectCount = Counts.second.first;
2975 unsigned MaxRecoveredIndex = Counts.second.second;
2976 Check(MaxRecoveredIndex <= EscapedObjectCount,
2977 "all indices passed to llvm.localrecover must be less than the "
2978 "number of arguments passed to llvm.localescape in the parent "
2979 "function",
2980 F);
2981 }
2982}
2983
2984static Instruction *getSuccPad(Instruction *Terminator) {
2985 BasicBlock *UnwindDest;
2986 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2987 UnwindDest = II->getUnwindDest();
2988 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2989 UnwindDest = CSI->getUnwindDest();
2990 else
2991 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2992 return &*UnwindDest->getFirstNonPHIIt();
2993}
2994
// Walk the unwind-successor chains recorded in SiblingFuncletInfo and
// reject any cycle: a set of EH pads that (transitively) unwind to each
// other could never make progress. Each pad has at most one successor, so
// every walk is a simple chain that either terminates or closes a cycle.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads fully checked across all walks. Active: pads on the
  // current walk only (used for cycle detection); cleared per outer entry.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and their distinct exiting terminators) that
          // form the cycle, for the diagnostic below.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3037
// visitFunction - Verify that a function is ok.
//
// Checks, in order: signature/linkage/alignment invariants, attribute
// placement, calling-convention restrictions, per-argument types, metadata
// attachments (split by materializable / declaration / definition), address
// uses of intrinsics, selected intrinsic signatures, and finally that all
// !dbg attachments resolve back to this function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata, token-like, and x86_amx types are only legal as intrinsic
      // parameters.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): the declaration of the MDs vector appears dropped in this
  // copy of the file — confirm against upstream.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*".  It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Stop early once broken debug info has been diagnosed.
      if (BrokenDebugInfo)
        return;
    }
}
3375
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks: the block has a terminator; every PHI has exactly one entry per
// predecessor (duplicate-predecessor entries must agree on the value); and
// each instruction's parent pointer refers back to this block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // NOTE(review): the declaration of the Values vector appears dropped in
    // this copy of the file — confirm against upstream.
    // Sorting predecessors and (block, value) pairs lets the loop below
    // compare them positionally.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3435
3436void Verifier::visitTerminator(Instruction &I) {
3437 // Ensure that terminators only exist at the end of the basic block.
3438 Check(&I == I.getParent()->getTerminator(),
3439 "Terminator found in the middle of a basic block!", I.getParent());
3440 visitInstruction(I);
3441}
3442
// Conditional branches must be driven by an i1 condition; unconditional
// branches need no extra checks beyond the common terminator ones.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional()) {
    // NOTE(review): the Check(...) opening for the condition-type test
    // appears truncated in this copy of the file (only its message
    // survives) — confirm against upstream.
          "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3450
3451void Verifier::visitReturnInst(ReturnInst &RI) {
3452 Function *F = RI.getParent()->getParent();
3453 unsigned N = RI.getNumOperands();
3454 if (F->getReturnType()->isVoidTy())
3455 Check(N == 0,
3456 "Found return instr that returns non-void in Function of void "
3457 "return type!",
3458 &RI, F->getReturnType());
3459 else
3460 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3461 "Function return type does not match operand "
3462 "type of return inst!",
3463 &RI, F->getReturnType());
3464
3465 // Check to make sure that the return value has necessary properties for
3466 // terminators...
3467 visitTerminator(RI);
3468}
3469
3470void Verifier::visitSwitchInst(SwitchInst &SI) {
3471 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3472 // Check to make sure that all of the constants in the switch instruction
3473 // have the same type as the switched-on value.
3474 Type *SwitchTy = SI.getCondition()->getType();
3475 SmallPtrSet<ConstantInt*, 32> Constants;
3476 for (auto &Case : SI.cases()) {
3477 Check(isa<ConstantInt>(Case.getCaseValue()),
3478 "Case value is not a constant integer.", &SI);
3479 Check(Case.getCaseValue()->getType() == SwitchTy,
3480 "Switch constants must all be same type as switch value!", &SI);
3481 Check(Constants.insert(Case.getCaseValue()).second,
3482 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3483 }
3484
3485 visitTerminator(SI);
3486}
3487
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
      "Indirectbr operand must have pointer type!", &BI);
  // Each potential destination operand (blockaddress value) must also be
  // pointer-typed.
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
      "Indirectbr destinations must all have pointer type!", &BI);

  // Common terminator checks.
  visitTerminator(BI);
}
3497
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  // callbr is only valid for inline asm ("asm goto") and a small allow-list
  // of intrinsics; the two forms have disjoint rules.
  if (!CBI.isInlineAsm()) {
          "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The single indirect destination must start with either an
      // 'unreachable' or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    // Apply the per-intrinsic verification as for an ordinary call site.
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm form: unwinding out of a callbr is not permitted.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  // Common terminator checks.
  visitTerminator(CBI);
}
3529
3530void Verifier::visitSelectInst(SelectInst &SI) {
3531 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3532 SI.getOperand(2)),
3533 "Invalid operands for select instruction!", &SI);
3534
3535 Check(SI.getTrueValue()->getType() == SI.getType(),
3536 "Select values must have same type as select instruction!", &SI);
3537 visitInstruction(SI);
3538}
3539
3540/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3541/// a pass, if any exist, it's an error.
3542///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1/UserOp2 are placeholder opcodes reserved for transient use inside
  // a pass; encountering one in verified IR is always an error.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3546
3547void Verifier::visitTruncInst(TruncInst &I) {
3548 // Get the source and destination types
3549 Type *SrcTy = I.getOperand(0)->getType();
3550 Type *DestTy = I.getType();
3551
3552 // Get the size of the types in bits, we'll need this later
3553 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3554 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3555
3556 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3557 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3558 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3559 "trunc source and destination must both be a vector or neither", &I);
3560 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3561
3562 visitInstruction(I);
3563}
3564
3565void Verifier::visitZExtInst(ZExtInst &I) {
3566 // Get the source and destination types
3567 Type *SrcTy = I.getOperand(0)->getType();
3568 Type *DestTy = I.getType();
3569
3570 // Get the size of the types in bits, we'll need this later
3571 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3572 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3573 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3574 "zext source and destination must both be a vector or neither", &I);
3575 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3576 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3577
3578 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3579
3580 visitInstruction(I);
3581}
3582
3583void Verifier::visitSExtInst(SExtInst &I) {
3584 // Get the source and destination types
3585 Type *SrcTy = I.getOperand(0)->getType();
3586 Type *DestTy = I.getType();
3587
3588 // Get the size of the types in bits, we'll need this later
3589 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3590 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3591
3592 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3593 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3594 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3595 "sext source and destination must both be a vector or neither", &I);
3596 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3597
3598 visitInstruction(I);
3599}
3600
3601void Verifier::visitFPTruncInst(FPTruncInst &I) {
3602 // Get the source and destination types
3603 Type *SrcTy = I.getOperand(0)->getType();
3604 Type *DestTy = I.getType();
3605 // Get the size of the types in bits, we'll need this later
3606 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3607 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3608
3609 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3610 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3611 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3612 "fptrunc source and destination must both be a vector or neither", &I);
3613 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3614
3615 visitInstruction(I);
3616}
3617
3618void Verifier::visitFPExtInst(FPExtInst &I) {
3619 // Get the source and destination types
3620 Type *SrcTy = I.getOperand(0)->getType();
3621 Type *DestTy = I.getType();
3622
3623 // Get the size of the types in bits, we'll need this later
3624 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3625 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3626
3627 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3628 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3629 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3630 "fpext source and destination must both be a vector or neither", &I);
3631 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3632
3633 visitInstruction(I);
3634}
3635
3636void Verifier::visitUIToFPInst(UIToFPInst &I) {
3637 // Get the source and destination types
3638 Type *SrcTy = I.getOperand(0)->getType();
3639 Type *DestTy = I.getType();
3640
3641 bool SrcVec = SrcTy->isVectorTy();
3642 bool DstVec = DestTy->isVectorTy();
3643
3644 Check(SrcVec == DstVec,
3645 "UIToFP source and dest must both be vector or scalar", &I);
3646 Check(SrcTy->isIntOrIntVectorTy(),
3647 "UIToFP source must be integer or integer vector", &I);
3648 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3649 &I);
3650
3651 if (SrcVec && DstVec)
3652 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3653 cast<VectorType>(DestTy)->getElementCount(),
3654 "UIToFP source and dest vector length mismatch", &I);
3655
3656 visitInstruction(I);
3657}
3658
3659void Verifier::visitSIToFPInst(SIToFPInst &I) {
3660 // Get the source and destination types
3661 Type *SrcTy = I.getOperand(0)->getType();
3662 Type *DestTy = I.getType();
3663
3664 bool SrcVec = SrcTy->isVectorTy();
3665 bool DstVec = DestTy->isVectorTy();
3666
3667 Check(SrcVec == DstVec,
3668 "SIToFP source and dest must both be vector or scalar", &I);
3669 Check(SrcTy->isIntOrIntVectorTy(),
3670 "SIToFP source must be integer or integer vector", &I);
3671 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3672 &I);
3673
3674 if (SrcVec && DstVec)
3675 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3676 cast<VectorType>(DestTy)->getElementCount(),
3677 "SIToFP source and dest vector length mismatch", &I);
3678
3679 visitInstruction(I);
3680}
3681
3682void Verifier::visitFPToUIInst(FPToUIInst &I) {
3683 // Get the source and destination types
3684 Type *SrcTy = I.getOperand(0)->getType();
3685 Type *DestTy = I.getType();
3686
3687 bool SrcVec = SrcTy->isVectorTy();
3688 bool DstVec = DestTy->isVectorTy();
3689
3690 Check(SrcVec == DstVec,
3691 "FPToUI source and dest must both be vector or scalar", &I);
3692 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3693 Check(DestTy->isIntOrIntVectorTy(),
3694 "FPToUI result must be integer or integer vector", &I);
3695
3696 if (SrcVec && DstVec)
3697 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3698 cast<VectorType>(DestTy)->getElementCount(),
3699 "FPToUI source and dest vector length mismatch", &I);
3700
3701 visitInstruction(I);
3702}
3703
3704void Verifier::visitFPToSIInst(FPToSIInst &I) {
3705 // Get the source and destination types
3706 Type *SrcTy = I.getOperand(0)->getType();
3707 Type *DestTy = I.getType();
3708
3709 bool SrcVec = SrcTy->isVectorTy();
3710 bool DstVec = DestTy->isVectorTy();
3711
3712 Check(SrcVec == DstVec,
3713 "FPToSI source and dest must both be vector or scalar", &I);
3714 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3715 Check(DestTy->isIntOrIntVectorTy(),
3716 "FPToSI result must be integer or integer vector", &I);
3717
3718 if (SrcVec && DstVec)
3719 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3720 cast<VectorType>(DestTy)->getElementCount(),
3721 "FPToSI source and dest vector length mismatch", &I);
3722
3723 visitInstruction(I);
3724}
3725
3726void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3727 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3728 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3729 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3730 V);
3731
3732 if (SrcTy->isVectorTy()) {
3733 auto *VSrc = cast<VectorType>(SrcTy);
3734 auto *VDest = cast<VectorType>(DestTy);
3735 Check(VSrc->getElementCount() == VDest->getElementCount(),
3736 "PtrToAddr vector length mismatch", V);
3737 }
3738
3739 Type *AddrTy = DL.getAddressType(SrcTy);
3740 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3741}
3742
void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
  // All structural rules are shared with the constant-expression form, so
  // delegate to the common helper.
  checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
  visitInstruction(I);
}
3747
3748void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3749 // Get the source and destination types
3750 Type *SrcTy = I.getOperand(0)->getType();
3751 Type *DestTy = I.getType();
3752
3753 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3754
3755 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3756 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3757 &I);
3758
3759 if (SrcTy->isVectorTy()) {
3760 auto *VSrc = cast<VectorType>(SrcTy);
3761 auto *VDest = cast<VectorType>(DestTy);
3762 Check(VSrc->getElementCount() == VDest->getElementCount(),
3763 "PtrToInt Vector length mismatch", &I);
3764 }
3765
3766 visitInstruction(I);
3767}
3768
3769void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3770 // Get the source and destination types
3771 Type *SrcTy = I.getOperand(0)->getType();
3772 Type *DestTy = I.getType();
3773
3774 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3775 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3776
3777 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3778 &I);
3779 if (SrcTy->isVectorTy()) {
3780 auto *VSrc = cast<VectorType>(SrcTy);
3781 auto *VDest = cast<VectorType>(DestTy);
3782 Check(VSrc->getElementCount() == VDest->getElementCount(),
3783 "IntToPtr Vector length mismatch", &I);
3784 }
3785 visitInstruction(I);
3786}
3787
3788void Verifier::visitBitCastInst(BitCastInst &I) {
3789 Check(
3790 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3791 "Invalid bitcast", &I);
3792 visitInstruction(I);
3793}
3794
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  // Source and result types of the cast.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  // In the vector case the element counts must also agree.
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3811
3812/// visitPHINode - Ensure that a PHI node is well formed.
3813///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints (e.g. one entry per predecessor) are
  // checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3837
// Shared verification for all call-like instructions (call, invoke, callbr):
// callee/signature agreement, attribute legality, operand bundles, and
// per-intrinsic rules.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  // No attribute may refer to a parameter slot past the last argument.
  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      // immarg operands must be compile-time immediates.
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      // A preallocated argument needs exactly one of: a preallocated operand
      // bundle, or a musttail marking.
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
          "inlinable function call in a function with "
          "debug info must have a !dbg location",
          Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  // Convergence-control token rules are verified by a dedicated helper.
  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4143
4144void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4145 StringRef Context) {
4146 Check(!Attrs.contains(Attribute::InAlloca),
4147 Twine("inalloca attribute not allowed in ") + Context);
4148 Check(!Attrs.contains(Attribute::InReg),
4149 Twine("inreg attribute not allowed in ") + Context);
4150 Check(!Attrs.contains(Attribute::SwiftError),
4151 Twine("swifterror attribute not allowed in ") + Context);
4152 Check(!Attrs.contains(Attribute::Preallocated),
4153 Twine("preallocated attribute not allowed in ") + Context);
4154 Check(!Attrs.contains(Attribute::ByRef),
4155 Twine("byref attribute not allowed in ") + Context);
4156}
4157
4158/// Two types are "congruent" if they are identical, or if they are both pointer
4159/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  // Identical types are trivially congruent.
  if (L == R)
    return true;
  // Non-pointers that differ are not congruent.
  if (!PL || !PR)
    return false;
  // Two pointer types are congruent iff they share an address space.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4169
4170static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4171 static const Attribute::AttrKind ABIAttrs[] = {
4172 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4173 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4174 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4175 Attribute::ByRef};
4176 AttrBuilder Copy(C);
4177 for (auto AK : ABIAttrs) {
4178 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4179 if (Attr.isValid())
4180 Copy.addAttribute(Attr);
4181 }
4182
4183 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4184 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4185 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4186 Attrs.hasParamAttr(I, Attribute::ByRef)))
4187 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4188 return Copy;
4189}
4190
// Enforce the LangRef rules for a call marked 'musttail': caller/callee
// prototype and ABI-attribute agreement, matching calling conventions, and
// the call being immediately followed (modulo one bitcast) by a ret of its
// result.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  // tailcc/swifttailcc guarantee tail calls even with mismatched prototypes,
  // so they get a narrower attribute-only check and return early.
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4277
4278void Verifier::visitCallInst(CallInst &CI) {
4279 visitCallBase(CI);
4280
4281 if (CI.isMustTailCall())
4282 verifyMustTailCall(CI);
4283}
4284
4285void Verifier::visitInvokeInst(InvokeInst &II) {
4286 visitCallBase(II);
4287
4288 // Verify that the first non-PHI instruction of the unwind destination is an
4289 // exception handling instruction.
4290 Check(
4291 II.getUnwindDest()->isEHPad(),
4292 "The unwind destination does not have an exception handling instruction!",
4293 &II);
4294
4295 visitTerminator(II);
4296}
4297
4298/// visitUnaryOperator - Check the argument to the unary operator.
4299///
4300void Verifier::visitUnaryOperator(UnaryOperator &U) {
4301 Check(U.getType() == U.getOperand(0)->getType(),
4302 "Unary operators must have same type for"
4303 "operands and result!",
4304 &U);
4305
4306 switch (U.getOpcode()) {
4307 // Check that floating-point arithmetic operators are only used with
4308 // floating-point operands.
4309 case Instruction::FNeg:
4310 Check(U.getType()->isFPOrFPVectorTy(),
4311 "FNeg operator only works with float types!", &U);
4312 break;
4313 default:
4314 llvm_unreachable("Unknown UnaryOperator opcode!");
4315 }
4316
4317 visitInstruction(U);
4318}
4319
4320/// visitBinaryOperator - Check that both arguments to the binary operator are
4321/// of the same type!
4322///
4323void Verifier::visitBinaryOperator(BinaryOperator &B) {
4324 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4325 "Both operands to a binary operator are not of the same type!", &B);
4326
4327 switch (B.getOpcode()) {
4328 // Check that integer arithmetic operators are only used with
4329 // integral operands.
4330 case Instruction::Add:
4331 case Instruction::Sub:
4332 case Instruction::Mul:
4333 case Instruction::SDiv:
4334 case Instruction::UDiv:
4335 case Instruction::SRem:
4336 case Instruction::URem:
4337 Check(B.getType()->isIntOrIntVectorTy(),
4338 "Integer arithmetic operators only work with integral types!", &B);
4339 Check(B.getType() == B.getOperand(0)->getType(),
4340 "Integer arithmetic operators must have same type "
4341 "for operands and result!",
4342 &B);
4343 break;
4344 // Check that floating-point arithmetic operators are only used with
4345 // floating-point operands.
4346 case Instruction::FAdd:
4347 case Instruction::FSub:
4348 case Instruction::FMul:
4349 case Instruction::FDiv:
4350 case Instruction::FRem:
4351 Check(B.getType()->isFPOrFPVectorTy(),
4352 "Floating-point arithmetic operators only work with "
4353 "floating-point types!",
4354 &B);
4355 Check(B.getType() == B.getOperand(0)->getType(),
4356 "Floating-point arithmetic operators must have same type "
4357 "for operands and result!",
4358 &B);
4359 break;
4360 // Check that logical operators are only used with integral operands.
4361 case Instruction::And:
4362 case Instruction::Or:
4363 case Instruction::Xor:
4364 Check(B.getType()->isIntOrIntVectorTy(),
4365 "Logical operators only work with integral types!", &B);
4366 Check(B.getType() == B.getOperand(0)->getType(),
4367 "Logical operators must have same type for operands and result!", &B);
4368 break;
4369 case Instruction::Shl:
4370 case Instruction::LShr:
4371 case Instruction::AShr:
4372 Check(B.getType()->isIntOrIntVectorTy(),
4373 "Shifts only work with integral types!", &B);
4374 Check(B.getType() == B.getOperand(0)->getType(),
4375 "Shift return type must be same as operands!", &B);
4376 break;
4377 default:
4378 llvm_unreachable("Unknown BinaryOperator opcode!");
4379 }
4380
4381 visitInstruction(B);
4382}
4383
4384void Verifier::visitICmpInst(ICmpInst &IC) {
4385 // Check that the operands are the same type
4386 Type *Op0Ty = IC.getOperand(0)->getType();
4387 Type *Op1Ty = IC.getOperand(1)->getType();
4388 Check(Op0Ty == Op1Ty,
4389 "Both operands to ICmp instruction are not of the same type!", &IC);
4390 // Check that the operands are the right type
4391 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4392 "Invalid operand types for ICmp instruction", &IC);
4393 // Check that the predicate is valid.
4394 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4395
4396 visitInstruction(IC);
4397}
4398
4399void Verifier::visitFCmpInst(FCmpInst &FC) {
4400 // Check that the operands are the same type
4401 Type *Op0Ty = FC.getOperand(0)->getType();
4402 Type *Op1Ty = FC.getOperand(1)->getType();
4403 Check(Op0Ty == Op1Ty,
4404 "Both operands to FCmp instruction are not of the same type!", &FC);
4405 // Check that the operands are the right type
4406 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4407 &FC);
4408 // Check that the predicate is valid.
4409 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4410
4411 visitInstruction(FC);
4412}
4413
4414void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4416 "Invalid extractelement operands!", &EI);
4417 visitInstruction(EI);
4418}
4419
4420void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4421 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4422 IE.getOperand(2)),
4423 "Invalid insertelement operands!", &IE);
4424 visitInstruction(IE);
4425}
4426
4427void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4429 SV.getShuffleMask()),
4430 "Invalid shufflevector operands!", &SV);
4431 visitInstruction(SV);
4432}
4433
4434void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4435 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4436
4437 Check(isa<PointerType>(TargetTy),
4438 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4439 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4440
4441 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4442 Check(!STy->isScalableTy(),
4443 "getelementptr cannot target structure that contains scalable vector"
4444 "type",
4445 &GEP);
4446 }
4447
4448 SmallVector<Value *, 16> Idxs(GEP.indices());
4449 Check(
4450 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4451 "GEP indexes must be integers", &GEP);
4452 Type *ElTy =
4453 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4454 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4455
4456 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4457
4458 Check(PtrTy && GEP.getResultElementType() == ElTy,
4459 "GEP is not of right type for indices!", &GEP, ElTy);
4460
4461 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4462 // Additional checks for vector GEPs.
4463 ElementCount GEPWidth = GEPVTy->getElementCount();
4464 if (GEP.getPointerOperandType()->isVectorTy())
4465 Check(
4466 GEPWidth ==
4467 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4468 "Vector GEP result width doesn't match operand's", &GEP);
4469 for (Value *Idx : Idxs) {
4470 Type *IndexTy = Idx->getType();
4471 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4472 ElementCount IndexWidth = IndexVTy->getElementCount();
4473 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4474 }
4475 Check(IndexTy->isIntOrIntVectorTy(),
4476 "All GEP indices should be of integer type");
4477 }
4478 }
4479
4480 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4481 "GEP address space doesn't match type", &GEP);
4482
4483 visitInstruction(GEP);
4484}
4485
4486static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4487 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4488}
4489
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// \p Range encodes a list of [Low, High) pairs that must be in order,
/// disjoint, and non-contiguous; \p Ty is the type the metadata is attached
/// to, and \p Kind selects which producer's extra restrictions apply.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in [Low, High) pairs, so there must be an even, non-zero
  // number of them.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Each bound must be a ConstantInt, and both bounds of a pair must share
    // a type.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address space numbers are always encoded as i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      // !range / !absolute_symbol bounds must match the value's scalar type.
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // A range must never be empty; a full range is only accepted for
    // !absolute_symbol (per the function comment above).
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive pairs must be disjoint, strictly increasing, and not
      // mergeable into a single contiguous range.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // With more than two ranges, the loop above never compared the first pair
    // against the last one, so check them here for wrap-around overlap or
    // contiguity. (With exactly two ranges the loop already compared them.)
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4554
/// Verify the !range metadata attached to \p I, whose value type is \p Ty.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  // The caller must pass the instruction's own MD_range node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4560
/// Verify the !noalias.addrspace metadata attached to \p I, whose value type
/// is \p Ty.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  // The caller must pass the instruction's own MD_noalias_addrspace node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4568
4569void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4570 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4571 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4572 Check(!(Size & (Size - 1)),
4573 "atomic memory access' operand must have a power-of-two size", Ty, I);
4574}
4575
4576void Verifier::visitLoadInst(LoadInst &LI) {
4578 Check(PTy, "Load operand must be a pointer.", &LI);
4579 Type *ElTy = LI.getType();
4580 if (MaybeAlign A = LI.getAlign()) {
4581 Check(A->value() <= Value::MaximumAlignment,
4582 "huge alignment values are unsupported", &LI);
4583 }
4584 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4585 if (LI.isAtomic()) {
4586 Check(LI.getOrdering() != AtomicOrdering::Release &&
4587 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4588 "Load cannot have Release ordering", &LI);
4589 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4591 "atomic load operand must have integer, pointer, floating point, "
4592 "or vector type!",
4593 ElTy, &LI);
4594
4595 checkAtomicMemAccessSize(ElTy, &LI);
4596 } else {
4598 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4599 }
4600
4601 visitInstruction(LI);
4602}
4603
4604void Verifier::visitStoreInst(StoreInst &SI) {
4605 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4606 Check(PTy, "Store operand must be a pointer.", &SI);
4607 Type *ElTy = SI.getOperand(0)->getType();
4608 if (MaybeAlign A = SI.getAlign()) {
4609 Check(A->value() <= Value::MaximumAlignment,
4610 "huge alignment values are unsupported", &SI);
4611 }
4612 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4613 if (SI.isAtomic()) {
4614 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4615 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4616 "Store cannot have Acquire ordering", &SI);
4617 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4619 "atomic store operand must have integer, pointer, floating point, "
4620 "or vector type!",
4621 ElTy, &SI);
4622 checkAtomicMemAccessSize(ElTy, &SI);
4623 } else {
4624 Check(SI.getSyncScopeID() == SyncScope::System,
4625 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4626 }
4627 visitInstruction(SI);
4628}
4629
4630/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4631void Verifier::verifySwiftErrorCall(CallBase &Call,
4632 const Value *SwiftErrorVal) {
4633 for (const auto &I : llvm::enumerate(Call.args())) {
4634 if (I.value() == SwiftErrorVal) {
4635 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4636 "swifterror value when used in a callsite should be marked "
4637 "with swifterror attribute",
4638 SwiftErrorVal, Call);
4639 }
4640 }
4641}
4642
4643void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4644 // Check that swifterror value is only used by loads, stores, or as
4645 // a swifterror argument.
4646 for (const User *U : SwiftErrorVal->users()) {
4648 isa<InvokeInst>(U),
4649 "swifterror value can only be loaded and stored from, or "
4650 "as a swifterror argument!",
4651 SwiftErrorVal, U);
4652 // If it is used by a store, check it is the second operand.
4653 if (auto StoreI = dyn_cast<StoreInst>(U))
4654 Check(StoreI->getOperand(1) == SwiftErrorVal,
4655 "swifterror value should be the second operand when used "
4656 "by stores",
4657 SwiftErrorVal, U);
4658 if (auto *Call = dyn_cast<CallBase>(U))
4659 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4660 }
4661}
4662
4663void Verifier::visitAllocaInst(AllocaInst &AI) {
4664 Type *Ty = AI.getAllocatedType();
4665 SmallPtrSet<Type*, 4> Visited;
4666 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4667 // Check if it's a target extension type that disallows being used on the
4668 // stack.
4670 "Alloca has illegal target extension type", &AI);
4672 "Alloca array size must have integer type", &AI);
4673 if (MaybeAlign A = AI.getAlign()) {
4674 Check(A->value() <= Value::MaximumAlignment,
4675 "huge alignment values are unsupported", &AI);
4676 }
4677
4678 if (AI.isSwiftError()) {
4679 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4681 "swifterror alloca must not be array allocation", &AI);
4682 verifySwiftErrorValue(&AI);
4683 }
4684
4685 if (TT.isAMDGPU()) {
4687 "alloca on amdgpu must be in addrspace(5)", &AI);
4688 }
4689
4690 visitInstruction(AI);
4691}
4692
4693void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4694 Type *ElTy = CXI.getOperand(1)->getType();
4695 Check(ElTy->isIntOrPtrTy(),
4696 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4697 checkAtomicMemAccessSize(ElTy, &CXI);
4698 visitInstruction(CXI);
4699}
4700
4701void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4702 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4703 "atomicrmw instructions cannot be unordered.", &RMWI);
4704 auto Op = RMWI.getOperation();
4705 Type *ElTy = RMWI.getOperand(1)->getType();
4706 if (Op == AtomicRMWInst::Xchg) {
4707 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4708 ElTy->isPointerTy(),
4709 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4710 " operand must have integer or floating point type!",
4711 &RMWI, ElTy);
4712 } else if (AtomicRMWInst::isFPOperation(Op)) {
4714 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4715 " operand must have floating-point or fixed vector of floating-point "
4716 "type!",
4717 &RMWI, ElTy);
4718 } else {
4719 Check(ElTy->isIntegerTy(),
4720 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4721 " operand must have integer type!",
4722 &RMWI, ElTy);
4723 }
4724 checkAtomicMemAccessSize(ElTy, &RMWI);
4726 "Invalid binary operation!", &RMWI);
4727 visitInstruction(RMWI);
4728}
4729
4730void Verifier::visitFenceInst(FenceInst &FI) {
4731 const AtomicOrdering Ordering = FI.getOrdering();
4732 Check(Ordering == AtomicOrdering::Acquire ||
4733 Ordering == AtomicOrdering::Release ||
4734 Ordering == AtomicOrdering::AcquireRelease ||
4735 Ordering == AtomicOrdering::SequentiallyConsistent,
4736 "fence instructions may only have acquire, release, acq_rel, or "
4737 "seq_cst ordering.",
4738 &FI);
4739 visitInstruction(FI);
4740}
4741
4742void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4744 EVI.getIndices()) == EVI.getType(),
4745 "Invalid ExtractValueInst operands!", &EVI);
4746
4747 visitInstruction(EVI);
4748}
4749
4750void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4752 IVI.getIndices()) ==
4753 IVI.getOperand(1)->getType(),
4754 "Invalid InsertValueInst operands!", &IVI);
4755
4756 visitInstruction(IVI);
4757}
4758
4759static Value *getParentPad(Value *EHPad) {
4760 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4761 return FPI->getParentPad();
4762
4763 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4764}
4765
4766void Verifier::visitEHPadPredecessors(Instruction &I) {
4767 assert(I.isEHPad());
4768
4769 BasicBlock *BB = I.getParent();
4770 Function *F = BB->getParent();
4771
4772 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4773
4774 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4775 // The landingpad instruction defines its parent as a landing pad block. The
4776 // landing pad block may be branched to only by the unwind edge of an
4777 // invoke.
4778 for (BasicBlock *PredBB : predecessors(BB)) {
4779 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4780 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4781 "Block containing LandingPadInst must be jumped to "
4782 "only by the unwind edge of an invoke.",
4783 LPI);
4784 }
4785 return;
4786 }
4787 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4788 if (!pred_empty(BB))
4789 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4790 "Block containg CatchPadInst must be jumped to "
4791 "only by its catchswitch.",
4792 CPI);
4793 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4794 "Catchswitch cannot unwind to one of its catchpads",
4795 CPI->getCatchSwitch(), CPI);
4796 return;
4797 }
4798
4799 // Verify that each pred has a legal terminator with a legal to/from EH
4800 // pad relationship.
4801 Instruction *ToPad = &I;
4802 Value *ToPadParent = getParentPad(ToPad);
4803 for (BasicBlock *PredBB : predecessors(BB)) {
4804 Instruction *TI = PredBB->getTerminator();
4805 Value *FromPad;
4806 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4807 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4808 "EH pad must be jumped to via an unwind edge", ToPad, II);
4809 auto *CalledFn =
4810 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4811 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4812 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4813 continue;
4814 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4815 FromPad = Bundle->Inputs[0];
4816 else
4817 FromPad = ConstantTokenNone::get(II->getContext());
4818 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4819 FromPad = CRI->getOperand(0);
4820 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4821 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4822 FromPad = CSI;
4823 } else {
4824 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4825 }
4826
4827 // The edge may exit from zero or more nested pads.
4828 SmallPtrSet<Value *, 8> Seen;
4829 for (;; FromPad = getParentPad(FromPad)) {
4830 Check(FromPad != ToPad,
4831 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4832 if (FromPad == ToPadParent) {
4833 // This is a legal unwind edge.
4834 break;
4835 }
4836 Check(!isa<ConstantTokenNone>(FromPad),
4837 "A single unwind edge may only enter one EH pad", TI);
4838 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4839 FromPad);
4840
4841 // This will be diagnosed on the corresponding instruction already. We
4842 // need the extra check here to make sure getParentPad() works.
4843 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4844 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4845 }
4846 }
4847}
4848
4849void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4850 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4851 // isn't a cleanup.
4852 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4853 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4854
4855 visitEHPadPredecessors(LPI);
4856
4857 if (!LandingPadResultTy)
4858 LandingPadResultTy = LPI.getType();
4859 else
4860 Check(LandingPadResultTy == LPI.getType(),
4861 "The landingpad instruction should have a consistent result type "
4862 "inside a function.",
4863 &LPI);
4864
4865 Function *F = LPI.getParent()->getParent();
4866 Check(F->hasPersonalityFn(),
4867 "LandingPadInst needs to be in a function with a personality.", &LPI);
4868
4869 // The landingpad instruction must be the first non-PHI instruction in the
4870 // block.
4871 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4872 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4873
4874 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4875 Constant *Clause = LPI.getClause(i);
4876 if (LPI.isCatch(i)) {
4877 Check(isa<PointerType>(Clause->getType()),
4878 "Catch operand does not have pointer type!", &LPI);
4879 } else {
4880 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4882 "Filter operand is not an array of constants!", &LPI);
4883 }
4884 }
4885
4886 visitInstruction(LPI);
4887}
4888
4889void Verifier::visitResumeInst(ResumeInst &RI) {
4891 "ResumeInst needs to be in a function with a personality.", &RI);
4892
4893 if (!LandingPadResultTy)
4894 LandingPadResultTy = RI.getValue()->getType();
4895 else
4896 Check(LandingPadResultTy == RI.getValue()->getType(),
4897 "The resume instruction should have a consistent result type "
4898 "inside a function.",
4899 &RI);
4900
4901 visitTerminator(RI);
4902}
4903
4904void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4905 BasicBlock *BB = CPI.getParent();
4906
4907 Function *F = BB->getParent();
4908 Check(F->hasPersonalityFn(),
4909 "CatchPadInst needs to be in a function with a personality.", &CPI);
4910
4912 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4913 CPI.getParentPad());
4914
4915 // The catchpad instruction must be the first non-PHI instruction in the
4916 // block.
4917 Check(&*BB->getFirstNonPHIIt() == &CPI,
4918 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4919
4920 visitEHPadPredecessors(CPI);
4921 visitFuncletPadInst(CPI);
4922}
4923
4924void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4925 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4926 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4927 CatchReturn.getOperand(0));
4928
4929 visitTerminator(CatchReturn);
4930}
4931
4932void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4933 BasicBlock *BB = CPI.getParent();
4934
4935 Function *F = BB->getParent();
4936 Check(F->hasPersonalityFn(),
4937 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4938
4939 // The cleanuppad instruction must be the first non-PHI instruction in the
4940 // block.
4941 Check(&*BB->getFirstNonPHIIt() == &CPI,
4942 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4943
4944 auto *ParentPad = CPI.getParentPad();
4945 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4946 "CleanupPadInst has an invalid parent.", &CPI);
4947
4948 visitEHPadPredecessors(CPI);
4949 visitFuncletPadInst(CPI);
4950}
4951
/// Shared verification for catchpad and cleanuppad instructions.
///
/// Walks every use of \p FPI — and, via a worklist, the uses of cleanuppads
/// nested inside it, whose unwind destination can only be discovered through
/// their own uses — checking that all unwind edges that exit FPI agree on a
/// single unwind destination, and that this destination is consistent with
/// the parent catchswitch's unwind destination when the parent is one.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found to exit FPI; later edges are compared against it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Classify the use and extract its unwind destination, if any.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // UnwindPad is the pad (or "none" token) this edge transfers to;
      // ExitsFPI records whether the edge actually leaves FPI itself.
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // When the parent is a catchswitch, FPI's unwind destination must also
    // agree with the catchswitch's own unwind destination (or both must
    // unwind to the caller).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5111
/// Verify a catchswitch: it needs a personality, must lead its block, must
/// have a valid parent pad and a non-empty handler list, and its unwind and
/// handler successors must begin with the appropriate EH pads.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  // EH pads only make sense in functions that declare a personality.
  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // The parent pad is either the "none" token or another funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    // An explicit unwind successor must begin with an EH pad that is not a
    // landingpad.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  // Every handler block must begin with a catchpad.
  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
5153
// Verify a cleanupret: its pad operand must be a cleanuppad (checked by the
// opening Check below), and an explicit unwind destination, if present, must
// begin with a non-landingpad EH pad.
5154void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5156        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5157        CRI.getOperand(0));
5158
5159  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    // The destination's first non-PHI must be an EH pad; landingpads belong
    // to the other (non-funclet) EH personality model and are rejected.
5160    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5161    Check(I->isEHPad() && !isa<LandingPadInst>(I),
5162          "CleanupReturnInst must unwind to an EH block which is not a "
5163          "landingpad.",
5164          &CRI);
5165  }
5166
5167  visitTerminator(CRI);
5168}
5169
5170void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5171 Instruction *Op = cast<Instruction>(I.getOperand(i));
5172 // If the we have an invalid invoke, don't try to compute the dominance.
5173 // We already reject it in the invoke specific checks and the dominance
5174 // computation doesn't handle multiple edges.
5175 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5176 if (II->getNormalDest() == II->getUnwindDest())
5177 return;
5178 }
5179
5180 // Quick check whether the def has already been encountered in the same block.
5181 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5182 // uses are defined to happen on the incoming edge, not at the instruction.
5183 //
5184 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5185 // wrapping an SSA value, assert that we've already encountered it. See
5186 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5187 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5188 return;
5189
5190 const Use &U = I.getOperandUse(i);
5191 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5192}
5193
// Shared verifier for !dereferenceable and !dereferenceable_or_null: the
// instruction must produce a pointer and the metadata must hold exactly one
// i64 byte-count operand.
5194void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5195  Check(I.getType()->isPointerTy(),
5196        "dereferenceable, dereferenceable_or_null "
5197        "apply only to pointer types",
5198        &I);
5200        "dereferenceable, dereferenceable_or_null apply only to load"
5201        " and inttoptr instructions, use attributes for calls or invokes",
5202        &I);
5203  Check(MD->getNumOperands() == 1,
5204        "dereferenceable, dereferenceable_or_null "
5205        "take one operand!",
5206        &I);
  // The single operand must be a constant of integer type i64.
5207  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5208  Check(CI && CI->getType()->isIntegerTy(64),
5209        "dereferenceable, "
5210        "dereferenceable_or_null metadata value must be an i64!",
5211        &I);
5212}
5213
5214void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5215 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5216 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5217 &I);
5218 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5219}
5220
5221void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5222 auto GetBranchingTerminatorNumOperands = [&]() {
5223 unsigned ExpectedNumOperands = 0;
5224 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5225 ExpectedNumOperands = BI->getNumSuccessors();
5226 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5227 ExpectedNumOperands = SI->getNumSuccessors();
5228 else if (isa<CallInst>(&I))
5229 ExpectedNumOperands = 1;
5230 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5231 ExpectedNumOperands = IBI->getNumDestinations();
5232 else if (isa<SelectInst>(&I))
5233 ExpectedNumOperands = 2;
5234 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5235 ExpectedNumOperands = CI->getNumSuccessors();
5236 return ExpectedNumOperands;
5237 };
5238 Check(MD->getNumOperands() >= 1,
5239 "!prof annotations should have at least 1 operand", MD);
5240 // Check first operand.
5241 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5243 "expected string with name of the !prof annotation", MD);
5244 MDString *MDS = cast<MDString>(MD->getOperand(0));
5245 StringRef ProfName = MDS->getString();
5246
5248 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5249 "'unknown' !prof should only appear on instructions on which "
5250 "'branch_weights' would",
5251 MD);
5252 verifyUnknownProfileMetadata(MD);
5253 return;
5254 }
5255
5256 Check(MD->getNumOperands() >= 2,
5257 "!prof annotations should have no less than 2 operands", MD);
5258
5259 // Check consistency of !prof branch_weights metadata.
5260 if (ProfName == MDProfLabels::BranchWeights) {
5261 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5262 if (isa<InvokeInst>(&I)) {
5263 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5264 "Wrong number of InvokeInst branch_weights operands", MD);
5265 } else {
5266 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5267 if (ExpectedNumOperands == 0)
5268 CheckFailed("!prof branch_weights are not allowed for this instruction",
5269 MD);
5270
5271 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5272 MD);
5273 }
5274 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5275 ++i) {
5276 auto &MDO = MD->getOperand(i);
5277 Check(MDO, "second operand should not be null", MD);
5279 "!prof brunch_weights operand is not a const int");
5280 }
5281 } else if (ProfName == MDProfLabels::ValueProfile) {
5282 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5283 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5284 Check(KindInt, "VP !prof missing kind argument", MD);
5285
5286 auto Kind = KindInt->getZExtValue();
5287 Check(Kind >= InstrProfValueKind::IPVK_First &&
5288 Kind <= InstrProfValueKind::IPVK_Last,
5289 "Invalid VP !prof kind", MD);
5290 Check(MD->getNumOperands() % 2 == 1,
5291 "VP !prof should have an even number "
5292 "of arguments after 'VP'",
5293 MD);
5294 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5295 Kind == InstrProfValueKind::IPVK_MemOPSize)
5297 "VP !prof indirect call or memop size expected to be applied to "
5298 "CallBase instructions only",
5299 MD);
5300 } else {
5301 CheckFailed("expected either branch_weights or VP profile name", MD);
5302 }
5303}
5304
// Verify a !DIAssignID attachment: it must sit on an alloca or a
// memory-writing instruction, and every user of the ID must be a
// dbg.assign intrinsic or assign-kind DbgVariableRecord in the same function.
5305void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5306  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5307  // DIAssignID metadata must be attached to either an alloca or some form of
5308  // store/memory-writing instruction.
5309  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5310  // possible store intrinsics.
5311  bool ExpectedInstTy =
5313  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5314          I, MD);
5315  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5316  // only be found as DbgAssignIntrinsic operands.
5317  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5318    for (auto *User : AsValue->users()) {
5320              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5321              MD, User);
5322      // All of the dbg.assign intrinsics should be in the same function as I.
5323      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5324        CheckDI(DAI->getFunction() == I.getFunction(),
5325                "dbg.assign not in same function as inst", DAI, &I);
5326    }
5327  }
  // Non-instruction debug records referencing this ID must likewise be
  // assign-kind and live in the same function.
5328  for (DbgVariableRecord *DVR :
5329       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5330    CheckDI(DVR->isDbgAssign(),
5331            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5332    CheckDI(DVR->getFunction() == I.getFunction(),
5333            "DVRAssign not in same function as inst", DVR, &I);
5334  }
5335}
5336
// Verify an !mmra attachment: it is either a single MMRA tag or an MDTuple
// whose every operand is an MMRA tag.
5337void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5339          "!mmra metadata attached to unexpected instruction kind", I, MD);
5340
5341  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5342  // list of tags such as !2 in the following example:
5343  // !0 = !{!"a", !"b"}
5344  // !1 = !{!"c", !"d"}
5345  // !2 = !{!0, !1}
5346  if (MMRAMetadata::isTagMD(MD))
5347    return;
5348
  // Not a single tag, so it must be a tuple of tags.
5349  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5350  for (const MDOperand &MDOp : MD->operands())
5351    Check(MMRAMetadata::isTagMD(MDOp.get()),
5352          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5353}
5354
// Verify call-stack metadata shared by !memprof and !callsite: a non-empty
// list whose operands are constant integers (frame hashes).
5355void Verifier::visitCallStackMetadata(MDNode *MD) {
5356  // Call stack metadata should consist of a list of at least 1 constant int
5357  // (representing a hash of the location).
5358  Check(MD->getNumOperands() >= 1,
5359        "call stack metadata should have at least 1 operand", MD);
5360
5361  for (const auto &Op : MD->operands())
5363          "call stack metadata operand should be constant integer", Op);
5364}
5365
// Verify a !memprof attachment: a non-empty list of MemInfoBlock (MIB)
// nodes, each holding a call stack, an MDString tag, and optional
// integer-pair nodes.
5366void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5367  Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5368  Check(MD->getNumOperands() >= 1,
5369        "!memprof annotations should have at least 1 metadata operand "
5370        "(MemInfoBlock)",
5371        MD);
5372
5373  // Check each MIB
5374  for (auto &MIBOp : MD->operands()) {
5375    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // NOTE(review): MIB is dereferenced just below without a null check;
    // dyn_cast yields null for a non-MDNode operand — confirm whether a
    // malformed attachment can reach this point.
5376    // The first operand of an MIB should be the call stack metadata.
5377    // There rest of the operands should be MDString tags, and there should be
5378    // at least one.
5379    Check(MIB->getNumOperands() >= 2,
5380          "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5381
5382    // Check call stack metadata (first operand).
5383    Check(MIB->getOperand(0) != nullptr,
5384          "!memprof MemInfoBlock first operand should not be null", MIB);
5385    Check(isa<MDNode>(MIB->getOperand(0)),
5386          "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5387    MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5388    visitCallStackMetadata(StackMD);
5389
5390    // The second MIB operand should be MDString.
5392          "!memprof MemInfoBlock second operand should be an MDString", MIB);
5393
5394    // Any remaining should be MDNode that are pairs of integers
5395    for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5396      MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5397      Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5398            MIB);
5399      Check(OpNode->getNumOperands() == 2,
5400            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5401            "operands",
5402            MIB);
5403      // Check that all of Op's operands are ConstantInt.
5404      Check(llvm::all_of(OpNode->operands(),
5405                         [](const MDOperand &Op) {
5406                           return mdconst::hasa<ConstantInt>(Op);
5407                         }),
5408            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5409            "ConstantInt operands",
5410            MIB);
5411    }
5412  }
5413}
5414
5415void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5416 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5417 // Verify the partial callstack annotated from memprof profiles. This callsite
5418 // is a part of a profiled allocation callstack.
5419 visitCallStackMetadata(MD);
5420}
5421
5422static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5423 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5424 return isa<ConstantInt>(VAL->getValue());
5425 return false;
5426}
5427
// Verify a !callee_type attachment: a list of two-operand generalized type
// metadata nodes, each with a zero first operand and a generalized MDString.
5428void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5429  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5430        &I);
5431  for (Metadata *Op : MD->operands()) {
5433          "The callee_type metadata must be a list of type metadata nodes", Op);
5434    auto *TypeMD = cast<MDNode>(Op);
5435    Check(TypeMD->getNumOperands() == 2,
5436          "Well-formed generalized type metadata must contain exactly two "
5437          "operands",
5438          Op);
    // Offset operand must be the constant integer zero.
5439    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5440              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5441          "The first operand of type metadata for functions must be zero", Op);
5442    Check(TypeMD->hasGeneralizedMDString(),
5443          "Only generalized type metadata can be part of the callee_type "
5444          "metadata list",
5445          Op);
5446  }
5447}
5448
5449void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5450 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5451 Check(Annotation->getNumOperands() >= 1,
5452 "annotation must have at least one operand");
5453 for (const MDOperand &Op : Annotation->operands()) {
5454 bool TupleOfStrings =
5455 isa<MDTuple>(Op.get()) &&
5456 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5457 return isa<MDString>(Annotation.get());
5458 });
5459 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5460 "operands must be a string or a tuple of strings");
5461 }
5462}
5463
// Verify one alias scope node: 2-3 operands (self-ref/name, domain,
// optional name string); the domain in turn has 1-2 operands.
5464void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5465  unsigned NumOps = MD->getNumOperands();
5466  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5467        MD);
  // Operand 0 identifies the scope: either the node itself or a string.
5468  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5469        "first scope operand must be self-referential or string", MD);
5470  if (NumOps == 3)
5472          "third scope operand must be string (if used)", MD);
5473
  // Operand 1 is the alias domain the scope belongs to.
5474  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5475  Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5476
5477  unsigned NumDomainOps = Domain->getNumOperands();
5478  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5479        "domain must have one or two operands", Domain);
5480  Check(Domain->getOperand(0).get() == Domain ||
5481            isa<MDString>(Domain->getOperand(0)),
5482        "first domain operand must be self-referential or string", Domain);
5483  if (NumDomainOps == 2)
5484    Check(isa<MDString>(Domain->getOperand(1)),
5485          "second domain operand must be string (if used)", Domain);
5486}
5487
5488void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5489 for (const MDOperand &Op : MD->operands()) {
5490 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5491 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5492 visitAliasScopeMetadata(OpMD);
5493 }
5494}
5495
5496void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5497 auto IsValidAccessScope = [](const MDNode *MD) {
5498 return MD->getNumOperands() == 0 && MD->isDistinct();
5499 };
5500
5501 // It must be either an access scope itself...
5502 if (IsValidAccessScope(MD))
5503 return;
5504
5505 // ...or a list of access scopes.
5506 for (const MDOperand &Op : MD->operands()) {
5507 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5508 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5509 Check(IsValidAccessScope(OpMD),
5510 "Access scope list contains invalid access scope", MD);
5511 }
5512}
5513
5514void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5515 static const char *ValidArgs[] = {"address_is_null", "address",
5516 "read_provenance", "provenance"};
5517
5518 auto *SI = dyn_cast<StoreInst>(&I);
5519 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5520 Check(SI->getValueOperand()->getType()->isPointerTy(),
5521 "!captures metadata can only be applied to store with value operand of "
5522 "pointer type",
5523 &I);
5524 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5525 &I);
5526
5527 for (Metadata *Op : Captures->operands()) {
5528 auto *Str = dyn_cast<MDString>(Op);
5529 Check(Str, "!captures metadata must be a list of strings", &I);
5530 Check(is_contained(ValidArgs, Str->getString()),
5531 "invalid entry in !captures metadata", &I, Str);
5532 }
5533}
5534
// Verify an !alloc_token attachment: calls only, with exactly two operands
// (a string and, per the check below, an integer constant).
5535void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5536  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5537  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5538  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5540        "expected integer constant", MD);
5541}
5542
5543/// verifyInstruction - Verify that an instruction is well formed.
5544///
// Central per-instruction verification: structural SSA checks, per-operand
// validity, then dispatch to the metadata-specific visitors for each
// recognized attachment kind.
5545void Verifier::visitInstruction(Instruction &I) {
5546  BasicBlock *BB = I.getParent();
5547  Check(BB, "Instruction not embedded in basic block!", &I);
5548
5549  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5550    for (User *U : I.users()) {
      // Self-reference is tolerated in unreachable code.
5551      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5552            "Only PHI nodes may reference their own value!", &I);
5553    }
5554  }
5555
5556  // Check that void typed values don't have names
5557  Check(!I.getType()->isVoidTy() || !I.hasName(),
5558        "Instruction has a name, but provides a void value!", &I);
5559
5560  // Check that the return value of the instruction is either void or a legal
5561  // value type.
5562  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5563        "Instruction returns a non-scalar type!", &I);
5564
5565  // Check that the instruction doesn't produce metadata. Calls are already
5566  // checked against the callee type.
5567  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5568        "Invalid use of metadata!", &I);
5569
5570  // Check that all uses of the instruction, if they are instructions
5571  // themselves, actually have parent basic blocks. If the use is not an
5572  // instruction, it is an error!
5573  for (Use &U : I.uses()) {
5574    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5575      Check(Used->getParent() != nullptr,
5576            "Instruction referencing"
5577            " instruction not embedded in a basic block!",
5578            &I, Used);
5579    else {
5580      CheckFailed("Use of instruction is not an instruction!", U);
5581      return;
5582    }
5583  }
5584
5585  // Get a pointer to the call base of the instruction if it is some form of
5586  // call.
5587  const CallBase *CBI = dyn_cast<CallBase>(&I);
5588
  // Per-operand checks: each operand must be first-class and must belong to
  // the same function/module as this instruction.
5589  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5590    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5591
5592    // Check to make sure that only first-class-values are operands to
5593    // instructions.
5594    if (!I.getOperand(i)->getType()->isFirstClassType()) {
5595      Check(false, "Instruction operands must be first-class values!", &I);
5596    }
5597
5598    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5599      // This code checks whether the function is used as the operand of a
5600      // clang_arc_attachedcall operand bundle.
5601      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5602                                      int Idx) {
5603        return CBI && CBI->isOperandBundleOfType(
5605      };
5606
5607      // Check to make sure that the "address of" an intrinsic function is never
5608      // taken. Ignore cases where the address of the intrinsic function is used
5609      // as the argument of operand bundle "clang.arc.attachedcall" as those
5610      // cases are handled in verifyAttachedCallBundle.
5611      Check((!F->isIntrinsic() ||
5612             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5613             IsAttachedCallOperand(F, CBI, i)),
5614            "Cannot take the address of an intrinsic!", &I);
      // Only a whitelisted set of intrinsics may appear as an invoke/other
      // non-call callee (see the diagnostic below for the list).
5615      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5616                F->getIntrinsicID() == Intrinsic::donothing ||
5617                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5618                F->getIntrinsicID() == Intrinsic::seh_try_end ||
5619                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5620                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5621                F->getIntrinsicID() == Intrinsic::coro_resume ||
5622                F->getIntrinsicID() == Intrinsic::coro_destroy ||
5623                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5624                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5625                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5626                F->getIntrinsicID() ==
5627                    Intrinsic::experimental_patchpoint_void ||
5628                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5629                F->getIntrinsicID() == Intrinsic::fake_use ||
5630                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5631                F->getIntrinsicID() == Intrinsic::wasm_throw ||
5632                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5633                IsAttachedCallOperand(F, CBI, i),
5634            "Cannot invoke an intrinsic other than donothing, patchpoint, "
5635            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5636            "wasm.(re)throw",
5637            &I);
5638      Check(F->getParent() == &M, "Referencing function in another module!", &I,
5639            &M, F, F->getParent());
5640    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5641      Check(OpBB->getParent() == BB->getParent(),
5642            "Referring to a basic block in another function!", &I);
5643    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5644      Check(OpArg->getParent() == BB->getParent(),
5645            "Referring to an argument in another function!", &I);
5646    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5647      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5648            &M, GV, GV->getParent());
5649    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5650      Check(OpInst->getFunction() == BB->getParent(),
5651            "Referring to an instruction in another function!", &I);
      // Instruction operands must additionally dominate this use.
5652      verifyDominatesUse(I, i);
5653    } else if (isa<InlineAsm>(I.getOperand(i))) {
5654      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5655            "Cannot take the address of an inline asm!", &I);
5656    } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5657      visitConstantExprsRecursively(C);
5658    }
5659  }
5660
  // Metadata attachment checks: validate each known kind, either inline or
  // via its dedicated visitor.
5661  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5662    Check(I.getType()->isFPOrFPVectorTy(),
5663          "fpmath requires a floating point result!", &I);
5664    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5665    if (ConstantFP *CFP0 =
      // The accuracy operand must be a finite, positive single-precision
      // constant.
5667      const APFloat &Accuracy = CFP0->getValueAPF();
5668      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5669            "fpmath accuracy must have float type", &I);
5670      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5671            "fpmath accuracy not a positive number!", &I);
5672    } else {
5673      Check(false, "invalid fpmath accuracy!", &I);
5674    }
5675  }
5676
5677  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5679          "Ranges are only for loads, calls and invokes!", &I);
5680    visitRangeMetadata(I, Range, I.getType());
5681  }
5682
5683  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5686          "noalias.addrspace are only for memory operations!", &I);
5687    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5688  }
5689
5690  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5692          "invariant.group metadata is only for loads and stores", &I);
5693  }
5694
5695  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5696    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5697          &I);
5699          "nonnull applies only to load instructions, use attributes"
5700          " for calls or invokes",
5701          &I);
5702    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5703  }
5704
5705  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5706    visitDereferenceableMetadata(I, MD);
5707
5708  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5709    visitDereferenceableMetadata(I, MD);
5710
5711  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5712    visitNofreeMetadata(I, MD);
5713
5714  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5715    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5716
  // noalias and alias.scope share the scope-list format.
5717  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5718    visitAliasScopeListMetadata(MD);
5719  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5720    visitAliasScopeListMetadata(MD);
5721
5722  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5723    visitAccessGroupMetadata(MD);
5724
5725  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5726    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5727          &I);
5729          "align applies only to load instructions, "
5730          "use attributes for calls or invokes",
5731          &I);
5732    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5733    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5734    Check(CI && CI->getType()->isIntegerTy(64),
5735          "align metadata value must be an i64!", &I);
5736    uint64_t Align = CI->getZExtValue();
5737    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5738          &I);
5739    Check(Align <= Value::MaximumAlignment,
5740          "alignment is larger that implementation defined limit", &I);
5741  }
5742
5743  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5744    visitProfMetadata(I, MD);
5745
5746  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5747    visitMemProfMetadata(I, MD);
5748
5749  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5750    visitCallsiteMetadata(I, MD);
5751
5752  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5753    visitCalleeTypeMetadata(I, MD);
5754
5755  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5756    visitDIAssignIDMetadata(I, MD);
5757
5758  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5759    visitMMRAMetadata(I, MMRA);
5760
5761  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5762    visitAnnotationMetadata(Annotation);
5763
5764  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5765    visitCapturesMetadata(I, Captures);
5766
5767  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5768    visitAllocTokenMetadata(I, MD);
5769
5770  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5771    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5772    visitMDNode(*N, AreDebugLocsAllowed::Yes);
5773
5774    if (auto *DL = dyn_cast<DILocation>(N)) {
5775      if (DL->getAtomGroup()) {
5776        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5777                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5778                "Instructions enabled",
5779                DL, DL->getScope()->getSubprogram());
5780      }
5781    }
5782  }
5783
  // Finally verify every attachment node itself; debug locations are only
  // permitted inside !dbg and !loop attachments.
5785  I.getAllMetadata(MDs);
5786  for (auto Attachment : MDs) {
5787    unsigned Kind = Attachment.first;
5788    auto AllowLocs =
5789        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5790            ? AreDebugLocsAllowed::Yes
5791            : AreDebugLocsAllowed::No;
5792    visitMDNode(*Attachment.second, AllowLocs);
5793  }
5794
  // Make the def visible to the same-block fast path in verifyDominatesUse.
5795  InstsInThisBlock.insert(&I);
5796}
5797
5798/// Allow intrinsics to be verified in different ways.
5799void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5801 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5802 IF);
5803
5804 // Verify that the intrinsic prototype lines up with what the .td files
5805 // describe.
5806 FunctionType *IFTy = IF->getFunctionType();
5807 bool IsVarArg = IFTy->isVarArg();
5808
5812
5813 // Walk the descriptors to extract overloaded types.
5818 "Intrinsic has incorrect return type!", IF);
5820 "Intrinsic has incorrect argument type!", IF);
5821
5822 // Verify if the intrinsic call matches the vararg property.
5823 if (IsVarArg)
5825 "Intrinsic was not defined with variable arguments!", IF);
5826 else
5828 "Callsite was not defined with variable arguments!", IF);
5829
5830 // All descriptors should be absorbed by now.
5831 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5832
5833 // Now that we have the intrinsic ID and the actual argument types (and we
5834 // know they are legal for the intrinsic!) get the intrinsic name through the
5835 // usual means. This allows us to verify the mangling of argument types into
5836 // the name.
5837 const std::string ExpectedName =
5838 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5839 Check(ExpectedName == IF->getName(),
5840 "Intrinsic name not mangled correctly for type arguments! "
5841 "Should be: " +
5842 ExpectedName,
5843 IF);
5844
5845 // If the intrinsic takes MDNode arguments, verify that they are either global
5846 // or are local to *this* function.
5847 for (Value *V : Call.args()) {
5848 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5849 visitMetadataAsValue(*MD, Call.getCaller());
5850 if (auto *Const = dyn_cast<Constant>(V))
5851 Check(!Const->getType()->isX86_AMXTy(),
5852 "const x86_amx is not allowed in argument!");
5853 }
5854
5855 switch (ID) {
5856 default:
5857 break;
5858 case Intrinsic::assume: {
5859 if (Call.hasOperandBundles()) {
5861 Check(Cond && Cond->isOne(),
5862 "assume with operand bundles must have i1 true condition", Call);
5863 }
5864 for (auto &Elem : Call.bundle_op_infos()) {
5865 unsigned ArgCount = Elem.End - Elem.Begin;
5866 // Separate storage assumptions are special insofar as they're the only
5867 // operand bundles allowed on assumes that aren't parameter attributes.
5868 if (Elem.Tag->getKey() == "separate_storage") {
5869 Check(ArgCount == 2,
5870 "separate_storage assumptions should have 2 arguments", Call);
5871 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5872 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5873 "arguments to separate_storage assumptions should be pointers",
5874 Call);
5875 continue;
5876 }
5877 Check(Elem.Tag->getKey() == "ignore" ||
5878 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5879 "tags must be valid attribute names", Call);
5880 Attribute::AttrKind Kind =
5881 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5882 if (Kind == Attribute::Alignment) {
5883 Check(ArgCount <= 3 && ArgCount >= 2,
5884 "alignment assumptions should have 2 or 3 arguments", Call);
5885 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5886 "first argument should be a pointer", Call);
5887 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5888 "second argument should be an integer", Call);
5889 if (ArgCount == 3)
5890 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5891 "third argument should be an integer if present", Call);
5892 continue;
5893 }
5894 if (Kind == Attribute::Dereferenceable) {
5895 Check(ArgCount == 2,
5896 "dereferenceable assumptions should have 2 arguments", Call);
5897 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5898 "first argument should be a pointer", Call);
5899 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5900 "second argument should be an integer", Call);
5901 continue;
5902 }
5903 Check(ArgCount <= 2, "too many arguments", Call);
5904 if (Kind == Attribute::None)
5905 break;
5906 if (Attribute::isIntAttrKind(Kind)) {
5907 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5908 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5909 "the second argument should be a constant integral value", Call);
5910 } else if (Attribute::canUseAsParamAttr(Kind)) {
5911 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5912 } else if (Attribute::canUseAsFnAttr(Kind)) {
5913 Check((ArgCount) == 0, "this attribute has no argument", Call);
5914 }
5915 }
5916 break;
5917 }
5918 case Intrinsic::ucmp:
5919 case Intrinsic::scmp: {
5920 Type *SrcTy = Call.getOperand(0)->getType();
5921 Type *DestTy = Call.getType();
5922
5923 Check(DestTy->getScalarSizeInBits() >= 2,
5924 "result type must be at least 2 bits wide", Call);
5925
5926 bool IsDestTypeVector = DestTy->isVectorTy();
5927 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5928 "ucmp/scmp argument and result types must both be either vector or "
5929 "scalar types",
5930 Call);
5931 if (IsDestTypeVector) {
5932 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5933 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5934 Check(SrcVecLen == DestVecLen,
5935 "return type and arguments must have the same number of "
5936 "elements",
5937 Call);
5938 }
5939 break;
5940 }
5941 case Intrinsic::coro_id: {
5942 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5943 if (isa<ConstantPointerNull>(InfoArg))
5944 break;
5945 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5946 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5947 "info argument of llvm.coro.id must refer to an initialized "
5948 "constant");
5949 Constant *Init = GV->getInitializer();
5951 "info argument of llvm.coro.id must refer to either a struct or "
5952 "an array");
5953 break;
5954 }
5955 case Intrinsic::is_fpclass: {
5956 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5957 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5958 "unsupported bits for llvm.is.fpclass test mask");
5959 break;
5960 }
5961 case Intrinsic::fptrunc_round: {
5962 // Check the rounding mode
5963 Metadata *MD = nullptr;
5965 if (MAV)
5966 MD = MAV->getMetadata();
5967
5968 Check(MD != nullptr, "missing rounding mode argument", Call);
5969
5970 Check(isa<MDString>(MD),
5971 ("invalid value for llvm.fptrunc.round metadata operand"
5972 " (the operand should be a string)"),
5973 MD);
5974
5975 std::optional<RoundingMode> RoundMode =
5976 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5977 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5978 "unsupported rounding mode argument", Call);
5979 break;
5980 }
5981 case Intrinsic::convert_to_arbitrary_fp: {
5982 // Check that vector element counts are consistent.
5983 Type *ValueTy = Call.getArgOperand(0)->getType();
5984 Type *IntTy = Call.getType();
5985
5986 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
5987 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
5988 Check(IntVecTy,
5989 "if floating-point operand is a vector, integer operand must also "
5990 "be a vector",
5991 Call);
5992 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
5993 "floating-point and integer vector operands must have the same "
5994 "element count",
5995 Call);
5996 }
5997
5998 // Check interpretation metadata (argoperand 1).
5999 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6000 Check(InterpMAV, "missing interpretation metadata operand", Call);
6001 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6002 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6003 StringRef Interp = InterpStr->getString();
6004
6005 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6006 Call);
6007
6008 // Valid interpretation strings: mini-float format names.
6010 "unsupported interpretation metadata string", Call);
6011
6012 // Check rounding mode metadata (argoperand 2).
6013 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6014 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6015 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6016 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6017
6018 std::optional<RoundingMode> RM =
6019 convertStrToRoundingMode(RoundingStr->getString());
6020 Check(RM && *RM != RoundingMode::Dynamic,
6021 "unsupported rounding mode argument", Call);
6022 break;
6023 }
6024 case Intrinsic::convert_from_arbitrary_fp: {
6025 // Check that vector element counts are consistent.
6026 Type *IntTy = Call.getArgOperand(0)->getType();
6027 Type *ValueTy = Call.getType();
6028
6029 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6030 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6031 Check(IntVecTy,
6032 "if floating-point operand is a vector, integer operand must also "
6033 "be a vector",
6034 Call);
6035 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6036 "floating-point and integer vector operands must have the same "
6037 "element count",
6038 Call);
6039 }
6040
6041 // Check interpretation metadata (argoperand 1).
6042 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6043 Check(InterpMAV, "missing interpretation metadata operand", Call);
6044 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6045 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6046 StringRef Interp = InterpStr->getString();
6047
6048 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6049 Call);
6050
6051 // Valid interpretation strings: mini-float format names.
6053 "unsupported interpretation metadata string", Call);
6054 break;
6055 }
6056#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6057#include "llvm/IR/VPIntrinsics.def"
6058#undef BEGIN_REGISTER_VP_INTRINSIC
6059 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6060 break;
6061#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6062 case Intrinsic::INTRINSIC:
6063#include "llvm/IR/ConstrainedOps.def"
6064#undef INSTRUCTION
6065 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6066 break;
6067 case Intrinsic::dbg_declare: // llvm.dbg.declare
6068 case Intrinsic::dbg_value: // llvm.dbg.value
6069 case Intrinsic::dbg_assign: // llvm.dbg.assign
6070 case Intrinsic::dbg_label: // llvm.dbg.label
 6071 // We no longer interpret debug intrinsics (the old variable-location
 6072 // design). They're meaningless as far as LLVM is concerned; we could make
 6073 // it an error for them to appear, but it's possible we'll have users
 6074 // converting back to intrinsics for the foreseeable future (such as DXIL),
 6075 // so tolerate their existence.
6076 break;
6077 case Intrinsic::memcpy:
6078 case Intrinsic::memcpy_inline:
6079 case Intrinsic::memmove:
6080 case Intrinsic::memset:
6081 case Intrinsic::memset_inline:
6082 break;
6083 case Intrinsic::experimental_memset_pattern: {
6084 const auto Memset = cast<MemSetPatternInst>(&Call);
6085 Check(Memset->getValue()->getType()->isSized(),
6086 "unsized types cannot be used as memset patterns", Call);
6087 break;
6088 }
6089 case Intrinsic::memcpy_element_unordered_atomic:
6090 case Intrinsic::memmove_element_unordered_atomic:
6091 case Intrinsic::memset_element_unordered_atomic: {
6092 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6093
6094 ConstantInt *ElementSizeCI =
6095 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6096 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6097 Check(ElementSizeVal.isPowerOf2(),
6098 "element size of the element-wise atomic memory intrinsic "
6099 "must be a power of 2",
6100 Call);
6101
6102 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6103 return Alignment && ElementSizeVal.ule(Alignment->value());
6104 };
6105 Check(IsValidAlignment(AMI->getDestAlign()),
6106 "incorrect alignment of the destination argument", Call);
6107 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6108 Check(IsValidAlignment(AMT->getSourceAlign()),
6109 "incorrect alignment of the source argument", Call);
6110 }
6111 break;
6112 }
6113 case Intrinsic::call_preallocated_setup: {
6114 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6115 bool FoundCall = false;
6116 for (User *U : Call.users()) {
6117 auto *UseCall = dyn_cast<CallBase>(U);
6118 Check(UseCall != nullptr,
6119 "Uses of llvm.call.preallocated.setup must be calls");
6120 Intrinsic::ID IID = UseCall->getIntrinsicID();
6121 if (IID == Intrinsic::call_preallocated_arg) {
6122 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6123 Check(AllocArgIndex != nullptr,
6124 "llvm.call.preallocated.alloc arg index must be a constant");
6125 auto AllocArgIndexInt = AllocArgIndex->getValue();
6126 Check(AllocArgIndexInt.sge(0) &&
6127 AllocArgIndexInt.slt(NumArgs->getValue()),
6128 "llvm.call.preallocated.alloc arg index must be between 0 and "
6129 "corresponding "
6130 "llvm.call.preallocated.setup's argument count");
6131 } else if (IID == Intrinsic::call_preallocated_teardown) {
6132 // nothing to do
6133 } else {
6134 Check(!FoundCall, "Can have at most one call corresponding to a "
6135 "llvm.call.preallocated.setup");
6136 FoundCall = true;
6137 size_t NumPreallocatedArgs = 0;
6138 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6139 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6140 ++NumPreallocatedArgs;
6141 }
6142 }
6143 Check(NumPreallocatedArgs != 0,
6144 "cannot use preallocated intrinsics on a call without "
6145 "preallocated arguments");
6146 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6147 "llvm.call.preallocated.setup arg size must be equal to number "
6148 "of preallocated arguments "
6149 "at call site",
6150 Call, *UseCall);
 6151 // getOperandBundle() cannot be called if more than one of the operand
 6152 // bundles exist. There is already a check elsewhere for this, so skip
6153 // here if we see more than one.
6154 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6155 1) {
6156 return;
6157 }
6158 auto PreallocatedBundle =
6159 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6160 Check(PreallocatedBundle,
6161 "Use of llvm.call.preallocated.setup outside intrinsics "
6162 "must be in \"preallocated\" operand bundle");
6163 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6164 "preallocated bundle must have token from corresponding "
6165 "llvm.call.preallocated.setup");
6166 }
6167 }
6168 break;
6169 }
6170 case Intrinsic::call_preallocated_arg: {
6171 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6172 Check(Token &&
6173 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6174 "llvm.call.preallocated.arg token argument must be a "
6175 "llvm.call.preallocated.setup");
6176 Check(Call.hasFnAttr(Attribute::Preallocated),
6177 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6178 "call site attribute");
6179 break;
6180 }
6181 case Intrinsic::call_preallocated_teardown: {
6182 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6183 Check(Token &&
6184 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6185 "llvm.call.preallocated.teardown token argument must be a "
6186 "llvm.call.preallocated.setup");
6187 break;
6188 }
6189 case Intrinsic::gcroot:
6190 case Intrinsic::gcwrite:
6191 case Intrinsic::gcread:
6192 if (ID == Intrinsic::gcroot) {
6193 AllocaInst *AI =
6195 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6197 "llvm.gcroot parameter #2 must be a constant.", Call);
6198 if (!AI->getAllocatedType()->isPointerTy()) {
6200 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6201 "or argument #2 must be a non-null constant.",
6202 Call);
6203 }
6204 }
6205
6206 Check(Call.getParent()->getParent()->hasGC(),
6207 "Enclosing function does not use GC.", Call);
6208 break;
6209 case Intrinsic::init_trampoline:
6211 "llvm.init_trampoline parameter #2 must resolve to a function.",
6212 Call);
6213 break;
6214 case Intrinsic::prefetch:
6215 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6216 "rw argument to llvm.prefetch must be 0-1", Call);
6217 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6218 "locality argument to llvm.prefetch must be 0-3", Call);
6219 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6220 "cache type argument to llvm.prefetch must be 0-1", Call);
6221 break;
6222 case Intrinsic::reloc_none: {
6224 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6225 "llvm.reloc.none argument must be a metadata string", &Call);
6226 break;
6227 }
6228 case Intrinsic::stackprotector:
6230 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6231 break;
6232 case Intrinsic::localescape: {
6233 BasicBlock *BB = Call.getParent();
6234 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6235 Call);
6236 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6237 Call);
6238 for (Value *Arg : Call.args()) {
6239 if (isa<ConstantPointerNull>(Arg))
6240 continue; // Null values are allowed as placeholders.
6241 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6242 Check(AI && AI->isStaticAlloca(),
6243 "llvm.localescape only accepts static allocas", Call);
6244 }
6245 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6246 SawFrameEscape = true;
6247 break;
6248 }
6249 case Intrinsic::localrecover: {
6251 Function *Fn = dyn_cast<Function>(FnArg);
6252 Check(Fn && !Fn->isDeclaration(),
6253 "llvm.localrecover first "
6254 "argument must be function defined in this module",
6255 Call);
6256 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6257 auto &Entry = FrameEscapeInfo[Fn];
6258 Entry.second = unsigned(
6259 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6260 break;
6261 }
6262
6263 case Intrinsic::experimental_gc_statepoint:
6264 if (auto *CI = dyn_cast<CallInst>(&Call))
6265 Check(!CI->isInlineAsm(),
6266 "gc.statepoint support for inline assembly unimplemented", CI);
6267 Check(Call.getParent()->getParent()->hasGC(),
6268 "Enclosing function does not use GC.", Call);
6269
6270 verifyStatepoint(Call);
6271 break;
6272 case Intrinsic::experimental_gc_result: {
6273 Check(Call.getParent()->getParent()->hasGC(),
6274 "Enclosing function does not use GC.", Call);
6275
6276 auto *Statepoint = Call.getArgOperand(0);
6277 if (isa<UndefValue>(Statepoint))
6278 break;
6279
6280 // Are we tied to a statepoint properly?
6281 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6282 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6283 Intrinsic::experimental_gc_statepoint,
6284 "gc.result operand #1 must be from a statepoint", Call,
6285 Call.getArgOperand(0));
6286
6287 // Check that result type matches wrapped callee.
6288 auto *TargetFuncType =
6289 cast<FunctionType>(StatepointCall->getParamElementType(2));
6290 Check(Call.getType() == TargetFuncType->getReturnType(),
6291 "gc.result result type does not match wrapped callee", Call);
6292 break;
6293 }
6294 case Intrinsic::experimental_gc_relocate: {
6295 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6296
6298 "gc.relocate must return a pointer or a vector of pointers", Call);
6299
6300 // Check that this relocate is correctly tied to the statepoint
6301
 6302 // This is the case for a relocate on the unwinding path of an invoke statepoint
6303 if (LandingPadInst *LandingPad =
6305
6306 const BasicBlock *InvokeBB =
6307 LandingPad->getParent()->getUniquePredecessor();
6308
6309 // Landingpad relocates should have only one predecessor with invoke
6310 // statepoint terminator
6311 Check(InvokeBB, "safepoints should have unique landingpads",
6312 LandingPad->getParent());
6313 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6314 InvokeBB);
6316 "gc relocate should be linked to a statepoint", InvokeBB);
6317 } else {
6318 // In all other cases relocate should be tied to the statepoint directly.
6319 // This covers relocates on a normal return path of invoke statepoint and
6320 // relocates of a call statepoint.
6321 auto *Token = Call.getArgOperand(0);
6323 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6324 }
6325
6326 // Verify rest of the relocate arguments.
6327 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6328
6329 // Both the base and derived must be piped through the safepoint.
6332 "gc.relocate operand #2 must be integer offset", Call);
6333
6334 Value *Derived = Call.getArgOperand(2);
6335 Check(isa<ConstantInt>(Derived),
6336 "gc.relocate operand #3 must be integer offset", Call);
6337
6338 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6339 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6340
6341 // Check the bounds
6342 if (isa<UndefValue>(StatepointCall))
6343 break;
6344 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6345 .getOperandBundle(LLVMContext::OB_gc_live)) {
6346 Check(BaseIndex < Opt->Inputs.size(),
6347 "gc.relocate: statepoint base index out of bounds", Call);
6348 Check(DerivedIndex < Opt->Inputs.size(),
6349 "gc.relocate: statepoint derived index out of bounds", Call);
6350 }
6351
6352 // Relocated value must be either a pointer type or vector-of-pointer type,
6353 // but gc_relocate does not need to return the same pointer type as the
6354 // relocated pointer. It can be casted to the correct type later if it's
6355 // desired. However, they must have the same address space and 'vectorness'
6356 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6357 auto *ResultType = Call.getType();
6358 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6359 auto *BaseType = Relocate.getBasePtr()->getType();
6360
6361 Check(BaseType->isPtrOrPtrVectorTy(),
6362 "gc.relocate: relocated value must be a pointer", Call);
6363 Check(DerivedType->isPtrOrPtrVectorTy(),
6364 "gc.relocate: relocated value must be a pointer", Call);
6365
6366 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6367 "gc.relocate: vector relocates to vector and pointer to pointer",
6368 Call);
6369 Check(
6370 ResultType->getPointerAddressSpace() ==
6371 DerivedType->getPointerAddressSpace(),
6372 "gc.relocate: relocating a pointer shouldn't change its address space",
6373 Call);
6374
6375 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6376 Check(GC, "gc.relocate: calling function must have GCStrategy",
6377 Call.getFunction());
6378 if (GC) {
6379 auto isGCPtr = [&GC](Type *PTy) {
6380 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6381 };
6382 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6383 Check(isGCPtr(BaseType),
6384 "gc.relocate: relocated value must be a gc pointer", Call);
6385 Check(isGCPtr(DerivedType),
6386 "gc.relocate: relocated value must be a gc pointer", Call);
6387 }
6388 break;
6389 }
6390 case Intrinsic::experimental_patchpoint: {
6391 if (Call.getCallingConv() == CallingConv::AnyReg) {
6393 "patchpoint: invalid return type used with anyregcc", Call);
6394 }
6395 break;
6396 }
6397 case Intrinsic::eh_exceptioncode:
6398 case Intrinsic::eh_exceptionpointer: {
6400 "eh.exceptionpointer argument must be a catchpad", Call);
6401 break;
6402 }
6403 case Intrinsic::get_active_lane_mask: {
6405 "get_active_lane_mask: must return a "
6406 "vector",
6407 Call);
6408 auto *ElemTy = Call.getType()->getScalarType();
6409 Check(ElemTy->isIntegerTy(1),
6410 "get_active_lane_mask: element type is not "
6411 "i1",
6412 Call);
6413 break;
6414 }
6415 case Intrinsic::experimental_get_vector_length: {
6416 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6417 Check(!VF->isNegative() && !VF->isZero(),
6418 "get_vector_length: VF must be positive", Call);
6419 break;
6420 }
6421 case Intrinsic::masked_load: {
6422 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6423 Call);
6424
6426 Value *PassThru = Call.getArgOperand(2);
6427 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6428 Call);
6429 Check(PassThru->getType() == Call.getType(),
6430 "masked_load: pass through and return type must match", Call);
6431 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6432 cast<VectorType>(Call.getType())->getElementCount(),
6433 "masked_load: vector mask must be same length as return", Call);
6434 break;
6435 }
6436 case Intrinsic::masked_store: {
6437 Value *Val = Call.getArgOperand(0);
6439 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6440 Call);
6441 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6442 cast<VectorType>(Val->getType())->getElementCount(),
6443 "masked_store: vector mask must be same length as value", Call);
6444 break;
6445 }
6446
6447 case Intrinsic::experimental_guard: {
6448 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6450 "experimental_guard must have exactly one "
6451 "\"deopt\" operand bundle");
6452 break;
6453 }
6454
6455 case Intrinsic::experimental_deoptimize: {
6456 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6457 Call);
6459 "experimental_deoptimize must have exactly one "
6460 "\"deopt\" operand bundle");
6462 "experimental_deoptimize return type must match caller return type");
6463
6464 if (isa<CallInst>(Call)) {
6466 Check(RI,
6467 "calls to experimental_deoptimize must be followed by a return");
6468
6469 if (!Call.getType()->isVoidTy() && RI)
6470 Check(RI->getReturnValue() == &Call,
6471 "calls to experimental_deoptimize must be followed by a return "
6472 "of the value computed by experimental_deoptimize");
6473 }
6474
6475 break;
6476 }
6477 case Intrinsic::vastart: {
6479 "va_start called in a non-varargs function");
6480 break;
6481 }
6482 case Intrinsic::get_dynamic_area_offset: {
6483 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6484 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6485 IntTy->getBitWidth(),
6486 "get_dynamic_area_offset result type must be scalar integer matching "
6487 "alloca address space width",
6488 Call);
6489 break;
6490 }
6491 case Intrinsic::vector_reduce_and:
6492 case Intrinsic::vector_reduce_or:
6493 case Intrinsic::vector_reduce_xor:
6494 case Intrinsic::vector_reduce_add:
6495 case Intrinsic::vector_reduce_mul:
6496 case Intrinsic::vector_reduce_smax:
6497 case Intrinsic::vector_reduce_smin:
6498 case Intrinsic::vector_reduce_umax:
6499 case Intrinsic::vector_reduce_umin: {
6500 Type *ArgTy = Call.getArgOperand(0)->getType();
6501 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6502 "Intrinsic has incorrect argument type!");
6503 break;
6504 }
6505 case Intrinsic::vector_reduce_fmax:
6506 case Intrinsic::vector_reduce_fmin: {
6507 Type *ArgTy = Call.getArgOperand(0)->getType();
6508 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6509 "Intrinsic has incorrect argument type!");
6510 break;
6511 }
6512 case Intrinsic::vector_reduce_fadd:
6513 case Intrinsic::vector_reduce_fmul: {
6514 // Unlike the other reductions, the first argument is a start value. The
6515 // second argument is the vector to be reduced.
6516 Type *ArgTy = Call.getArgOperand(1)->getType();
6517 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6518 "Intrinsic has incorrect argument type!");
6519 break;
6520 }
6521 case Intrinsic::smul_fix:
6522 case Intrinsic::smul_fix_sat:
6523 case Intrinsic::umul_fix:
6524 case Intrinsic::umul_fix_sat:
6525 case Intrinsic::sdiv_fix:
6526 case Intrinsic::sdiv_fix_sat:
6527 case Intrinsic::udiv_fix:
6528 case Intrinsic::udiv_fix_sat: {
6529 Value *Op1 = Call.getArgOperand(0);
6530 Value *Op2 = Call.getArgOperand(1);
6532 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6533 "vector of ints");
6535 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6536 "vector of ints");
6537
6538 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6539 Check(Op3->getType()->isIntegerTy(),
6540 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6541 Check(Op3->getBitWidth() <= 32,
6542 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6543
6544 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6545 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6546 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6547 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6548 "the operands");
6549 } else {
6550 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6551 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6552 "to the width of the operands");
6553 }
6554 break;
6555 }
6556 case Intrinsic::lrint:
6557 case Intrinsic::llrint:
6558 case Intrinsic::lround:
6559 case Intrinsic::llround: {
6560 Type *ValTy = Call.getArgOperand(0)->getType();
6561 Type *ResultTy = Call.getType();
6562 auto *VTy = dyn_cast<VectorType>(ValTy);
6563 auto *RTy = dyn_cast<VectorType>(ResultTy);
6564 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6565 ExpectedName + ": argument must be floating-point or vector "
6566 "of floating-points, and result must be integer or "
6567 "vector of integers",
6568 &Call);
6569 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6570 ExpectedName + ": argument and result disagree on vector use", &Call);
6571 if (VTy) {
6572 Check(VTy->getElementCount() == RTy->getElementCount(),
6573 ExpectedName + ": argument must be same length as result", &Call);
6574 }
6575 break;
6576 }
6577 case Intrinsic::bswap: {
6578 Type *Ty = Call.getType();
6579 unsigned Size = Ty->getScalarSizeInBits();
6580 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6581 break;
6582 }
6583 case Intrinsic::invariant_start: {
6584 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6585 Check(InvariantSize &&
6586 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6587 "invariant_start parameter must be -1, 0 or a positive number",
6588 &Call);
6589 break;
6590 }
6591 case Intrinsic::matrix_multiply:
6592 case Intrinsic::matrix_transpose:
6593 case Intrinsic::matrix_column_major_load:
6594 case Intrinsic::matrix_column_major_store: {
6596 ConstantInt *Stride = nullptr;
6597 ConstantInt *NumRows;
6598 ConstantInt *NumColumns;
6599 VectorType *ResultTy;
6600 Type *Op0ElemTy = nullptr;
6601 Type *Op1ElemTy = nullptr;
6602 switch (ID) {
6603 case Intrinsic::matrix_multiply: {
6604 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6605 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6606 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6608 ->getNumElements() ==
6609 NumRows->getZExtValue() * N->getZExtValue(),
6610 "First argument of a matrix operation does not match specified "
6611 "shape!");
6613 ->getNumElements() ==
6614 N->getZExtValue() * NumColumns->getZExtValue(),
6615 "Second argument of a matrix operation does not match specified "
6616 "shape!");
6617
6618 ResultTy = cast<VectorType>(Call.getType());
6619 Op0ElemTy =
6620 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6621 Op1ElemTy =
6622 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6623 break;
6624 }
6625 case Intrinsic::matrix_transpose:
6626 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6627 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6628 ResultTy = cast<VectorType>(Call.getType());
6629 Op0ElemTy =
6630 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6631 break;
6632 case Intrinsic::matrix_column_major_load: {
6634 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6635 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6636 ResultTy = cast<VectorType>(Call.getType());
6637 break;
6638 }
6639 case Intrinsic::matrix_column_major_store: {
6641 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6642 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6643 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6644 Op0ElemTy =
6645 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6646 break;
6647 }
6648 default:
6649 llvm_unreachable("unexpected intrinsic");
6650 }
6651
6652 Check(ResultTy->getElementType()->isIntegerTy() ||
6653 ResultTy->getElementType()->isFloatingPointTy(),
6654 "Result type must be an integer or floating-point type!", IF);
6655
6656 if (Op0ElemTy)
6657 Check(ResultTy->getElementType() == Op0ElemTy,
6658 "Vector element type mismatch of the result and first operand "
6659 "vector!",
6660 IF);
6661
6662 if (Op1ElemTy)
6663 Check(ResultTy->getElementType() == Op1ElemTy,
6664 "Vector element type mismatch of the result and second operand "
6665 "vector!",
6666 IF);
6667
6669 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6670 "Result of a matrix operation does not fit in the returned vector!");
6671
6672 if (Stride) {
6673 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6674 IF);
6675 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6676 "Stride must be greater or equal than the number of rows!", IF);
6677 }
6678
6679 break;
6680 }
6681 case Intrinsic::vector_splice_left:
6682 case Intrinsic::vector_splice_right: {
6684 uint64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6685 uint64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6686 if (VecTy->isScalableTy() && Call.getParent() &&
6687 Call.getParent()->getParent()) {
6688 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6689 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6690 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6691 }
6692 if (ID == Intrinsic::vector_splice_left)
6693 Check(Idx < KnownMinNumElements,
6694 "The splice index exceeds the range [0, VL-1] where VL is the "
6695 "known minimum number of elements in the vector. For scalable "
6696 "vectors the minimum number of elements is determined from "
6697 "vscale_range.",
6698 &Call);
6699 else
6700 Check(Idx <= KnownMinNumElements,
6701 "The splice index exceeds the range [0, VL] where VL is the "
6702 "known minimum number of elements in the vector. For scalable "
6703 "vectors the minimum number of elements is determined from "
6704 "vscale_range.",
6705 &Call);
6706 break;
6707 }
6708 case Intrinsic::stepvector: {
6710 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6711 VecTy->getScalarSizeInBits() >= 8,
6712 "stepvector only supported for vectors of integers "
6713 "with a bitwidth of at least 8.",
6714 &Call);
6715 break;
6716 }
6717 case Intrinsic::experimental_vector_match: {
6718 Value *Op1 = Call.getArgOperand(0);
6719 Value *Op2 = Call.getArgOperand(1);
6721
6722 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6723 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6724 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6725
6726 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6728 "Second operand must be a fixed length vector.", &Call);
6729 Check(Op1Ty->getElementType()->isIntegerTy(),
6730 "First operand must be a vector of integers.", &Call);
6731 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6732 "First two operands must have the same element type.", &Call);
6733 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6734 "First operand and mask must have the same number of elements.",
6735 &Call);
6736 Check(MaskTy->getElementType()->isIntegerTy(1),
6737 "Mask must be a vector of i1's.", &Call);
6738 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6739 &Call);
6740 break;
6741 }
6742 case Intrinsic::vector_insert: {
6743 Value *Vec = Call.getArgOperand(0);
6744 Value *SubVec = Call.getArgOperand(1);
6745 Value *Idx = Call.getArgOperand(2);
6746 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6747
6748 VectorType *VecTy = cast<VectorType>(Vec->getType());
6749 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6750
6751 ElementCount VecEC = VecTy->getElementCount();
6752 ElementCount SubVecEC = SubVecTy->getElementCount();
6753 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6754 "vector_insert parameters must have the same element "
6755 "type.",
6756 &Call);
6757 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6758 "vector_insert index must be a constant multiple of "
6759 "the subvector's known minimum vector length.");
6760
6761 // If this insertion is not the 'mixed' case where a fixed vector is
6762 // inserted into a scalable vector, ensure that the insertion of the
6763 // subvector does not overrun the parent vector.
6764 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6765 Check(IdxN < VecEC.getKnownMinValue() &&
6766 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6767 "subvector operand of vector_insert would overrun the "
6768 "vector being inserted into.");
6769 }
6770 break;
6771 }
6772 case Intrinsic::vector_extract: {
6773 Value *Vec = Call.getArgOperand(0);
6774 Value *Idx = Call.getArgOperand(1);
6775 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6776
6777 VectorType *ResultTy = cast<VectorType>(Call.getType());
6778 VectorType *VecTy = cast<VectorType>(Vec->getType());
6779
6780 ElementCount VecEC = VecTy->getElementCount();
6781 ElementCount ResultEC = ResultTy->getElementCount();
6782
6783 Check(ResultTy->getElementType() == VecTy->getElementType(),
6784 "vector_extract result must have the same element "
6785 "type as the input vector.",
6786 &Call);
6787 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6788 "vector_extract index must be a constant multiple of "
6789 "the result type's known minimum vector length.");
6790
6791 // If this extraction is not the 'mixed' case where a fixed vector is
6792 // extracted from a scalable vector, ensure that the extraction does not
6793 // overrun the parent vector.
6794 if (VecEC.isScalable() == ResultEC.isScalable()) {
6795 Check(IdxN < VecEC.getKnownMinValue() &&
6796 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6797 "vector_extract would overrun.");
6798 }
6799 break;
6800 }
6801 case Intrinsic::vector_partial_reduce_fadd:
6802 case Intrinsic::vector_partial_reduce_add: {
6805
6806 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6807 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6808
6809 Check((VecWidth % AccWidth) == 0,
6810 "Invalid vector widths for partial "
6811 "reduction. The width of the input vector "
6812 "must be a positive integer multiple of "
6813 "the width of the accumulator vector.");
6814 break;
6815 }
6816 case Intrinsic::experimental_noalias_scope_decl: {
6817 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6818 break;
6819 }
6820 case Intrinsic::preserve_array_access_index:
6821 case Intrinsic::preserve_struct_access_index:
6822 case Intrinsic::aarch64_ldaxr:
6823 case Intrinsic::aarch64_ldxr:
6824 case Intrinsic::arm_ldaex:
6825 case Intrinsic::arm_ldrex: {
6826 Type *ElemTy = Call.getParamElementType(0);
6827 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6828 &Call);
6829 break;
6830 }
6831 case Intrinsic::aarch64_stlxr:
6832 case Intrinsic::aarch64_stxr:
6833 case Intrinsic::arm_stlex:
6834 case Intrinsic::arm_strex: {
6835 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6836 Check(ElemTy,
6837 "Intrinsic requires elementtype attribute on second argument.",
6838 &Call);
6839 break;
6840 }
6841 case Intrinsic::aarch64_prefetch: {
6842 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6843 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6844 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6845 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6846 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6847 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6848 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6849 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6850 break;
6851 }
6852 case Intrinsic::aarch64_range_prefetch: {
6853 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6854 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6855 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6856 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6857 Call);
6858 break;
6859 }
6860 case Intrinsic::callbr_landingpad: {
6861 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6862 Check(CBR, "intrinstic requires callbr operand", &Call);
6863 if (!CBR)
6864 break;
6865
6866 const BasicBlock *LandingPadBB = Call.getParent();
6867 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6868 if (!PredBB) {
6869 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6870 break;
6871 }
6872 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6873 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6874 &Call);
6875 break;
6876 }
6877 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6878 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6879 "block in indirect destination list",
6880 &Call);
6881 const Instruction &First = *LandingPadBB->begin();
6882 Check(&First == &Call, "No other instructions may proceed intrinsic",
6883 &Call);
6884 break;
6885 }
6886 case Intrinsic::amdgcn_cs_chain: {
6887 auto CallerCC = Call.getCaller()->getCallingConv();
6888 switch (CallerCC) {
6889 case CallingConv::AMDGPU_CS:
6890 case CallingConv::AMDGPU_CS_Chain:
6891 case CallingConv::AMDGPU_CS_ChainPreserve:
6892 case CallingConv::AMDGPU_ES:
6893 case CallingConv::AMDGPU_GS:
6894 case CallingConv::AMDGPU_HS:
6895 case CallingConv::AMDGPU_LS:
6896 case CallingConv::AMDGPU_VS:
6897 break;
6898 default:
6899 CheckFailed("Intrinsic cannot be called from functions with this "
6900 "calling convention",
6901 &Call);
6902 break;
6903 }
6904
6905 Check(Call.paramHasAttr(2, Attribute::InReg),
6906 "SGPR arguments must have the `inreg` attribute", &Call);
6907 Check(!Call.paramHasAttr(3, Attribute::InReg),
6908 "VGPR arguments must not have the `inreg` attribute", &Call);
6909
6910 auto *Next = Call.getNextNode();
6911 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6912 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6913 Intrinsic::amdgcn_unreachable;
6914 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6915 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6916 break;
6917 }
6918 case Intrinsic::amdgcn_init_exec_from_input: {
6919 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6920 Check(Arg && Arg->hasInRegAttr(),
6921 "only inreg arguments to the parent function are valid as inputs to "
6922 "this intrinsic",
6923 &Call);
6924 break;
6925 }
6926 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6927 auto CallerCC = Call.getCaller()->getCallingConv();
6928 switch (CallerCC) {
6929 case CallingConv::AMDGPU_CS_Chain:
6930 case CallingConv::AMDGPU_CS_ChainPreserve:
6931 break;
6932 default:
6933 CheckFailed("Intrinsic can only be used from functions with the "
6934 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6935 "calling conventions",
6936 &Call);
6937 break;
6938 }
6939
6940 unsigned InactiveIdx = 1;
6941 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6942 "Value for inactive lanes must not have the `inreg` attribute",
6943 &Call);
6944 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6945 "Value for inactive lanes must be a function argument", &Call);
6946 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6947 "Value for inactive lanes must be a VGPR function argument", &Call);
6948 break;
6949 }
6950 case Intrinsic::amdgcn_call_whole_wave: {
6952 Check(F, "Indirect whole wave calls are not allowed", &Call);
6953
6954 CallingConv::ID CC = F->getCallingConv();
6955 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6956 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6957 &Call);
6958
6959 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6960
6961 Check(Call.arg_size() == F->arg_size(),
6962 "Call argument count must match callee argument count", &Call);
6963
6964 // The first argument of the call is the callee, and the first argument of
6965 // the callee is the active mask. The rest of the arguments must match.
6966 Check(F->arg_begin()->getType()->isIntegerTy(1),
6967 "Callee must have i1 as its first argument", &Call);
6968 for (auto [CallArg, FuncArg] :
6969 drop_begin(zip_equal(Call.args(), F->args()))) {
6970 Check(CallArg->getType() == FuncArg.getType(),
6971 "Argument types must match", &Call);
6972
6973 // Check that inreg attributes match between call site and function
6974 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6975 FuncArg.hasInRegAttr(),
6976 "Argument inreg attributes must match", &Call);
6977 }
6978 break;
6979 }
6980 case Intrinsic::amdgcn_s_prefetch_data: {
6981 Check(
6984 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6985 break;
6986 }
6987 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6988 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6989 Value *Src0 = Call.getArgOperand(0);
6990 Value *Src1 = Call.getArgOperand(1);
6991
6992 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6993 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6994 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6995 Call.getArgOperand(3));
6996 Check(BLGP <= 4, "invalid value for blgp format", Call,
6997 Call.getArgOperand(4));
6998
6999 // AMDGPU::MFMAScaleFormats values
7000 auto getFormatNumRegs = [](unsigned FormatVal) {
7001 switch (FormatVal) {
7002 case 0:
7003 case 1:
7004 return 8u;
7005 case 2:
7006 case 3:
7007 return 6u;
7008 case 4:
7009 return 4u;
7010 default:
7011 llvm_unreachable("invalid format value");
7012 }
7013 };
7014
7015 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7016 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7017 return false;
7018 unsigned NumElts = Ty->getNumElements();
7019 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7020 };
7021
7022 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7023 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7024 Check(isValidSrcASrcBVector(Src0Ty),
7025 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7026 Check(isValidSrcASrcBVector(Src1Ty),
7027 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7028
7029 // Permit excess registers for the format.
7030 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7031 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7032 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7033 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7034 break;
7035 }
7036 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7037 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7038 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7039 Value *Src0 = Call.getArgOperand(1);
7040 Value *Src1 = Call.getArgOperand(3);
7041
7042 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7043 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7044 Check(FmtA <= 4, "invalid value for matrix format", Call,
7045 Call.getArgOperand(0));
7046 Check(FmtB <= 4, "invalid value for matrix format", Call,
7047 Call.getArgOperand(2));
7048
7049 // AMDGPU::MatrixFMT values
7050 auto getFormatNumRegs = [](unsigned FormatVal) {
7051 switch (FormatVal) {
7052 case 0:
7053 case 1:
7054 return 16u;
7055 case 2:
7056 case 3:
7057 return 12u;
7058 case 4:
7059 return 8u;
7060 default:
7061 llvm_unreachable("invalid format value");
7062 }
7063 };
7064
7065 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7066 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7067 return false;
7068 unsigned NumElts = Ty->getNumElements();
7069 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7070 };
7071
7072 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7073 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7074 Check(isValidSrcASrcBVector(Src0Ty),
7075 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7076 Check(isValidSrcASrcBVector(Src1Ty),
7077 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7078
7079 // Permit excess registers for the format.
7080 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7081 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7082 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7083 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7084 break;
7085 }
7086 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7087 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7088 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7089 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7090 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7091 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7092 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7093 Value *PtrArg = Call.getArgOperand(0);
7094 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7096 "cooperative atomic intrinsics require a generic or global pointer",
7097 &Call, PtrArg);
7098
7099 // Last argument must be a MD string
7101 MDNode *MD = cast<MDNode>(Op->getMetadata());
7102 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7103 "cooperative atomic intrinsics require that the last argument is a "
7104 "metadata string",
7105 &Call, Op);
7106 break;
7107 }
7108 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7109 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7110 Value *V = Call.getArgOperand(0);
7111 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7112 Check(RegCount % 8 == 0,
7113 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7114 break;
7115 }
7116 case Intrinsic::experimental_convergence_entry:
7117 case Intrinsic::experimental_convergence_anchor:
7118 break;
7119 case Intrinsic::experimental_convergence_loop:
7120 break;
7121 case Intrinsic::ptrmask: {
7122 Type *Ty0 = Call.getArgOperand(0)->getType();
7123 Type *Ty1 = Call.getArgOperand(1)->getType();
7125 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7126 "of pointers",
7127 &Call);
7128 Check(
7129 Ty0->isVectorTy() == Ty1->isVectorTy(),
7130 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7131 &Call);
7132 if (Ty0->isVectorTy())
7133 Check(cast<VectorType>(Ty0)->getElementCount() ==
7134 cast<VectorType>(Ty1)->getElementCount(),
7135 "llvm.ptrmask intrinsic arguments must have the same number of "
7136 "elements",
7137 &Call);
7138 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7139 "llvm.ptrmask intrinsic second argument bitwidth must match "
7140 "pointer index type size of first argument",
7141 &Call);
7142 break;
7143 }
7144 case Intrinsic::thread_pointer: {
7146 DL.getDefaultGlobalsAddressSpace(),
7147 "llvm.thread.pointer intrinsic return type must be for the globals "
7148 "address space",
7149 &Call);
7150 break;
7151 }
7152 case Intrinsic::threadlocal_address: {
7153 const Value &Arg0 = *Call.getArgOperand(0);
7154 Check(isa<GlobalValue>(Arg0),
7155 "llvm.threadlocal.address first argument must be a GlobalValue");
7156 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7157 "llvm.threadlocal.address operand isThreadLocal() must be true");
7158 break;
7159 }
7160 case Intrinsic::lifetime_start:
7161 case Intrinsic::lifetime_end: {
7162 Value *Ptr = Call.getArgOperand(0);
7164 "llvm.lifetime.start/end can only be used on alloca or poison",
7165 &Call);
7166 break;
7167 }
7168 };
7169
7170 // Verify that there aren't any unmediated control transfers between funclets.
7172 Function *F = Call.getParent()->getParent();
7173 if (F->hasPersonalityFn() &&
7174 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7175 // Run EH funclet coloring on-demand and cache results for other intrinsic
7176 // calls in this function
7177 if (BlockEHFuncletColors.empty())
7178 BlockEHFuncletColors = colorEHFunclets(*F);
7179
7180 // Check for catch-/cleanup-pad in first funclet block
7181 bool InEHFunclet = false;
7182 BasicBlock *CallBB = Call.getParent();
7183 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7184 assert(CV.size() > 0 && "Uncolored block");
7185 for (BasicBlock *ColorFirstBB : CV)
7186 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7187 It != ColorFirstBB->end())
7189 InEHFunclet = true;
7190
7191 // Check for funclet operand bundle
7192 bool HasToken = false;
7193 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7195 HasToken = true;
7196
7197 // This would cause silent code truncation in WinEHPrepare
7198 if (InEHFunclet)
7199 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7200 }
7201 }
7202}
7203
7204/// Carefully grab the subprogram from a local scope.
7205///
7206/// This carefully grabs the subprogram from a local scope, avoiding the
7207/// built-in assertions that would typically fire.
///
/// Walks DILexicalBlockBase scopes recursively until a DISubprogram is
/// reached; returns null for a null scope or a broken scope chain (those
/// are diagnosed elsewhere).
// NOTE(review): the function signature line (original line 7208) is elided
// from this excerpt.
7209 if (!LocalScope)
7210 return nullptr;
7211
7212 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7213 return SP;
7214
// Lexical blocks don't own a subprogram directly; recurse through their
// raw scope operand.
7215 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7216 return getSubprogram(LB->getRawScope());
7217
7218 // Just return null; broken scope chains are checked elsewhere.
7219 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7220 return nullptr;
7221}
7222
// Verify a single #dbg_label record: the label operand, the presence of a
// !dbg attachment, and agreement between the label's and the location's
// enclosing subprograms.
7223void Verifier::visit(DbgLabelRecord &DLR) {
// NOTE(review): the opening of this CheckDI (validating that the raw label
// operand is a DILabel, original line 7224) is elided from this excerpt.
7225 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7226
7227 // Ignore broken !dbg attachments; they're checked elsewhere.
7228 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7229 if (!isa<DILocation>(N))
7230 return;
7231
7232 BasicBlock *BB = DLR.getParent();
7233 Function *F = BB ? BB->getParent() : nullptr;
7234
7235 // The scopes for variables and !dbg attachments must agree.
7236 DILabel *Label = DLR.getLabel();
7237 DILocation *Loc = DLR.getDebugLoc();
7238 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7239
7240 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7241 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
// Null here means a broken scope chain; that is diagnosed elsewhere.
7242 if (!LabelSP || !LocSP)
7243 return;
7244
7245 CheckDI(LabelSP == LocSP,
7246 "mismatched subprogram between #dbg_label label and !dbg attachment",
7247 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7248 Loc->getScope()->getSubprogram());
7249}
7250
// Verify a single #dbg_value/#dbg_declare/#dbg_assign record: record kind,
// location operand shape, variable/expression metadata, assign-specific
// operands, and subprogram agreement between variable and DILocation.
// NOTE(review): several CheckDI openings (original lines 7281, 7285, 7291,
// 7308) are elided from this excerpt; only their message/argument
// continuation lines appear below.
7251void Verifier::visit(DbgVariableRecord &DVR) {
7252 BasicBlock *BB = DVR.getParent();
7253 Function *F = BB->getParent();
7254
7255 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7256 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7257 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7258 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7259 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7260
7261 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7262 // DIArgList, or an empty MDNode (which is a legacy representation for an
7263 // "undef" location).
7264 auto *MD = DVR.getRawLocation();
7265 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7266 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7267 "invalid #dbg record address/value", &DVR, MD, BB, F);
7268 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7269 visitValueAsMetadata(*VAM, F);
7270 if (DVR.isDbgDeclare()) {
7271 // Allow integers here to support inttoptr salvage.
7272 Type *Ty = VAM->getValue()->getType();
7273 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7274 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7275 F);
7276 }
7277 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7278 visitDIArgList(*AL, F);
7279 }
7280
// Variable and expression operands must be valid debug metadata (checks
// opened on the elided lines noted above).
7282 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7283 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7284
7286 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7287 F);
7288 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7289
// #dbg_assign records additionally carry a DIAssignID, an address operand
// and an address expression; validate each.
7290 if (DVR.isDbgAssign()) {
7292 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7293 F);
7294 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7295 AreDebugLocsAllowed::No);
7296
7297 const auto *RawAddr = DVR.getRawAddress();
7298 // Similarly to the location above, the address for an assign
7299 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7300 // represents an undef address.
7301 CheckDI(
7302 isa<ValueAsMetadata>(RawAddr) ||
7303 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7304 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7305 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7306 visitValueAsMetadata(*VAM, F);
7307
7309 "invalid #dbg_assign address expression", &DVR,
7310 DVR.getRawAddressExpression(), BB, F);
7311 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7312
7313 // All of the linked instructions should be in the same function as DVR.
7314 for (Instruction *I : at::getAssignmentInsts(&DVR))
7315 CheckDI(DVR.getFunction() == I->getFunction(),
7316 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7317 }
7318
7319 // This check is redundant with one in visitLocalVariable().
7320 DILocalVariable *Var = DVR.getVariable();
7321 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7322 BB, F);
7323
7324 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7325 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7326 &DVR, DLNode, BB, F);
7327 DILocation *Loc = DVR.getDebugLoc();
7328
7329 // The scopes for variables and !dbg attachments must agree.
7330 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7331 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7332 if (!VarSP || !LocSP)
7333 return; // Broken scope chains are checked elsewhere.
7334
7335 CheckDI(VarSP == LocSP,
7336 "mismatched subprogram between #dbg record variable and DILocation",
7337 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7338 Loc->getScope()->getSubprogram(), BB, F);
7339
// Finally, check for duplicate debug-info entries for function arguments.
7340 verifyFnArgs(DVR);
7341}
7342
// Verify vector-predicated (VP) intrinsics: element-count and element-type
// constraints for VP casts, predicate validity for VP compares, the
// vp.is.fpclass test mask, and the experimental_vp_splice index range.
// NOTE(review): a few lines are elided from this excerpt — the pointer-type
// check for llvm.ptrmask-adjacent code (7124) and the openings of the
// predicate Checks for vp_fcmp/vp_icmp (7432, 7438).
7343void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
// VP cast intrinsics: result and source must have identical element counts,
// then per-cast element-type and bit-width constraints apply.
7344 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7345 auto *RetTy = cast<VectorType>(VPCast->getType());
7346 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7347 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7348 "VP cast intrinsic first argument and result vector lengths must be "
7349 "equal",
7350 *VPCast);
7351
7352 switch (VPCast->getIntrinsicID()) {
7353 default:
7354 llvm_unreachable("Unknown VP cast intrinsic");
7355 case Intrinsic::vp_trunc:
7356 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7357 "llvm.vp.trunc intrinsic first argument and result element type "
7358 "must be integer",
7359 *VPCast);
7360 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7361 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7362 "larger than the bit size of the return type",
7363 *VPCast);
7364 break;
7365 case Intrinsic::vp_zext:
7366 case Intrinsic::vp_sext:
7367 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7368 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7369 "element type must be integer",
7370 *VPCast);
7371 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7372 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7373 "argument must be smaller than the bit size of the return type",
7374 *VPCast);
7375 break;
7376 case Intrinsic::vp_fptoui:
7377 case Intrinsic::vp_fptosi:
7378 case Intrinsic::vp_lrint:
7379 case Intrinsic::vp_llrint:
7380 Check(
7381 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
// NOTE(review): the two adjacent literals below concatenate to
// "...llvm.vp.llrintintrinsic..." — a space appears to be missing between
// them (possibly an extraction artifact); confirm against upstream.
7382 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7383 "type must be floating-point and result element type must be integer",
7384 *VPCast);
7385 break;
7386 case Intrinsic::vp_uitofp:
7387 case Intrinsic::vp_sitofp:
7388 Check(
7389 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7390 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7391 "type must be integer and result element type must be floating-point",
7392 *VPCast);
7393 break;
7394 case Intrinsic::vp_fptrunc:
7395 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7396 "llvm.vp.fptrunc intrinsic first argument and result element type "
7397 "must be floating-point",
7398 *VPCast);
7399 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7400 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7401 "larger than the bit size of the return type",
7402 *VPCast);
7403 break;
7404 case Intrinsic::vp_fpext:
7405 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7406 "llvm.vp.fpext intrinsic first argument and result element type "
7407 "must be floating-point",
7408 *VPCast);
7409 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7410 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7411 "smaller than the bit size of the return type",
7412 *VPCast);
7413 break;
7414 case Intrinsic::vp_ptrtoint:
7415 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7416 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7417 "pointer and result element type must be integer",
7418 *VPCast);
7419 break;
7420 case Intrinsic::vp_inttoptr:
7421 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7422 "llvm.vp.inttoptr intrinsic first argument element type must be "
7423 "integer and result element type must be pointer",
7424 *VPCast);
7425 break;
7426 }
7427 }
7428
// Non-cast VP intrinsics with extra constraints.
7429 switch (VPI.getIntrinsicID()) {
7430 case Intrinsic::vp_fcmp: {
7431 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7433 "invalid predicate for VP FP comparison intrinsic", &VPI);
7434 break;
7435 }
7436 case Intrinsic::vp_icmp: {
7437 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7439 "invalid predicate for VP integer comparison intrinsic", &VPI);
7440 break;
7441 }
7442 case Intrinsic::vp_is_fpclass: {
// The test mask must only use bits covered by fcAllFlags.
7443 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7444 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7445 "unsupported bits for llvm.vp.is.fpclass test mask");
7446 break;
7447 }
7448 case Intrinsic::experimental_vp_splice: {
7449 VectorType *VecTy = cast<VectorType>(VPI.getType());
7450 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7451 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
// For scalable vectors, scale the known minimum by the function's
// vscale_range minimum when that attribute is present.
7452 if (VPI.getParent() && VPI.getParent()->getParent()) {
7453 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7454 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7455 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7456 }
7457 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7458 (Idx >= 0 && Idx < KnownMinNumElements),
7459 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7460 "known minimum number of elements in the vector. For scalable "
7461 "vectors the minimum number of elements is determined from "
7462 "vscale_range.",
7463 &VPI);
7464 break;
7465 }
7466 }
7467}
7468
// Verify constrained floating-point intrinsics: operand counts (including
// the trailing metadata operands), per-intrinsic type constraints, and
// that the exception-behavior / rounding-mode metadata parse to valid values.
// NOTE(review): three lines are elided from this excerpt — the initializer
// of HasRoundingMD (7472), the condition guarding the extra compare
// predicate operand (7478), and the opening of the predicate Check (7505).
7469void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7470 unsigned NumOperands = FPI.getNonMetadataArgCount();
7471 bool HasRoundingMD =
7473
7474 // Add the expected number of metadata operands.
7475 NumOperands += (1 + HasRoundingMD);
7476
7477 // Compare intrinsics carry an extra predicate metadata operand.
7479 NumOperands += 1;
7480 Check((FPI.arg_size() == NumOperands),
7481 "invalid arguments for constrained FP intrinsic", &FPI);
7482
7483 switch (FPI.getIntrinsicID()) {
7484 case Intrinsic::experimental_constrained_lrint:
7485 case Intrinsic::experimental_constrained_llrint: {
7486 Type *ValTy = FPI.getArgOperand(0)->getType();
7487 Type *ResultTy = FPI.getType();
7488 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7489 "Intrinsic does not support vectors", &FPI);
7490 break;
7491 }
7492
7493 case Intrinsic::experimental_constrained_lround:
7494 case Intrinsic::experimental_constrained_llround: {
7495 Type *ValTy = FPI.getArgOperand(0)->getType();
7496 Type *ResultTy = FPI.getType();
7497 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7498 "Intrinsic does not support vectors", &FPI);
7499 break;
7500 }
7501
7502 case Intrinsic::experimental_constrained_fcmp:
7503 case Intrinsic::experimental_constrained_fcmps: {
7504 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7506 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7507 break;
7508 }
7509
7510 case Intrinsic::experimental_constrained_fptosi:
7511 case Intrinsic::experimental_constrained_fptoui: {
// FP source, integer result; scalar/vector use and element counts must
// agree between source and result.
7512 Value *Operand = FPI.getArgOperand(0);
7513 ElementCount SrcEC;
7514 Check(Operand->getType()->isFPOrFPVectorTy(),
7515 "Intrinsic first argument must be floating point", &FPI);
7516 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7517 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7518 }
7519
// Re-point Operand at the intrinsic itself to check the result type.
7520 Operand = &FPI;
7521 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7522 "Intrinsic first argument and result disagree on vector use", &FPI);
7523 Check(Operand->getType()->isIntOrIntVectorTy(),
7524 "Intrinsic result must be an integer", &FPI);
7525 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7526 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7527 "Intrinsic first argument and result vector lengths must be equal",
7528 &FPI);
7529 }
7530 break;
7531 }
7532
7533 case Intrinsic::experimental_constrained_sitofp:
7534 case Intrinsic::experimental_constrained_uitofp: {
// Mirror of the fptosi/fptoui case: integer source, FP result.
7535 Value *Operand = FPI.getArgOperand(0);
7536 ElementCount SrcEC;
7537 Check(Operand->getType()->isIntOrIntVectorTy(),
7538 "Intrinsic first argument must be integer", &FPI);
7539 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7540 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7541 }
7542
7543 Operand = &FPI;
7544 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7545 "Intrinsic first argument and result disagree on vector use", &FPI);
7546 Check(Operand->getType()->isFPOrFPVectorTy(),
7547 "Intrinsic result must be a floating point", &FPI);
7548 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7549 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7550 "Intrinsic first argument and result vector lengths must be equal",
7551 &FPI);
7552 }
7553 break;
7554 }
7555
7556 case Intrinsic::experimental_constrained_fptrunc:
7557 case Intrinsic::experimental_constrained_fpext: {
// Both sides FP; fptrunc must narrow, fpext must widen.
7558 Value *Operand = FPI.getArgOperand(0);
7559 Type *OperandTy = Operand->getType();
7560 Value *Result = &FPI;
7561 Type *ResultTy = Result->getType();
7562 Check(OperandTy->isFPOrFPVectorTy(),
7563 "Intrinsic first argument must be FP or FP vector", &FPI);
7564 Check(ResultTy->isFPOrFPVectorTy(),
7565 "Intrinsic result must be FP or FP vector", &FPI);
7566 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7567 "Intrinsic first argument and result disagree on vector use", &FPI);
7568 if (OperandTy->isVectorTy()) {
7569 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7570 cast<VectorType>(ResultTy)->getElementCount(),
7571 "Intrinsic first argument and result vector lengths must be equal",
7572 &FPI);
7573 }
7574 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7575 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7576 "Intrinsic first argument's type must be larger than result type",
7577 &FPI);
7578 } else {
7579 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7580 "Intrinsic first argument's type must be smaller than result type",
7581 &FPI);
7582 }
7583 break;
7584 }
7585
7586 default:
7587 break;
7588 }
7589
7590 // If a non-metadata argument is passed in a metadata slot then the
7591 // error will be caught earlier when the incorrect argument doesn't
7592 // match the specification in the intrinsic call table. Thus, no
7593 // argument type check is needed here.
7594
// An empty optional means the metadata string did not parse to a valid
// exception behavior / rounding mode.
7595 Check(FPI.getExceptionBehavior().has_value(),
7596 "invalid exception behavior argument", &FPI);
7597 if (HasRoundingMD) {
7598 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7599 &FPI);
7600 }
7601}
7602
7603void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7604 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7605 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7606
7607 // We don't know whether this intrinsic verified correctly.
7608 if (!V || !E || !E->isValid())
7609 return;
7610
7611 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7612 auto Fragment = E->getFragmentInfo();
7613 if (!Fragment)
7614 return;
7615
7616 // The frontend helps out GDB by emitting the members of local anonymous
7617 // unions as artificial local variables with shared storage. When SROA splits
7618 // the storage for artificial local variables that are smaller than the entire
7619 // union, the overhang piece will be outside of the allotted space for the
7620 // variable and this check fails.
7621 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7622 if (V->isArtificial())
7623 return;
7624
7625 verifyFragmentExpression(*V, *Fragment, &DVR);
7626}
7627
/// Check a DW_OP_LLVM_fragment against the size of its variable: the
/// fragment must lie entirely within the variable's bits and must not
/// cover the variable completely (a full-cover fragment is redundant).
// NOTE(review): the second parameter line (the DIExpression fragment info,
// original line 7630) is elided from this excerpt.
7628template <typename ValueOrMetadata>
7629void Verifier::verifyFragmentExpression(const DIVariable &V,
7631 ValueOrMetadata *Desc) {
7632 // If there's no size, the type is broken, but that should be checked
7633 // elsewhere.
7634 auto VarSize = V.getSizeInBits();
7635 if (!VarSize)
7636 return;
7637
7638 unsigned FragSize = Fragment.SizeInBits;
7639 unsigned FragOffset = Fragment.OffsetInBits;
7640 CheckDI(FragSize + FragOffset <= *VarSize,
7641 "fragment is larger than or outside of variable", Desc, &V);
7642 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7643}
7644
7645void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7646 // This function does not take the scope of noninlined function arguments into
7647 // account. Don't run it if current function is nodebug, because it may
7648 // contain inlined debug intrinsics.
7649 if (!HasDebugInfo)
7650 return;
7651
7652 // For performance reasons only check non-inlined ones.
7653 if (DVR.getDebugLoc()->getInlinedAt())
7654 return;
7655
7656 DILocalVariable *Var = DVR.getVariable();
7657 CheckDI(Var, "#dbg record without variable");
7658
7659 unsigned ArgNo = Var->getArg();
7660 if (!ArgNo)
7661 return;
7662
7663 // Verify there are no duplicate function argument debug info entries.
7664 // These will cause hard-to-debug assertions in the DWARF backend.
7665 if (DebugFnArgs.size() < ArgNo)
7666 DebugFnArgs.resize(ArgNo, nullptr);
7667
7668 auto *Prev = DebugFnArgs[ArgNo - 1];
7669 DebugFnArgs[ArgNo - 1] = Var;
7670 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7671 Prev, Var);
7672}
7673
7674void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7675 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7676
7677 // We don't know whether this intrinsic verified correctly.
7678 if (!E || !E->isValid())
7679 return;
7680
7682 Value *VarValue = DVR.getVariableLocationOp(0);
7683 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7684 return;
7685 // We allow EntryValues for swift async arguments, as they have an
7686 // ABI-guarantee to be turned into a specific register.
7687 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7688 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7689 return;
7690 }
7691
7692 CheckDI(!E->isEntryValue(),
7693 "Entry values are only allowed in MIR unless they target a "
7694 "swiftasync Argument",
7695 &DVR);
7696}
7697
7698void Verifier::verifyCompileUnits() {
7699 // When more than one Module is imported into the same context, such as during
7700 // an LTO build before linking the modules, ODR type uniquing may cause types
7701 // to point to a different CU. This check does not make sense in this case.
7702 if (M.getContext().isODRUniquingDebugTypes())
7703 return;
7704 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7705 SmallPtrSet<const Metadata *, 2> Listed;
7706 if (CUs)
7707 Listed.insert_range(CUs->operands());
7708 for (const auto *CU : CUVisited)
7709 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7710 CUVisited.clear();
7711}
7712
7713void Verifier::verifyDeoptimizeCallingConvs() {
7714 if (DeoptimizeDeclarations.empty())
7715 return;
7716
7717 const Function *First = DeoptimizeDeclarations[0];
7718 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7719 Check(First->getCallingConv() == F->getCallingConv(),
7720 "All llvm.experimental.deoptimize declarations must have the same "
7721 "calling convention",
7722 First, F);
7723 }
7724}
7725
7726void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7727 const OperandBundleUse &BU) {
7728 FunctionType *FTy = Call.getFunctionType();
7729
7730 Check((FTy->getReturnType()->isPointerTy() ||
7731 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7732 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7733 "function returning a pointer or a non-returning function that has a "
7734 "void return type",
7735 Call);
7736
7737 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7738 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7739 "an argument",
7740 Call);
7741
7742 auto *Fn = cast<Function>(BU.Inputs.front());
7743 Intrinsic::ID IID = Fn->getIntrinsicID();
7744
7745 if (IID) {
7746 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7747 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7748 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7749 "invalid function argument", Call);
7750 } else {
7751 StringRef FnName = Fn->getName();
7752 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7753 FnName == "objc_claimAutoreleasedReturnValue" ||
7754 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7755 "invalid function argument", Call);
7756 }
7757}
7758
7759void Verifier::verifyNoAliasScopeDecl() {
7760 if (NoAliasScopeDecls.empty())
7761 return;
7762
7763 // only a single scope must be declared at a time.
7764 for (auto *II : NoAliasScopeDecls) {
7765 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7766 "Not a llvm.experimental.noalias.scope.decl ?");
7767 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7769 Check(ScopeListMV != nullptr,
7770 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7771 "argument",
7772 II);
7773
7774 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7775 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7776 Check(ScopeListMD->getNumOperands() == 1,
7777 "!id.scope.list must point to a list with a single scope", II);
7778 visitAliasScopeListMetadata(ScopeListMD);
7779 }
7780
7781 // Only check the domination rule when requested. Once all passes have been
7782 // adapted this option can go away.
7784 return;
7785
7786 // Now sort the intrinsics based on the scope MDNode so that declarations of
7787 // the same scopes are next to each other.
7788 auto GetScope = [](IntrinsicInst *II) {
7789 const auto *ScopeListMV = cast<MetadataAsValue>(
7791 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7792 };
7793
7794 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7795 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7796 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7797 return GetScope(Lhs) < GetScope(Rhs);
7798 };
7799
7800 llvm::sort(NoAliasScopeDecls, Compare);
7801
7802 // Go over the intrinsics and check that for the same scope, they are not
7803 // dominating each other.
7804 auto ItCurrent = NoAliasScopeDecls.begin();
7805 while (ItCurrent != NoAliasScopeDecls.end()) {
7806 auto CurScope = GetScope(*ItCurrent);
7807 auto ItNext = ItCurrent;
7808 do {
7809 ++ItNext;
7810 } while (ItNext != NoAliasScopeDecls.end() &&
7811 GetScope(*ItNext) == CurScope);
7812
7813 // [ItCurrent, ItNext) represents the declarations for the same scope.
7814 // Ensure they are not dominating each other.. but only if it is not too
7815 // expensive.
7816 if (ItNext - ItCurrent < 32)
7817 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7818 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7819 if (I != J)
7820 Check(!DT.dominates(I, J),
7821 "llvm.experimental.noalias.scope.decl dominates another one "
7822 "with the same scope",
7823 I);
7824 ItCurrent = ItNext;
7825 }
7826}
7827
7828//===----------------------------------------------------------------------===//
7829// Implement the public interfaces to this file...
7830//===----------------------------------------------------------------------===//
7831
7833 Function &F = const_cast<Function &>(f);
7834
7835 // Don't use a raw_null_ostream. Printing IR is expensive.
7836 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7837
7838 // Note that this function's return value is inverted from what you would
7839 // expect of a function called "verify".
7840 return !V.verify(F);
7841}
7842
7844 bool *BrokenDebugInfo) {
7845 // Don't use a raw_null_ostream. Printing IR is expensive.
7846 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7847
7848 bool Broken = false;
7849 for (const Function &F : M)
7850 Broken |= !V.verify(F);
7851
7852 Broken |= !V.verify();
7853 if (BrokenDebugInfo)
7854 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7855 // Note that this function's return value is inverted from what you would
7856 // expect of a function called "verify".
7857 return Broken;
7858}
7859
7860namespace {
7861
7862struct VerifierLegacyPass : public FunctionPass {
7863 static char ID;
7864
7865 std::unique_ptr<Verifier> V;
7866 bool FatalErrors = true;
7867
7868 VerifierLegacyPass() : FunctionPass(ID) {
7870 }
7871 explicit VerifierLegacyPass(bool FatalErrors)
7872 : FunctionPass(ID),
7873 FatalErrors(FatalErrors) {
7875 }
7876
7877 bool doInitialization(Module &M) override {
7878 V = std::make_unique<Verifier>(
7879 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7880 return false;
7881 }
7882
7883 bool runOnFunction(Function &F) override {
7884 if (!V->verify(F) && FatalErrors) {
7885 errs() << "in function " << F.getName() << '\n';
7886 report_fatal_error("Broken function found, compilation aborted!");
7887 }
7888 return false;
7889 }
7890
7891 bool doFinalization(Module &M) override {
7892 bool HasErrors = false;
7893 for (Function &F : M)
7894 if (F.isDeclaration())
7895 HasErrors |= !V->verify(F);
7896
7897 HasErrors |= !V->verify();
7898 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7899 report_fatal_error("Broken module found, compilation aborted!");
7900 return false;
7901 }
7902
7903 void getAnalysisUsage(AnalysisUsage &AU) const override {
7904 AU.setPreservesAll();
7905 }
7906};
7907
7908} // end anonymous namespace
7909
7910/// Helper to issue failure from the TBAA verification
7911template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7912 if (Diagnostic)
7913 return Diagnostic->CheckFailed(Args...);
7914}
7915
/// Check a TBAA-specific condition \p C; when it fails, report the remaining
/// arguments through CheckFailed and make the enclosing function (which must
/// return bool) return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7923
7924/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7925/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7926/// struct-type node describing an aggregate data structure (like a struct).
7927TBAAVerifier::TBAABaseNodeSummary
7928TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7929 bool IsNewFormat) {
7930 if (BaseNode->getNumOperands() < 2) {
7931 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7932 return {true, ~0u};
7933 }
7934
7935 auto Itr = TBAABaseNodes.find(BaseNode);
7936 if (Itr != TBAABaseNodes.end())
7937 return Itr->second;
7938
7939 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7940 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7941 (void)InsertResult;
7942 assert(InsertResult.second && "We just checked!");
7943 return Result;
7944}
7945
7946TBAAVerifier::TBAABaseNodeSummary
7947TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7948 const MDNode *BaseNode, bool IsNewFormat) {
7949 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7950
7951 if (BaseNode->getNumOperands() == 2) {
7952 // Scalar nodes can only be accessed at offset 0.
7953 return isValidScalarTBAANode(BaseNode)
7954 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7955 : InvalidNode;
7956 }
7957
7958 if (IsNewFormat) {
7959 if (BaseNode->getNumOperands() % 3 != 0) {
7960 CheckFailed("Access tag nodes must have the number of operands that is a "
7961 "multiple of 3!", BaseNode);
7962 return InvalidNode;
7963 }
7964 } else {
7965 if (BaseNode->getNumOperands() % 2 != 1) {
7966 CheckFailed("Struct tag nodes must have an odd number of operands!",
7967 BaseNode);
7968 return InvalidNode;
7969 }
7970 }
7971
7972 // Check the type size field.
7973 if (IsNewFormat) {
7974 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7975 BaseNode->getOperand(1));
7976 if (!TypeSizeNode) {
7977 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7978 return InvalidNode;
7979 }
7980 }
7981
7982 // Check the type name field. In the new format it can be anything.
7983 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7984 CheckFailed("Struct tag nodes have a string as their first operand",
7985 BaseNode);
7986 return InvalidNode;
7987 }
7988
7989 bool Failed = false;
7990
7991 std::optional<APInt> PrevOffset;
7992 unsigned BitWidth = ~0u;
7993
7994 // We've already checked that BaseNode is not a degenerate root node with one
7995 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7996 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7997 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7998 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7999 Idx += NumOpsPerField) {
8000 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8001 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8002 if (!isa<MDNode>(FieldTy)) {
8003 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8004 Failed = true;
8005 continue;
8006 }
8007
8008 auto *OffsetEntryCI =
8010 if (!OffsetEntryCI) {
8011 CheckFailed("Offset entries must be constants!", I, BaseNode);
8012 Failed = true;
8013 continue;
8014 }
8015
8016 if (BitWidth == ~0u)
8017 BitWidth = OffsetEntryCI->getBitWidth();
8018
8019 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8020 CheckFailed(
8021 "Bitwidth between the offsets and struct type entries must match", I,
8022 BaseNode);
8023 Failed = true;
8024 continue;
8025 }
8026
8027 // NB! As far as I can tell, we generate a non-strictly increasing offset
8028 // sequence only from structs that have zero size bit fields. When
8029 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8030 // pick the field lexically the latest in struct type metadata node. This
8031 // mirrors the actual behavior of the alias analysis implementation.
8032 bool IsAscending =
8033 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8034
8035 if (!IsAscending) {
8036 CheckFailed("Offsets must be increasing!", I, BaseNode);
8037 Failed = true;
8038 }
8039
8040 PrevOffset = OffsetEntryCI->getValue();
8041
8042 if (IsNewFormat) {
8043 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8044 BaseNode->getOperand(Idx + 2));
8045 if (!MemberSizeNode) {
8046 CheckFailed("Member size entries must be constants!", I, BaseNode);
8047 Failed = true;
8048 continue;
8049 }
8050 }
8051 }
8052
8053 return Failed ? InvalidNode
8054 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8055}
8056
8057static bool IsRootTBAANode(const MDNode *MD) {
8058 return MD->getNumOperands() < 2;
8059}
8060
8061static bool IsScalarTBAANodeImpl(const MDNode *MD,
8063 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8064 return false;
8065
8066 if (!isa<MDString>(MD->getOperand(0)))
8067 return false;
8068
8069 if (MD->getNumOperands() == 3) {
8071 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8072 return false;
8073 }
8074
8075 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8076 return Parent && Visited.insert(Parent).second &&
8077 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8078}
8079
8080bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8081 auto ResultIt = TBAAScalarNodes.find(MD);
8082 if (ResultIt != TBAAScalarNodes.end())
8083 return ResultIt->second;
8084
8085 SmallPtrSet<const MDNode *, 4> Visited;
8086 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8087 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8088 (void)InsertResult;
8089 assert(InsertResult.second && "Just checked!");
8090
8091 return Result;
8092}
8093
8094/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8095/// Offset in place to be the offset within the field node returned.
8096///
8097/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8098MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8099 const MDNode *BaseNode,
8100 APInt &Offset,
8101 bool IsNewFormat) {
8102 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8103
8104 // Scalar nodes have only one possible "field" -- their parent in the access
8105 // hierarchy. Offset must be zero at this point, but our caller is supposed
8106 // to check that.
8107 if (BaseNode->getNumOperands() == 2)
8108 return cast<MDNode>(BaseNode->getOperand(1));
8109
8110 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8111 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8112 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8113 Idx += NumOpsPerField) {
8114 auto *OffsetEntryCI =
8115 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8116 if (OffsetEntryCI->getValue().ugt(Offset)) {
8117 if (Idx == FirstFieldOpNo) {
8118 CheckFailed("Could not find TBAA parent in struct type node", I,
8119 BaseNode, &Offset);
8120 return nullptr;
8121 }
8122
8123 unsigned PrevIdx = Idx - NumOpsPerField;
8124 auto *PrevOffsetEntryCI =
8125 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8126 Offset -= PrevOffsetEntryCI->getValue();
8127 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8128 }
8129 }
8130
8131 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8132 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8133 BaseNode->getOperand(LastIdx + 1));
8134 Offset -= LastOffsetEntryCI->getValue();
8135 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8136}
8137
8139 if (!Type || Type->getNumOperands() < 3)
8140 return false;
8141
8142 // In the new format type nodes shall have a reference to the parent type as
8143 // its first operand.
8144 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8145}
8146
8148 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8149 MD);
8150
8151 if (I)
8155 "This instruction shall not have a TBAA access tag!", I);
8156
8157 bool IsStructPathTBAA =
8158 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8159
8160 CheckTBAA(IsStructPathTBAA,
8161 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8162 I);
8163
8164 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8165 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8166
8167 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8168
8169 if (IsNewFormat) {
8170 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8171 "Access tag metadata must have either 4 or 5 operands", I, MD);
8172 } else {
8173 CheckTBAA(MD->getNumOperands() < 5,
8174 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8175 }
8176
8177 // Check the access size field.
8178 if (IsNewFormat) {
8179 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8180 MD->getOperand(3));
8181 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8182 }
8183
8184 // Check the immutability flag.
8185 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8186 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8187 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8188 MD->getOperand(ImmutabilityFlagOpNo));
8189 CheckTBAA(IsImmutableCI,
8190 "Immutability tag on struct tag metadata must be a constant", I,
8191 MD);
8192 CheckTBAA(
8193 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8194 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8195 MD);
8196 }
8197
8198 CheckTBAA(BaseNode && AccessType,
8199 "Malformed struct tag metadata: base and access-type "
8200 "should be non-null and point to Metadata nodes",
8201 I, MD, BaseNode, AccessType);
8202
8203 if (!IsNewFormat) {
8204 CheckTBAA(isValidScalarTBAANode(AccessType),
8205 "Access type node must be a valid scalar type", I, MD,
8206 AccessType);
8207 }
8208
8210 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8211
8212 APInt Offset = OffsetCI->getValue();
8213 bool SeenAccessTypeInPath = false;
8214
8215 SmallPtrSet<MDNode *, 4> StructPath;
8216
8217 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8218 BaseNode =
8219 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8220 if (!StructPath.insert(BaseNode).second) {
8221 CheckFailed("Cycle detected in struct path", I, MD);
8222 return false;
8223 }
8224
8225 bool Invalid;
8226 unsigned BaseNodeBitWidth;
8227 std::tie(Invalid, BaseNodeBitWidth) =
8228 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8229
8230 // If the base node is invalid in itself, then we've already printed all the
8231 // errors we wanted to print.
8232 if (Invalid)
8233 return false;
8234
8235 SeenAccessTypeInPath |= BaseNode == AccessType;
8236
8237 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8238 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8239 MD, &Offset);
8240
8241 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8242 (BaseNodeBitWidth == 0 && Offset == 0) ||
8243 (IsNewFormat && BaseNodeBitWidth == ~0u),
8244 "Access bit-width not the same as description bit-width", I, MD,
8245 BaseNodeBitWidth, Offset.getBitWidth());
8246
8247 if (IsNewFormat && SeenAccessTypeInPath)
8248 break;
8249 }
8250
8251 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8252 MD);
8253 return true;
8254}
8255
8256char VerifierLegacyPass::ID = 0;
8257INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8258
8260 return new VerifierLegacyPass(FatalErrors);
8261}
8262
8263AnalysisKey VerifierAnalysis::Key;
8270
8275
8277 auto Res = AM.getResult<VerifierAnalysis>(M);
8278 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8279 report_fatal_error("Broken module found, compilation aborted!");
8280
8281 return PreservedAnalyses::all();
8282}
8283
8285 auto res = AM.getResult<VerifierAnalysis>(F);
8286 if (res.IRBroken && FatalErrors)
8287 report_fatal_error("Broken function found, compilation aborted!");
8288
8289 return PreservedAnalyses::all();
8290}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:682
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:723
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:6080
bool isFiniteNonZero() const
Definition APFloat.h:1522
bool isNegative() const
Definition APFloat.h:1512
const fltSemantics & getSemantics() const
Definition APFloat.h:1520
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1571
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:674
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:624
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:712
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:819
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and llvm::setLoopEstimatedTripCount.
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained intrinsic rounding mode metadata.
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:306
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:155
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:149
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:299
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:151
LLVMContext & Context
Definition Verifier.cpp:146
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:153
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:288
const Module & M
Definition Verifier.cpp:142
const DataLayout & DL
Definition Verifier.cpp:145
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:315
const Triple & TT
Definition Verifier.cpp:144
ModuleSlotTracker MST
Definition Verifier.cpp:143