LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
280 template <typename... Ts> void WriteTs() {}
281
282public:
283 /// A check failed, so printout out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
  /// A debug info check failed. Prints \p Message (when a stream is attached)
  /// and marks the module's debug info as broken.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // NOTE(review): this extract's internal numbering jumps 307 -> 309 here,
    // so a statement appears to be missing (likely one folding
    // TreatBrokenDebugInfoAsError into Broken) — confirm against upstream.
    BrokenDebugInfo = true;
  }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
  /// Create a verifier bound to module \p M.
  /// \param OS stream that receives diagnostics; may be null to suppress
  ///        printing.
  /// \param ShouldTreatBrokenDebugInfoAsError whether broken debug info is
  ///        treated as an error (see TreatBrokenDebugInfoAsError).
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
  /// Verify a single function of the module this verifier was created for.
  /// \returns true if \p F verified cleanly, false otherwise.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Bail out early if any block lacks a terminator; the visitors below
    // assume terminated blocks.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // Route convergence-verifier failures through the common failure path so
    // they set Broken like every other check.
    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    // Convergence tokens are only checked when some were actually seen.
    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can verify further functions.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
451
  /// Verify the module that this instance of \c Verifier was initialized with.
  /// \returns true if the module-level checks all passed.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();

    // Walk every kind of global symbol the module can hold.
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    // Module-level metadata checks.
    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    // Module-level debug-info and deoptimize calling-convention checks.
    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
/// We know that cond should be true, if not print an error message.
/// On failure this reports through CheckFailed(...) and then returns from the
/// enclosing (void) verifier method, skipping its remaining checks.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
678
/// We know that a debug info condition should be true, if not print
/// an error message.
/// Unlike Check, failures go through DebugInfoCheckFailed and so mark the
/// debug info as broken rather than (necessarily) the whole module.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
688
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initializer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
// Recursively verify the aliasee expression of alias GA. `Visited` carries
// every alias already seen on the current chain so alias cycles are rejected.
// NOTE(review): original lines 949-950 and 957 are elided from this excerpt,
// so the conditions guarding the first two Check() calls are not visible here.
947void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
948 const GlobalAlias &GA, const Constant &C) {
951 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
952 "available_externally alias must point to available_externally "
953 "global value",
954 &GA);
955 }
956 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
958 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
959 &GA);
960 }
961
// If the aliasee is itself an alias, detect cycles via the Visited set and
// forbid interposable targets; anything else (e.g. a function/global) stops
// the recursion here.
962 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
963 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
964
965 Check(!GA2->isInterposable(),
966 "Alias cannot point to an interposable alias", &GA);
967 } else {
968 // Only continue verifying subexpressions of GlobalAliases.
969 // Do not recurse into global initializers.
970 return;
971 }
972 }
973
// Constant expressions in the aliasee are verified for illegal constructs.
974 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
975 visitConstantExprsRecursively(CE);
976
// Recurse through operands: step through nested aliases to their aliasees,
// and through any other constants directly.
977 for (const Use &U : C.operands()) {
978 Value *V = &*U;
979 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
980 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
981 else if (const auto *C2 = dyn_cast<Constant>(V))
982 visitAliaseeSubExpr(Visited, GA, *C2);
983 }
984}
985
// Verify a GlobalAlias: allowed linkage, a non-null aliasee whose type matches
// the alias, and the aliasee expression itself (cycles, interposable targets).
986void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
// NOTE(review): original line 987 (the Check( opener that validates GA's
// linkage against the list below) is elided from this excerpt.
988 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
989 "weak_odr, external, or available_externally linkage!",
990 &GA);
991 const Constant *Aliasee = GA.getAliasee();
992 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
993 Check(GA.getType() == Aliasee->getType(),
994 "Alias and aliasee types should match!", &GA);
995
996 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
997 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
998
// Walk the aliasee expression recursively (cycle detection, etc.).
999 visitAliaseeSubExpr(GA, *Aliasee);
1000
// Common checks shared by all global values.
1001 visitGlobalValue(GA);
1002}
1003
// Verify a GlobalIFunc: metadata restrictions (no !dbg, no !prof), valid
// linkage, and a resolver that is a function definition returning a pointer
// of the ifunc's address space.
// NOTE(review): original lines 1007, 1017 and 1032 are elided from this
// excerpt (the MDs declaration and two Check( openers).
1004void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1005 visitGlobalValue(GI);
1006
1008 GI.getAllMetadata(MDs);
1009 for (const auto &I : MDs) {
1010 CheckDI(I.first != LLVMContext::MD_dbg,
1011 "an ifunc may not have a !dbg attachment", &GI);
1012 Check(I.first != LLVMContext::MD_prof,
1013 "an ifunc may not have a !prof attachment", &GI);
1014 visitMDNode(*I.second, AreDebugLocsAllowed::No);
1015 }
1016
1018 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1019 "weak_odr, or external linkage!",
1020 &GI);
1021 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1022 // is a Function definition.
1023 const Function *Resolver = GI.getResolverFunction();
1024 Check(Resolver, "IFunc must have a Function resolver", &GI);
1025 Check(!Resolver->isDeclarationForLinker(),
1026 "IFunc resolver must be a definition", &GI);
1027
1028 // Check that the immediate resolver operand (prior to any bitcasts) has the
1029 // correct type.
1030 const Type *ResolverTy = GI.getResolver()->getType();
1031
1033 "IFunc resolver must return a pointer", &GI);
1034
// The resolver's type must be a pointer in the ifunc's own address space.
1035 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1036 "IFunc resolver has incorrect type", &GI);
1037}
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
// Verify an MDNode and (recursively) its operands. Dispatches to the
// specialized visitor for each leaf MDNode subclass via Metadata.def.
// NOTE(review): original lines 1096 and 1098 are elided from this excerpt
// (the llvm.loop.estimated_trip_count condition and the extraction of Count).
1056void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1057 // Only visit each node once. Metadata can be mutually recursive, so this
1058 // avoids infinite recursion here, as well as being an optimization.
1059 if (!MDNodes.insert(&MD).second)
1060 return;
1061
1062 Check(&MD.getContext() == &Context,
1063 "MDNode context does not match Module context!", &MD);
1064
// Dispatch on the concrete metadata kind; every specialized leaf class gets
// its own visit##CLASS method generated from Metadata.def.
1065 switch (MD.getMetadataID()) {
1066 default:
1067 llvm_unreachable("Invalid MDNode subclass");
1068 case Metadata::MDTupleKind:
1069 break;
1070#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1071 case Metadata::CLASS##Kind: \
1072 visit##CLASS(cast<CLASS>(MD)); \
1073 break;
1074#include "llvm/IR/Metadata.def"
1075 }
1076
// Recurse into operands: function-local metadata and (depending on context)
// DILocations are not allowed inside global metadata.
1077 for (const Metadata *Op : MD.operands()) {
1078 if (!Op)
1079 continue;
1080 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1081 &MD, Op);
1082 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1083 "DILocation not allowed within this metadata node", &MD, Op);
1084 if (auto *N = dyn_cast<MDNode>(Op)) {
1085 visitMDNode(*N, AllowLocs);
1086 continue;
1087 }
1088 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1089 visitValueAsMetadata(*V, nullptr);
1090 continue;
1091 }
1092 }
1093
1094 // Check llvm.loop.estimated_trip_count.
1095 if (MD.getNumOperands() > 0 &&
1097 Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1099 Check(Count && Count->getType()->isIntegerTy() &&
1100 cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1101 "Expected second operand to be an integer constant of type i32 or "
1102 "smaller",
1103 &MD);
1104 }
1105
1106 // Check these last, so we diagnose problems in operands first.
1107 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1108 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1109}
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
1174
// A GenericDINode only needs a non-zero DWARF tag to be well-formed.
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
// Verify a DISubrangeType: tag, base type, and that each bound-like operand
// (lower/upper bound, stride, bias) is a signed constant, DIVariable, or
// DIExpression.
// NOTE(review): original line 1208 (the CheckDI( opener for the SizeInBits
// constraint) is elided from this excerpt.
1184void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1185 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1186 auto *BaseType = N.getRawBaseType();
1187 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1188 auto *LBound = N.getRawLowerBound();
1189 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1190 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1191 "LowerBound must be signed constant or DIVariable or DIExpression",
1192 &N);
1193 auto *UBound = N.getRawUpperBound();
1194 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1195 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1196 "UpperBound must be signed constant or DIVariable or DIExpression",
1197 &N);
1198 auto *Stride = N.getRawStride();
1199 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1200 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1201 "Stride must be signed constant or DIVariable or DIExpression", &N);
1202 auto *Bias = N.getRawBias();
1203 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1204 isa<DIExpression>(Bias),
1205 "Bias must be signed constant or DIVariable or DIExpression", &N);
1206 // Subrange types currently only support constant size.
1207 auto *Size = N.getRawSizeInBits();
1209 "SizeInBits must be a constant");
1210}
1211
// Verify a DISubrange: count and upperBound are mutually exclusive, and each
// bound-like operand must be a signed constant, DIVariable, or DIExpression.
// NOTE(review): original line 1221 (the Check( opener/condition guarding the
// constant-count range check) is elided from this excerpt.
1212void Verifier::visitDISubrange(const DISubrange &N) {
1213 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1214 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1215 "Subrange can have any one of count or upperBound", &N);
1216 auto *CBound = N.getRawCountNode();
1217 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1218 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1219 "Count must be signed constant or DIVariable or DIExpression", &N);
// A constant count must be >= -1 (-1 encodes an empty/unknown count).
1220 auto Count = N.getCount();
1222 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1223 "invalid subrange count", &N);
1224 auto *LBound = N.getRawLowerBound();
1225 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1226 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1227 "LowerBound must be signed constant or DIVariable or DIExpression",
1228 &N);
1229 auto *UBound = N.getRawUpperBound();
1230 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1231 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1232 "UpperBound must be signed constant or DIVariable or DIExpression",
1233 &N);
1234 auto *Stride = N.getRawStride();
1235 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1236 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1237 "Stride must be signed constant or DIVariable or DIExpression", &N);
1238}
1239
1240void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1241 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1242 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1243 "GenericSubrange can have any one of count or upperBound", &N);
1244 auto *CBound = N.getRawCountNode();
1245 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1246 "Count must be signed constant or DIVariable or DIExpression", &N);
1247 auto *LBound = N.getRawLowerBound();
1248 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1249 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1250 "LowerBound must be signed constant or DIVariable or DIExpression",
1251 &N);
1252 auto *UBound = N.getRawUpperBound();
1253 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1254 "UpperBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *Stride = N.getRawStride();
1257 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1258 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1259 "Stride must be signed constant or DIVariable or DIExpression", &N);
1260}
1261
// An enumerator node only needs the DW_TAG_enumerator tag.
1262void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1263 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1264}
1265
// Verify a DIBasicType: one of the base/unspecified/string type tags, and a
// constant size.
// NOTE(review): original line 1273 (the CheckDI( opener for the SizeInBits
// constraint) is elided from this excerpt.
1266void Verifier::visitDIBasicType(const DIBasicType &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1268 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1269 N.getTag() == dwarf::DW_TAG_string_type,
1270 "invalid tag", &N);
1271 // Basic types currently only support constant size.
1272 auto *Size = N.getRawSizeInBits();
1274 "SizeInBits must be a constant");
1275}
1276
// Verify a DIFixedPointType on top of the basic-type checks: it must be a
// base type with a signed/unsigned fixed encoding, and its rational vs.
// binary/decimal fields must be mutually consistent.
// NOTE(review): original lines 1284-1286, 1288 and 1291 are elided from this
// excerpt (the kind check and the CheckDI( openers of the two factor checks).
1277void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1278 visitDIBasicType(N);
1279
1280 CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1281 CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1282 N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1283 "invalid encoding", &N);
1287 "invalid kind", &N);
1289 N.getFactorRaw() == 0,
1290 "factor should be 0 for rationals", &N);
1292 (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1293 "numerator and denominator should be 0 for non-rationals", &N);
1294}
1295
1296void Verifier::visitDIStringType(const DIStringType &N) {
1297 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1298 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1299 &N);
1300}
1301
// Verify a DIDerivedType: allowed tag, tag-specific extraData shape, set-type
// base-type restrictions, scope/base type validity, DWARF address space
// applicability, and the SizeInBits operand's shape.
// NOTE(review): original lines 1355-1357 (the Enum/Subrange/Basic dyn_casts
// for the set-type check) and 1383-1384 (the CheckDI( opener for SizeInBits)
// are elided from this excerpt.
1302void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1303 // Common scope checks.
1304 visitDIScope(N);
1305
1306 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1307 N.getTag() == dwarf::DW_TAG_pointer_type ||
1308 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1309 N.getTag() == dwarf::DW_TAG_reference_type ||
1310 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1311 N.getTag() == dwarf::DW_TAG_const_type ||
1312 N.getTag() == dwarf::DW_TAG_immutable_type ||
1313 N.getTag() == dwarf::DW_TAG_volatile_type ||
1314 N.getTag() == dwarf::DW_TAG_restrict_type ||
1315 N.getTag() == dwarf::DW_TAG_atomic_type ||
1316 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1317 N.getTag() == dwarf::DW_TAG_member ||
1318 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1319 N.getTag() == dwarf::DW_TAG_inheritance ||
1320 N.getTag() == dwarf::DW_TAG_friend ||
1321 N.getTag() == dwarf::DW_TAG_set_type ||
1322 N.getTag() == dwarf::DW_TAG_template_alias,
1323 "invalid tag", &N);
// The extraData operand's expected shape depends on the tag.
1324 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1325 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1326 N.getRawExtraData());
1327 } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
1328 CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
1329 N.getRawExtraData());
1330 } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
1331 N.getTag() == dwarf::DW_TAG_member ||
1332 N.getTag() == dwarf::DW_TAG_variable) {
1333 auto *ExtraData = N.getRawExtraData();
1334 auto IsValidExtraData = [&]() {
1335 if (ExtraData == nullptr)
1336 return true;
1337 if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
1338 isa<DIObjCProperty>(ExtraData))
1339 return true;
1340 if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
1341 if (Tuple->getNumOperands() != 1)
1342 return false;
1343 return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
1344 }
1345 return false;
1346 };
1347 CheckDI(IsValidExtraData(),
1348 "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
1349 "or MDTuple with single ConstantAsMetadata operand",
1350 &N, ExtraData);
1351 }
1352
// Set types may only be built over enumerations, subranges, or certain
// integral/boolean basic types.
1353 if (N.getTag() == dwarf::DW_TAG_set_type) {
1354 if (auto *T = N.getRawBaseType()) {
1358 CheckDI(
1359 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1360 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1361 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1362 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1363 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1364 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1365 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1366 "invalid set base type", &N, T);
1367 }
1368 }
1369
1370 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1371 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1372 N.getRawBaseType());
1373
1374 if (N.getDWARFAddressSpace()) {
1375 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1376 N.getTag() == dwarf::DW_TAG_reference_type ||
1377 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1378 "DWARF address space only applies to pointer or reference types",
1379 &N);
1380 }
1381
1382 auto *Size = N.getRawSizeInBits();
1385 "SizeInBits must be a constant or DIVariable or DIExpression");
1386}
1387
1388/// Detect mutually exclusive flags.
1389static bool hasConflictingReferenceFlags(unsigned Flags) {
1390 return ((Flags & DINode::FlagLValueReference) &&
1391 (Flags & DINode::FlagRValueReference)) ||
1392 ((Flags & DINode::FlagTypePassByValue) &&
1393 (Flags & DINode::FlagTypePassByReference));
1394}
1395
1396void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1397 auto *Params = dyn_cast<MDTuple>(&RawParams);
1398 CheckDI(Params, "invalid template params", &N, &RawParams);
1399 for (Metadata *Op : Params->operands()) {
1400 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1401 &N, Params, Op);
1402 }
1403}
1404
// Verify a DICompositeType: allowed tag, valid scope/base type/elements/
// vtable holder, no conflicting flags, vector shape, template params,
// and the array-type-only operands (dataLocation/associated/allocated/rank).
// NOTE(review): original line 1427 (the CheckDI( opener for the reference
// flags check) and lines 1475-1476 (the CheckDI( opener for SizeInBits) are
// elided from this excerpt.
1405void Verifier::visitDICompositeType(const DICompositeType &N) {
1406 // Common scope checks.
1407 visitDIScope(N);
1408
1409 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1410 N.getTag() == dwarf::DW_TAG_structure_type ||
1411 N.getTag() == dwarf::DW_TAG_union_type ||
1412 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1413 N.getTag() == dwarf::DW_TAG_class_type ||
1414 N.getTag() == dwarf::DW_TAG_variant_part ||
1415 N.getTag() == dwarf::DW_TAG_variant ||
1416 N.getTag() == dwarf::DW_TAG_namelist,
1417 "invalid tag", &N);
1418
1419 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1420 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1421 N.getRawBaseType());
1422
1423 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1424 "invalid composite elements", &N, N.getRawElements());
1425 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1426 N.getRawVTableHolder());
1428 "invalid reference flags", &N);
// DIBlockByRefStruct was retired; reject its bit explicitly.
1429 unsigned DIBlockByRefStruct = 1 << 4;
1430 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1431 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1432 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1433 "DISubprogram contains null entry in `elements` field", &N);
1434
1435 if (N.isVector()) {
1436 const DINodeArray Elements = N.getElements();
1437 CheckDI(Elements.size() == 1 &&
1438 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1439 "invalid vector, expected one element of type subrange", &N);
1440 }
1441
1442 if (auto *Params = N.getRawTemplateParams())
1443 visitTemplateParams(N, *Params);
1444
1445 if (auto *D = N.getRawDiscriminator()) {
1446 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1447 "discriminator can only appear on variant part");
1448 }
1449
// The following four operands are only meaningful on array types.
1450 if (N.getRawDataLocation()) {
1451 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1452 "dataLocation can only appear in array type");
1453 }
1454
1455 if (N.getRawAssociated()) {
1456 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1457 "associated can only appear in array type");
1458 }
1459
1460 if (N.getRawAllocated()) {
1461 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1462 "allocated can only appear in array type");
1463 }
1464
1465 if (N.getRawRank()) {
1466 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1467 "rank can only appear in array type");
1468 }
1469
1470 if (N.getTag() == dwarf::DW_TAG_array_type) {
1471 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1472 }
1473
1474 auto *Size = N.getRawSizeInBits();
1477 "SizeInBits must be a constant or DIVariable or DIExpression");
1478}
1479
// Verify a DISubroutineType: subroutine tag, a type array of valid type refs,
// and no conflicting reference flags.
// NOTE(review): original line 1488 (the CheckDI( opener for the reference
// flags check) is elided from this excerpt.
1480void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1481 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1482 if (auto *Types = N.getRawTypeArray()) {
1483 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1484 for (Metadata *Ty : N.getTypeArray()->operands()) {
1485 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1486 }
1487 }
1489 "invalid reference flags", &N);
1490}
1491
1492void Verifier::visitDIFile(const DIFile &N) {
1493 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1494 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1495 if (Checksum) {
1496 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1497 "invalid checksum kind", &N);
1498 size_t Size;
1499 switch (Checksum->Kind) {
1500 case DIFile::CSK_MD5:
1501 Size = 32;
1502 break;
1503 case DIFile::CSK_SHA1:
1504 Size = 40;
1505 break;
1506 case DIFile::CSK_SHA256:
1507 Size = 64;
1508 break;
1509 }
1510 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1511 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1512 "invalid checksum", &N);
1513 }
1514}
1515
// Verify a DICompileUnit: distinct, correct tag, valid file, emission kind,
// and well-formed enum/retained-type/global-variable/imported-entity/macro
// lists. Records the CU for later cross-checks.
// NOTE(review): original lines 1533 and 1550 (the dyn_cast extracting `Enum`
// and the CheckDI( opener in the global-variables loop) are elided from this
// excerpt.
1516void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1517 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1518 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1519
1520 // Don't bother verifying the compilation directory or producer string
1521 // as those could be empty.
1522 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1523 N.getRawFile());
1524 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1525 N.getFile());
1526
1527 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1528 "invalid emission kind", &N);
1529
1530 if (auto *Array = N.getRawEnumTypes()) {
1531 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1532 for (Metadata *Op : N.getEnumTypes()->operands()) {
1534 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1535 "invalid enum type", &N, N.getEnumTypes(), Op);
1536 }
1537 }
// Retained types may be types or subprogram declarations (not definitions).
1538 if (auto *Array = N.getRawRetainedTypes()) {
1539 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1540 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1541 CheckDI(
1542 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1543 !cast<DISubprogram>(Op)->isDefinition())),
1544 "invalid retained type", &N, Op);
1545 }
1546 }
1547 if (auto *Array = N.getRawGlobalVariables()) {
1548 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1549 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1551 "invalid global variable ref", &N, Op);
1552 }
1553 }
1554 if (auto *Array = N.getRawImportedEntities()) {
1555 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1556 for (Metadata *Op : N.getImportedEntities()->operands()) {
1557 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1558 &N, Op);
1559 }
1560 }
1561 if (auto *Array = N.getRawMacros()) {
1562 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1563 for (Metadata *Op : N.getMacros()->operands()) {
1564 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1565 }
1566 }
// Remember this CU so verifyModule can cross-check it against llvm.dbg.cu.
1567 CUVisited.insert(&N);
1568}
1569
// Verify a DISubprogram: tag, scope/file/type/containing-type validity,
// template params, declaration link, retained nodes (must be local to this
// subprogram), unit rules for definitions vs. declarations, thrown types,
// and DIFlagAllCallsDescribed placement.
// NOTE(review): original lines 1603 (part of the retained-node scope lookup)
// and 1613 (the CheckDI( opener for the reference flags check) are elided
// from this excerpt.
1570void Verifier::visitDISubprogram(const DISubprogram &N) {
1571 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1572 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1573 if (auto *F = N.getRawFile())
1574 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1575 else
1576 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1577 if (auto *T = N.getRawType())
1578 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1579 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1580 N.getRawContainingType());
1581 if (auto *Params = N.getRawTemplateParams())
1582 visitTemplateParams(N, *Params);
// A subprogram's declaration link must itself be a non-definition subprogram.
1583 if (auto *S = N.getRawDeclaration())
1584 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1585 "invalid subprogram declaration", &N, S);
1586 if (auto *RawNode = N.getRawRetainedNodes()) {
1587 auto *Node = dyn_cast<MDTuple>(RawNode);
1588 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1589 for (Metadata *Op : Node->operands()) {
1590 CheckDI(Op, "nullptr in retained nodes", &N, Node);
1591
1592 auto True = [](const Metadata *) { return true; };
1593 auto False = [](const Metadata *) { return false; };
1594 bool IsTypeCorrect =
1595 DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
1596 CheckDI(IsTypeCorrect,
1597 "invalid retained nodes, expected DILocalVariable, DILabel or "
1598 "DIImportedEntity",
1599 &N, Node, Op);
1600
// Each retained node must live in a local scope owned by this subprogram.
1601 auto *RetainedNode = cast<DINode>(Op);
1602 auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
1604 CheckDI(RetainedNodeScope,
1605 "invalid retained nodes, retained node is not local", &N, Node,
1606 RetainedNode);
1607 CheckDI(
1608 RetainedNodeScope->getSubprogram() == &N,
1609 "invalid retained nodes, retained node does not belong to subprogram",
1610 &N, Node, RetainedNode, RetainedNodeScope);
1611 }
1612 }
1614 "invalid reference flags", &N);
1615
1616 auto *Unit = N.getRawUnit();
1617 if (N.isDefinition()) {
1618 // Subprogram definitions (not part of the type hierarchy).
1619 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1620 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1621 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1622 // There's no good way to cross the CU boundary to insert a nested
1623 // DISubprogram definition in one CU into a type defined in another CU.
1624 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1625 if (CT && CT->getRawIdentifier() &&
1626 M.getContext().isODRUniquingDebugTypes())
1627 CheckDI(N.getDeclaration(),
1628 "definition subprograms cannot be nested within DICompositeType "
1629 "when enabling ODR",
1630 &N);
1631 } else {
1632 // Subprogram declarations (part of the type hierarchy).
1633 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1634 CheckDI(!N.getRawDeclaration(),
1635 "subprogram declaration must not have a declaration field");
1636 }
1637
1638 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1639 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1640 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1641 for (Metadata *Op : ThrownTypes->operands())
1642 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1643 Op);
1644 }
1645
1646 if (N.areAllCallsDescribed())
1647 CheckDI(N.isDefinition(),
1648 "DIFlagAllCallsDescribed must be attached to a definition");
1649}
1650
1651void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1652 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1653 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1654 "invalid local scope", &N, N.getRawScope());
1655 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1656 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1657}
1658
1659void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1660 visitDILexicalBlockBase(N);
1661
1662 CheckDI(N.getLine() || !N.getColumn(),
1663 "cannot have column info without line info", &N);
1664}
1665
// A DILexicalBlockFile needs no checks beyond the common lexical-block ones.
1666void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1667 visitDILexicalBlockBase(N);
1668}
1669
1670void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1671 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1672 if (auto *S = N.getRawScope())
1673 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1674 if (auto *S = N.getRawDecl())
1675 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1676}
1677
1678void Verifier::visitDINamespace(const DINamespace &N) {
1679 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1680 if (auto *S = N.getRawScope())
1681 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1682}
1683
1684void Verifier::visitDIMacro(const DIMacro &N) {
1685 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1686 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1687 "invalid macinfo type", &N);
1688 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1689 if (!N.getValue().empty()) {
1690 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1691 }
1692}
1693
1694void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1695 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1696 "invalid macinfo type", &N);
1697 if (auto *F = N.getRawFile())
1698 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1699
1700 if (auto *Array = N.getRawElements()) {
1701 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1702 for (Metadata *Op : N.getElements()->operands()) {
1703 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1704 }
1705 }
1706}
1707
1708void Verifier::visitDIModule(const DIModule &N) {
1709 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1710 CheckDI(!N.getName().empty(), "anonymous module", &N);
1711}
1712
// Checks shared by all template parameters: the type ref must be null or a
// DIType.
1713void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1714 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1715}
1716
1717void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1718 visitDITemplateParameter(N);
1719
1720 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1721 &N);
1722}
1723
1724void Verifier::visitDITemplateValueParameter(
1725 const DITemplateValueParameter &N) {
1726 visitDITemplateParameter(N);
1727
1728 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1729 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1730 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1731 "invalid tag", &N);
1732}
1733
1734void Verifier::visitDIVariable(const DIVariable &N) {
1735 if (auto *S = N.getRawScope())
1736 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1737 if (auto *F = N.getRawFile())
1738 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1739}
1740
// Verify a DIGlobalVariable on top of the shared variable checks: variable
// tag, valid type ref (required for definitions), and a well-formed static
// data member declaration link.
// NOTE(review): original line 1751 (the CheckDI( opener validating `Member`)
// is elided from this excerpt.
1741void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1742 // Checks common to all variables.
1743 visitDIVariable(N);
1744
1745 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1746 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1747 // Check only if the global variable is not an extern
1748 if (N.isDefinition())
1749 CheckDI(N.getType(), "missing global variable type", &N);
1750 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1752 "invalid static data member declaration", &N, Member);
1753 }
1754}
1755
1756void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1757 // Checks common to all variables.
1758 visitDIVariable(N);
1759
1760 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1761 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1762 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1763 "local variable requires a valid scope", &N, N.getRawScope());
1764 if (auto Ty = N.getType())
1765 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1766}
1767
1768void Verifier::visitDIAssignID(const DIAssignID &N) {
1769 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1770 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1771}
1772
1773void Verifier::visitDILabel(const DILabel &N) {
1774 if (auto *S = N.getRawScope())
1775 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1776 if (auto *F = N.getRawFile())
1777 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1778
1779 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1780 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1781 "label requires a valid scope", &N, N.getRawScope());
1782}
1783
1784void Verifier::visitDIExpression(const DIExpression &N) {
1785 CheckDI(N.isValid(), "invalid expression", &N);
1786}
1787
1788void Verifier::visitDIGlobalVariableExpression(
1789 const DIGlobalVariableExpression &GVE) {
1790 CheckDI(GVE.getVariable(), "missing variable");
1791 if (auto *Var = GVE.getVariable())
1792 visitDIGlobalVariable(*Var);
1793 if (auto *Expr = GVE.getExpression()) {
1794 visitDIExpression(*Expr);
1795 if (auto Fragment = Expr->getFragmentInfo())
1796 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1797 }
1798}
1799
1800void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1801 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1802 if (auto *T = N.getRawType())
1803 CheckDI(isType(T), "invalid type ref", &N, T);
1804 if (auto *F = N.getRawFile())
1805 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1806}
1807
1808void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1809 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1810 N.getTag() == dwarf::DW_TAG_imported_declaration,
1811 "invalid tag", &N);
1812 if (auto *S = N.getRawScope())
1813 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1814 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1815 N.getRawEntity());
1816}
1817
1818void Verifier::visitComdat(const Comdat &C) {
1819 // In COFF the Module is invalid if the GlobalValue has private linkage.
1820 // Entities with private linkage don't have entries in the symbol table.
1821 if (TT.isOSBinFormatCOFF())
1822 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1823 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1824 GV);
1825}
1826
1827void Verifier::visitModuleIdents() {
1828 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1829 if (!Idents)
1830 return;
1831
1832 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1833 // Scan each llvm.ident entry and make sure that this requirement is met.
1834 for (const MDNode *N : Idents->operands()) {
1835 Check(N->getNumOperands() == 1,
1836 "incorrect number of operands in llvm.ident metadata", N);
1837 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1838 ("invalid value for llvm.ident metadata entry operand"
1839 "(the operand should be a string)"),
1840 N->getOperand(0));
1841 }
1842}
1843
1844void Verifier::visitModuleCommandLines() {
1845 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1846 if (!CommandLines)
1847 return;
1848
1849 // llvm.commandline takes a list of metadata entry. Each entry has only one
1850 // string. Scan each llvm.commandline entry and make sure that this
1851 // requirement is met.
1852 for (const MDNode *N : CommandLines->operands()) {
1853 Check(N->getNumOperands() == 1,
1854 "incorrect number of operands in llvm.commandline metadata", N);
1855 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1856 ("invalid value for llvm.commandline metadata entry operand"
1857 "(the operand should be a string)"),
1858 N->getOperand(0));
1859 }
1860}
1861
1862void Verifier::visitModuleErrnoTBAA() {
1863 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1864 if (!ErrnoTBAA)
1865 return;
1866
1867 Check(ErrnoTBAA->getNumOperands() >= 1,
1868 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1869
1870 for (const MDNode *N : ErrnoTBAA->operands())
1871 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1872}
1873
1874void Verifier::visitModuleFlags() {
1875 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1876 if (!Flags) return;
1877
1878 // Scan each flag, and track the flags and requirements.
1879 DenseMap<const MDString*, const MDNode*> SeenIDs;
1880 SmallVector<const MDNode*, 16> Requirements;
1881 uint64_t PAuthABIPlatform = -1;
1882 uint64_t PAuthABIVersion = -1;
1883 for (const MDNode *MDN : Flags->operands()) {
1884 visitModuleFlag(MDN, SeenIDs, Requirements);
1885 if (MDN->getNumOperands() != 3)
1886 continue;
1887 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1888 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1889 if (const auto *PAP =
1891 PAuthABIPlatform = PAP->getZExtValue();
1892 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1893 if (const auto *PAV =
1895 PAuthABIVersion = PAV->getZExtValue();
1896 }
1897 }
1898 }
1899
1900 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1901 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1902 "'aarch64-elf-pauthabi-version' module flags must be present");
1903
1904 // Validate that the requirements in the module are valid.
1905 for (const MDNode *Requirement : Requirements) {
1906 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1907 const Metadata *ReqValue = Requirement->getOperand(1);
1908
1909 const MDNode *Op = SeenIDs.lookup(Flag);
1910 if (!Op) {
1911 CheckFailed("invalid requirement on flag, flag is not present in module",
1912 Flag);
1913 continue;
1914 }
1915
1916 if (Op->getOperand(2) != ReqValue) {
1917 CheckFailed(("invalid requirement on flag, "
1918 "flag does not have the required value"),
1919 Flag);
1920 continue;
1921 }
1922 }
1923}
1924
1925void
1926Verifier::visitModuleFlag(const MDNode *Op,
1927 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1928 SmallVectorImpl<const MDNode *> &Requirements) {
1929 // Each module flag should have three arguments, the merge behavior (a
1930 // constant int), the flag ID (an MDString), and the value.
1931 Check(Op->getNumOperands() == 3,
1932 "incorrect number of operands in module flag", Op);
1933 Module::ModFlagBehavior MFB;
1934 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1936 "invalid behavior operand in module flag (expected constant integer)",
1937 Op->getOperand(0));
1938 Check(false,
1939 "invalid behavior operand in module flag (unexpected constant)",
1940 Op->getOperand(0));
1941 }
1942 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1943 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1944 Op->getOperand(1));
1945
1946 // Check the values for behaviors with additional requirements.
1947 switch (MFB) {
1948 case Module::Error:
1949 case Module::Warning:
1950 case Module::Override:
1951 // These behavior types accept any value.
1952 break;
1953
1954 case Module::Min: {
1955 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1956 Check(V && V->getValue().isNonNegative(),
1957 "invalid value for 'min' module flag (expected constant non-negative "
1958 "integer)",
1959 Op->getOperand(2));
1960 break;
1961 }
1962
1963 case Module::Max: {
1965 "invalid value for 'max' module flag (expected constant integer)",
1966 Op->getOperand(2));
1967 break;
1968 }
1969
1970 case Module::Require: {
1971 // The value should itself be an MDNode with two operands, a flag ID (an
1972 // MDString), and a value.
1973 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1974 Check(Value && Value->getNumOperands() == 2,
1975 "invalid value for 'require' module flag (expected metadata pair)",
1976 Op->getOperand(2));
1977 Check(isa<MDString>(Value->getOperand(0)),
1978 ("invalid value for 'require' module flag "
1979 "(first value operand should be a string)"),
1980 Value->getOperand(0));
1981
1982 // Append it to the list of requirements, to check once all module flags are
1983 // scanned.
1984 Requirements.push_back(Value);
1985 break;
1986 }
1987
1988 case Module::Append:
1989 case Module::AppendUnique: {
1990 // These behavior types require the operand be an MDNode.
1991 Check(isa<MDNode>(Op->getOperand(2)),
1992 "invalid value for 'append'-type module flag "
1993 "(expected a metadata node)",
1994 Op->getOperand(2));
1995 break;
1996 }
1997 }
1998
1999 // Unless this is a "requires" flag, check the ID is unique.
2000 if (MFB != Module::Require) {
2001 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2002 Check(Inserted,
2003 "module flag identifiers must be unique (or of 'require' type)", ID);
2004 }
2005
2006 if (ID->getString() == "wchar_size") {
2007 ConstantInt *Value
2009 Check(Value, "wchar_size metadata requires constant integer argument");
2010 }
2011
2012 if (ID->getString() == "Linker Options") {
2013 // If the llvm.linker.options named metadata exists, we assume that the
2014 // bitcode reader has upgraded the module flag. Otherwise the flag might
2015 // have been created by a client directly.
2016 Check(M.getNamedMetadata("llvm.linker.options"),
2017 "'Linker Options' named metadata no longer supported");
2018 }
2019
2020 if (ID->getString() == "SemanticInterposition") {
2021 ConstantInt *Value =
2023 Check(Value,
2024 "SemanticInterposition metadata requires constant integer argument");
2025 }
2026
2027 if (ID->getString() == "CG Profile") {
2028 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2029 visitModuleFlagCGProfileEntry(MDO);
2030 }
2031}
2032
2033void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2034 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2035 if (!FuncMDO)
2036 return;
2037 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2038 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2039 "expected a Function or null", FuncMDO);
2040 };
2041 auto Node = dyn_cast_or_null<MDNode>(MDO);
2042 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2043 CheckFunction(Node->getOperand(0));
2044 CheckFunction(Node->getOperand(1));
2045 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2046 Check(Count && Count->getType()->isIntegerTy(),
2047 "expected an integer constant", Node->getOperand(2));
2048}
2049
// Verify each attribute in a set is of the expected kind. String attributes
// generated from Attributes.inc with the STRBOOL property must hold "",
// "true", or "false"; enum/int attributes must carry an argument exactly
// when their kind requires one.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// Expand one `if` per ATTRIBUTE_STRBOOL entry in Attributes.inc, each
// validating that the string value is empty, "true", or "false".
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Non-string attributes: the presence of an integer argument must match
    // whether the attribute kind is an int-attr kind.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2075
2076// VerifyParameterAttrs - Check the given attributes for an argument or return
2077// value of the specified type. The value V is printed in error messages.
2078void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2079 const Value *V) {
2080 if (!Attrs.hasAttributes())
2081 return;
2082
2083 verifyAttributeTypes(Attrs, V);
2084
2085 for (Attribute Attr : Attrs)
2086 Check(Attr.isStringAttribute() ||
2087 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2088 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2089 V);
2090
2091 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2092 unsigned AttrCount =
2093 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2094 Check(AttrCount == 1,
2095 "Attribute 'immarg' is incompatible with other attributes except the "
2096 "'range' attribute",
2097 V);
2098 }
2099
2100 // Check for mutually incompatible attributes. Only inreg is compatible with
2101 // sret.
2102 unsigned AttrCount = 0;
2103 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2104 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2105 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2106 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2107 Attrs.hasAttribute(Attribute::InReg);
2108 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2109 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2110 Check(AttrCount <= 1,
2111 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2112 "'byref', and 'sret' are incompatible!",
2113 V);
2114
2115 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2116 Attrs.hasAttribute(Attribute::ReadOnly)),
2117 "Attributes "
2118 "'inalloca and readonly' are incompatible!",
2119 V);
2120
2121 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2122 Attrs.hasAttribute(Attribute::Returned)),
2123 "Attributes "
2124 "'sret and returned' are incompatible!",
2125 V);
2126
2127 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2128 Attrs.hasAttribute(Attribute::SExt)),
2129 "Attributes "
2130 "'zeroext and signext' are incompatible!",
2131 V);
2132
2133 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2134 Attrs.hasAttribute(Attribute::ReadOnly)),
2135 "Attributes "
2136 "'readnone and readonly' are incompatible!",
2137 V);
2138
2139 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2140 Attrs.hasAttribute(Attribute::WriteOnly)),
2141 "Attributes "
2142 "'readnone and writeonly' are incompatible!",
2143 V);
2144
2145 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2146 Attrs.hasAttribute(Attribute::WriteOnly)),
2147 "Attributes "
2148 "'readonly and writeonly' are incompatible!",
2149 V);
2150
2151 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2152 Attrs.hasAttribute(Attribute::AlwaysInline)),
2153 "Attributes "
2154 "'noinline and alwaysinline' are incompatible!",
2155 V);
2156
2157 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2158 Attrs.hasAttribute(Attribute::ReadNone)),
2159 "Attributes writable and readnone are incompatible!", V);
2160
2161 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2162 Attrs.hasAttribute(Attribute::ReadOnly)),
2163 "Attributes writable and readonly are incompatible!", V);
2164
2165 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2166 for (Attribute Attr : Attrs) {
2167 if (!Attr.isStringAttribute() &&
2168 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2169 CheckFailed("Attribute '" + Attr.getAsString() +
2170 "' applied to incompatible type!", V);
2171 return;
2172 }
2173 }
2174
2175 if (isa<PointerType>(Ty)) {
2176 if (Attrs.hasAttribute(Attribute::Alignment)) {
2177 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2178 Check(AttrAlign.value() <= Value::MaximumAlignment,
2179 "huge alignment values are unsupported", V);
2180 }
2181 if (Attrs.hasAttribute(Attribute::ByVal)) {
2182 Type *ByValTy = Attrs.getByValType();
2183 SmallPtrSet<Type *, 4> Visited;
2184 Check(ByValTy->isSized(&Visited),
2185 "Attribute 'byval' does not support unsized types!", V);
2186 // Check if it is or contains a target extension type that disallows being
2187 // used on the stack.
2189 "'byval' argument has illegal target extension type", V);
2190 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2191 "huge 'byval' arguments are unsupported", V);
2192 }
2193 if (Attrs.hasAttribute(Attribute::ByRef)) {
2194 SmallPtrSet<Type *, 4> Visited;
2195 Check(Attrs.getByRefType()->isSized(&Visited),
2196 "Attribute 'byref' does not support unsized types!", V);
2197 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2198 (1ULL << 32),
2199 "huge 'byref' arguments are unsupported", V);
2200 }
2201 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2202 SmallPtrSet<Type *, 4> Visited;
2203 Check(Attrs.getInAllocaType()->isSized(&Visited),
2204 "Attribute 'inalloca' does not support unsized types!", V);
2205 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2206 (1ULL << 32),
2207 "huge 'inalloca' arguments are unsupported", V);
2208 }
2209 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2210 SmallPtrSet<Type *, 4> Visited;
2211 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2212 "Attribute 'preallocated' does not support unsized types!", V);
2213 Check(
2214 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2215 (1ULL << 32),
2216 "huge 'preallocated' arguments are unsupported", V);
2217 }
2218 }
2219
2220 if (Attrs.hasAttribute(Attribute::Initializes)) {
2221 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2222 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2223 V);
2225 "Attribute 'initializes' does not support unordered ranges", V);
2226 }
2227
2228 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2229 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2230 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2231 V);
2232 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2233 "Invalid value for 'nofpclass' test mask", V);
2234 }
2235 if (Attrs.hasAttribute(Attribute::Range)) {
2236 const ConstantRange &CR =
2237 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2239 "Range bit width must match type bit width!", V);
2240 }
2241}
2242
2243void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2244 const Value *V) {
2245 if (Attrs.hasFnAttr(Attr)) {
2246 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2247 unsigned N;
2248 if (S.getAsInteger(10, N))
2249 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2250 }
2251}
2252
2253// Check parameter attributes against a function type.
2254// The value V is printed in error messages.
2255void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2256 const Value *V, bool IsIntrinsic,
2257 bool IsInlineAsm) {
2258 if (Attrs.isEmpty())
2259 return;
2260
2261 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2262 Check(Attrs.hasParentContext(Context),
2263 "Attribute list does not match Module context!", &Attrs, V);
2264 for (const auto &AttrSet : Attrs) {
2265 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2266 "Attribute set does not match Module context!", &AttrSet, V);
2267 for (const auto &A : AttrSet) {
2268 Check(A.hasParentContext(Context),
2269 "Attribute does not match Module context!", &A, V);
2270 }
2271 }
2272 }
2273
2274 bool SawNest = false;
2275 bool SawReturned = false;
2276 bool SawSRet = false;
2277 bool SawSwiftSelf = false;
2278 bool SawSwiftAsync = false;
2279 bool SawSwiftError = false;
2280
2281 // Verify return value attributes.
2282 AttributeSet RetAttrs = Attrs.getRetAttrs();
2283 for (Attribute RetAttr : RetAttrs)
2284 Check(RetAttr.isStringAttribute() ||
2285 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2286 "Attribute '" + RetAttr.getAsString() +
2287 "' does not apply to function return values",
2288 V);
2289
2290 unsigned MaxParameterWidth = 0;
2291 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2292 if (Ty->isVectorTy()) {
2293 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2294 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2295 if (Size > MaxParameterWidth)
2296 MaxParameterWidth = Size;
2297 }
2298 }
2299 };
2300 GetMaxParameterWidth(FT->getReturnType());
2301 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2302
2303 // Verify parameter attributes.
2304 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2305 Type *Ty = FT->getParamType(i);
2306 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2307
2308 if (!IsIntrinsic) {
2309 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2310 "immarg attribute only applies to intrinsics", V);
2311 if (!IsInlineAsm)
2312 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2313 "Attribute 'elementtype' can only be applied to intrinsics"
2314 " and inline asm.",
2315 V);
2316 }
2317
2318 verifyParameterAttrs(ArgAttrs, Ty, V);
2319 GetMaxParameterWidth(Ty);
2320
2321 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2322 Check(!SawNest, "More than one parameter has attribute nest!", V);
2323 SawNest = true;
2324 }
2325
2326 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2327 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2328 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2329 "Incompatible argument and return types for 'returned' attribute",
2330 V);
2331 SawReturned = true;
2332 }
2333
2334 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2335 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2336 Check(i == 0 || i == 1,
2337 "Attribute 'sret' is not on first or second parameter!", V);
2338 SawSRet = true;
2339 }
2340
2341 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2342 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2343 SawSwiftSelf = true;
2344 }
2345
2346 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2347 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2348 SawSwiftAsync = true;
2349 }
2350
2351 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2352 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2353 SawSwiftError = true;
2354 }
2355
2356 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2357 Check(i == FT->getNumParams() - 1,
2358 "inalloca isn't on the last parameter!", V);
2359 }
2360 }
2361
2362 if (!Attrs.hasFnAttrs())
2363 return;
2364
2365 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2366 for (Attribute FnAttr : Attrs.getFnAttrs())
2367 Check(FnAttr.isStringAttribute() ||
2368 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2369 "Attribute '" + FnAttr.getAsString() +
2370 "' does not apply to functions!",
2371 V);
2372
2373 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2374 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2375 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2376
2377 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2378 Check(Attrs.hasFnAttr(Attribute::NoInline),
2379 "Attribute 'optnone' requires 'noinline'!", V);
2380
2381 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2382 "Attributes 'optsize and optnone' are incompatible!", V);
2383
2384 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2385 "Attributes 'minsize and optnone' are incompatible!", V);
2386
2387 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2388 "Attributes 'optdebug and optnone' are incompatible!", V);
2389 }
2390
2391 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2392 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2393 "Attributes "
2394 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2395 V);
2396
2397 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2398 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2399 "Attributes 'optsize and optdebug' are incompatible!", V);
2400
2401 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2402 "Attributes 'minsize and optdebug' are incompatible!", V);
2403 }
2404
2405 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2406 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2407 "Attribute writable and memory without argmem: write are incompatible!",
2408 V);
2409
2410 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2411 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2412 "Attributes 'aarch64_pstate_sm_enabled and "
2413 "aarch64_pstate_sm_compatible' are incompatible!",
2414 V);
2415 }
2416
2417 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2418 Attrs.hasFnAttr("aarch64_inout_za") +
2419 Attrs.hasFnAttr("aarch64_out_za") +
2420 Attrs.hasFnAttr("aarch64_preserves_za") +
2421 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2422 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2423 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2424 "'aarch64_za_state_agnostic' are mutually exclusive",
2425 V);
2426
2427 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2428 Attrs.hasFnAttr("aarch64_in_zt0") +
2429 Attrs.hasFnAttr("aarch64_inout_zt0") +
2430 Attrs.hasFnAttr("aarch64_out_zt0") +
2431 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2432 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2433 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2434 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2435 "'aarch64_za_state_agnostic' are mutually exclusive",
2436 V);
2437
2438 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2439 const GlobalValue *GV = cast<GlobalValue>(V);
2441 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2442 }
2443
2444 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2445 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2446 if (ParamNo >= FT->getNumParams()) {
2447 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2448 return false;
2449 }
2450
2451 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2452 CheckFailed("'allocsize' " + Name +
2453 " argument must refer to an integer parameter",
2454 V);
2455 return false;
2456 }
2457
2458 return true;
2459 };
2460
2461 if (!CheckParam("element size", Args->first))
2462 return;
2463
2464 if (Args->second && !CheckParam("number of elements", *Args->second))
2465 return;
2466 }
2467
2468 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2469 AllocFnKind K = Attrs.getAllocKind();
2471 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2472 if (!is_contained(
2473 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2474 Type))
2475 CheckFailed(
2476 "'allockind()' requires exactly one of alloc, realloc, and free");
2477 if ((Type == AllocFnKind::Free) &&
2478 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2479 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2480 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2481 "or aligned modifiers.");
2482 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2483 if ((K & ZeroedUninit) == ZeroedUninit)
2484 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2485 }
2486
2487 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2488 StringRef S = A.getValueAsString();
2489 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2490 Function *Variant = M.getFunction(S);
2491 if (Variant) {
2492 Attribute Family = Attrs.getFnAttr("alloc-family");
2493 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2494 if (Family.isValid())
2495 Check(VariantFamily.isValid() &&
2496 VariantFamily.getValueAsString() == Family.getValueAsString(),
2497 "'alloc-variant-zeroed' must name a function belonging to the "
2498 "same 'alloc-family'");
2499
2500 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2501 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2502 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2503 "'alloc-variant-zeroed' must name a function with "
2504 "'allockind(\"zeroed\")'");
2505
2506 Check(FT == Variant->getFunctionType(),
2507 "'alloc-variant-zeroed' must name a function with the same "
2508 "signature");
2509 }
2510 }
2511
2512 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2513 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2514 if (VScaleMin == 0)
2515 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2516 else if (!isPowerOf2_32(VScaleMin))
2517 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2518 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2519 if (VScaleMax && VScaleMin > VScaleMax)
2520 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2521 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2522 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2523 }
2524
2525 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2526 StringRef FP = FPAttr.getValueAsString();
2527 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2528 FP != "non-leaf-no-reserve")
2529 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2530 }
2531
2532 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2533 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2534 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2535 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2536 .getValueAsString()
2537 .empty(),
2538 "\"patchable-function-entry-section\" must not be empty");
2539 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2540
2541 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2542 StringRef S = A.getValueAsString();
2543 if (S != "none" && S != "all" && S != "non-leaf")
2544 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2545 }
2546
2547 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2548 StringRef S = A.getValueAsString();
2549 if (S != "a_key" && S != "b_key")
2550 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2551 V);
2552 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2553 CheckFailed(
2554 "'sign-return-address-key' present without `sign-return-address`");
2555 }
2556 }
2557
2558 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2559 StringRef S = A.getValueAsString();
2560 if (S != "" && S != "true" && S != "false")
2561 CheckFailed(
2562 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2563 }
2564
2565 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2566 StringRef S = A.getValueAsString();
2567 if (S != "" && S != "true" && S != "false")
2568 CheckFailed(
2569 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2570 }
2571
2572 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2573 StringRef S = A.getValueAsString();
2574 if (S != "" && S != "true" && S != "false")
2575 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2576 V);
2577 }
2578
2579 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2580 StringRef S = A.getValueAsString();
2581 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2582 if (!Info)
2583 CheckFailed("invalid name for a VFABI variant: " + S, V);
2584 }
2585
2586 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2587 StringRef S = A.getValueAsString();
2589 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2590 }
2591
2592 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2593 StringRef S = A.getValueAsString();
2595 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2596 V);
2597 }
2598
2599 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2600 StringRef S = A.getValueAsString();
2602 S.split(Args, ',');
2603 Check(Args.size() >= 5,
2604 "modular-format attribute requires at least 5 arguments", V);
2605 unsigned FirstArgIdx;
2606 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2607 "modular-format attribute first arg index is not an integer", V);
2608 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2609 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2610 "modular-format attribute first arg index is out of bounds", V);
2611 }
2612}
2613void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2614 Check(MD->getNumOperands() == 2,
2615 "'unknown' !prof should have a single additional operand", MD);
2616 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2617 Check(PassName != nullptr,
2618 "'unknown' !prof should have an additional operand of type "
2619 "string");
2620 Check(!PassName->getString().empty(),
2621 "the 'unknown' !prof operand should not be an empty string");
2622}
2623
/// Validate the well-known metadata kinds attached directly to a Function:
/// !prof entry-count annotations and !kcfi_type identifiers. Other metadata
/// kinds are ignored here.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
            verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // !kcfi_type: exactly one operand, a 32-bit constant integer.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2673
/// Walk the operand graph rooted at \p EntryC with an iterative DFS, running
/// the per-constant checks (visitConstantExpr / visitConstantPtrAuth) on each
/// reachable constant exactly once across the whole verification run.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // ConstantExprVisited persists across calls, so a root already reached
  // through some earlier constant needs no re-walk.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2710
2711void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2712 if (CE->getOpcode() == Instruction::BitCast)
2713 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2714 CE->getType()),
2715 "Invalid bitcast", CE);
2716 else if (CE->getOpcode() == Instruction::PtrToAddr)
2717 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2718}
2719
/// Verify the structural invariants of a ptrauth constant: a pointer-typed
/// base of the same type as the constant itself, an i32 key, an i64
/// discriminator, and pointer-typed address discriminator / deactivation
/// symbol operands.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2744
2745bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2746 // There shouldn't be more attribute sets than there are parameters plus the
2747 // function and return value.
2748 return Attrs.getNumAttrSets() <= Params + 2;
2749}
2750
/// Check the constraint string of an inline-asm call site against its
/// arguments: indirect constraints require pointer-typed operands (with
/// elementtype), direct constraints must not carry elementtype, and label
/// constraints are only legal on callbr, where their count must match the
/// number of indirect destinations.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // Label constraints consume no argument slot; only arg-bearing ones do.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2792
/// Verify that statepoint intrinsic is well formed.
/// Operand layout checked here: [id, num-patch-bytes, callee, num-call-args,
/// flags, call-args..., num-transition-args(0), num-deopt-args(0)], followed
/// by use-checks that only gc.result/gc.relocate consume the token.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  // Operand 1: bytes reserved for later patching; must be non-negative.
  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // Operand 2: the wrapped callee; its signature is taken from the
  // elementtype attribute on the call site.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  // Operand 4: flags word; only bits inside MaskAll are defined.
  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  // Inline transition and deopt operand bundles are deprecated: both counts
  // must be constant zero.
  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(Call)) {
      // NOTE(review): this condition tests the statepoint itself rather than
      // UserCall; looks like it was meant to be
      // isa<GCRelocateInst>(UserCall) — confirm before relying on it.
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times. It's non-optimal, but it is legal. It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer. This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about. See example statepoint.ll in the verifier subdirectory
}
2916
2917void Verifier::verifyFrameRecoverIndices() {
2918 for (auto &Counts : FrameEscapeInfo) {
2919 Function *F = Counts.first;
2920 unsigned EscapedObjectCount = Counts.second.first;
2921 unsigned MaxRecoveredIndex = Counts.second.second;
2922 Check(MaxRecoveredIndex <= EscapedObjectCount,
2923 "all indices passed to llvm.localrecover must be less than the "
2924 "number of arguments passed to llvm.localescape in the parent "
2925 "function",
2926 F);
2927 }
2928}
2929
2930static Instruction *getSuccPad(Instruction *Terminator) {
2931 BasicBlock *UnwindDest;
2932 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2933 UnwindDest = II->getUnwindDest();
2934 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2935 UnwindDest = CSI->getUnwindDest();
2936 else
2937 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2938 return &*UnwindDest->getFirstNonPHIIt();
2939}
2940
/// Detect unwind cycles among sibling EH funclets: following each pad's
/// unwind edge (via SiblingFuncletInfo) must never loop back to a pad on the
/// current walk, since pads cannot handle each other's exceptions.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads fully checked on any walk. Active: pads on the current
  // walk, used to detect a cycle.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Re-walk the cycle once to collect its members for the diagnostic.
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2983
// visitFunction - Verify that a function is ok.
//
// Checks, in order: signature/type invariants, attributes, calling-convention
// restrictions, per-argument types, metadata attachments (!dbg, !prof,
// !kcfi_type), intrinsic usage/signature rules, and finally that every !dbg
// location in the body resolves back to this function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable stack frame, so arguments may not be read.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // metadata/token/x86_amx-typed parameters are reserved for intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3321
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks that the block has a terminator, that PHI nodes agree exactly with
// the block's predecessor list, and that each instruction's parent pointer
// refers back to this block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // Sorting both the predecessor list and each PHI's (block, value) pairs
    // lets them be compared positionally below.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3381
3382void Verifier::visitTerminator(Instruction &I) {
3383 // Ensure that terminators only exist at the end of the basic block.
3384 Check(&I == I.getParent()->getTerminator(),
3385 "Terminator found in the middle of a basic block!", I.getParent());
3386 visitInstruction(I);
3387}
3388
// Verify a branch: a conditional branch's condition must be a (scalar) i1.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional()) {
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3396
3397void Verifier::visitReturnInst(ReturnInst &RI) {
3398 Function *F = RI.getParent()->getParent();
3399 unsigned N = RI.getNumOperands();
3400 if (F->getReturnType()->isVoidTy())
3401 Check(N == 0,
3402 "Found return instr that returns non-void in Function of void "
3403 "return type!",
3404 &RI, F->getReturnType());
3405 else
3406 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3407 "Function return type does not match operand "
3408 "type of return inst!",
3409 &RI, F->getReturnType());
3410
3411 // Check to make sure that the return value has necessary properties for
3412 // terminators...
3413 visitTerminator(RI);
3414}
3415
3416void Verifier::visitSwitchInst(SwitchInst &SI) {
3417 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3418 // Check to make sure that all of the constants in the switch instruction
3419 // have the same type as the switched-on value.
3420 Type *SwitchTy = SI.getCondition()->getType();
3421 SmallPtrSet<ConstantInt*, 32> Constants;
3422 for (auto &Case : SI.cases()) {
3423 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3424 "Case value is not a constant integer.", &SI);
3425 Check(Case.getCaseValue()->getType() == SwitchTy,
3426 "Switch constants must all be same type as switch value!", &SI);
3427 Check(Constants.insert(Case.getCaseValue()).second,
3428 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3429 }
3430
3431 visitTerminator(SI);
3432}
3433
// Verify an indirectbr: its address operand and every listed destination
// must be pointer-typed.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
      "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
      "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3443
3444void Verifier::visitCallBrInst(CallBrInst &CBI) {
3445 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3446 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3447 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3448
3449 verifyInlineAsmCall(CBI);
3450 visitTerminator(CBI);
3451}
3452
3453void Verifier::visitSelectInst(SelectInst &SI) {
3454 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3455 SI.getOperand(2)),
3456 "Invalid operands for select instruction!", &SI);
3457
3458 Check(SI.getTrueValue()->getType() == SI.getType(),
3459 "Select values must have same type as select instruction!", &SI);
3460 visitInstruction(SI);
3461}
3462
3463/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3464/// a pass, if any exist, it's an error.
3465///
3466void Verifier::visitUserOp1(Instruction &I) {
3467 Check(false, "User-defined operators should not live outside of a pass!", &I);
3468}
3469
3470void Verifier::visitTruncInst(TruncInst &I) {
3471 // Get the source and destination types
3472 Type *SrcTy = I.getOperand(0)->getType();
3473 Type *DestTy = I.getType();
3474
3475 // Get the size of the types in bits, we'll need this later
3476 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3477 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3478
3479 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3480 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3481 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3482 "trunc source and destination must both be a vector or neither", &I);
3483 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3484
3485 visitInstruction(I);
3486}
3487
3488void Verifier::visitZExtInst(ZExtInst &I) {
3489 // Get the source and destination types
3490 Type *SrcTy = I.getOperand(0)->getType();
3491 Type *DestTy = I.getType();
3492
3493 // Get the size of the types in bits, we'll need this later
3494 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3495 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3496 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3497 "zext source and destination must both be a vector or neither", &I);
3498 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3499 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3500
3501 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3502
3503 visitInstruction(I);
3504}
3505
3506void Verifier::visitSExtInst(SExtInst &I) {
3507 // Get the source and destination types
3508 Type *SrcTy = I.getOperand(0)->getType();
3509 Type *DestTy = I.getType();
3510
3511 // Get the size of the types in bits, we'll need this later
3512 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3513 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3514
3515 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3516 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3517 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3518 "sext source and destination must both be a vector or neither", &I);
3519 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3520
3521 visitInstruction(I);
3522}
3523
3524void Verifier::visitFPTruncInst(FPTruncInst &I) {
3525 // Get the source and destination types
3526 Type *SrcTy = I.getOperand(0)->getType();
3527 Type *DestTy = I.getType();
3528 // Get the size of the types in bits, we'll need this later
3529 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3530 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3531
3532 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3533 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3534 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3535 "fptrunc source and destination must both be a vector or neither", &I);
3536 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3537
3538 visitInstruction(I);
3539}
3540
3541void Verifier::visitFPExtInst(FPExtInst &I) {
3542 // Get the source and destination types
3543 Type *SrcTy = I.getOperand(0)->getType();
3544 Type *DestTy = I.getType();
3545
3546 // Get the size of the types in bits, we'll need this later
3547 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3548 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3549
3550 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3551 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3552 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3553 "fpext source and destination must both be a vector or neither", &I);
3554 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3555
3556 visitInstruction(I);
3557}
3558
3559void Verifier::visitUIToFPInst(UIToFPInst &I) {
3560 // Get the source and destination types
3561 Type *SrcTy = I.getOperand(0)->getType();
3562 Type *DestTy = I.getType();
3563
3564 bool SrcVec = SrcTy->isVectorTy();
3565 bool DstVec = DestTy->isVectorTy();
3566
3567 Check(SrcVec == DstVec,
3568 "UIToFP source and dest must both be vector or scalar", &I);
3569 Check(SrcTy->isIntOrIntVectorTy(),
3570 "UIToFP source must be integer or integer vector", &I);
3571 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3572 &I);
3573
3574 if (SrcVec && DstVec)
3575 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3576 cast<VectorType>(DestTy)->getElementCount(),
3577 "UIToFP source and dest vector length mismatch", &I);
3578
3579 visitInstruction(I);
3580}
3581
3582void Verifier::visitSIToFPInst(SIToFPInst &I) {
3583 // Get the source and destination types
3584 Type *SrcTy = I.getOperand(0)->getType();
3585 Type *DestTy = I.getType();
3586
3587 bool SrcVec = SrcTy->isVectorTy();
3588 bool DstVec = DestTy->isVectorTy();
3589
3590 Check(SrcVec == DstVec,
3591 "SIToFP source and dest must both be vector or scalar", &I);
3592 Check(SrcTy->isIntOrIntVectorTy(),
3593 "SIToFP source must be integer or integer vector", &I);
3594 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3595 &I);
3596
3597 if (SrcVec && DstVec)
3598 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3599 cast<VectorType>(DestTy)->getElementCount(),
3600 "SIToFP source and dest vector length mismatch", &I);
3601
3602 visitInstruction(I);
3603}
3604
3605void Verifier::visitFPToUIInst(FPToUIInst &I) {
3606 // Get the source and destination types
3607 Type *SrcTy = I.getOperand(0)->getType();
3608 Type *DestTy = I.getType();
3609
3610 bool SrcVec = SrcTy->isVectorTy();
3611 bool DstVec = DestTy->isVectorTy();
3612
3613 Check(SrcVec == DstVec,
3614 "FPToUI source and dest must both be vector or scalar", &I);
3615 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3616 Check(DestTy->isIntOrIntVectorTy(),
3617 "FPToUI result must be integer or integer vector", &I);
3618
3619 if (SrcVec && DstVec)
3620 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3621 cast<VectorType>(DestTy)->getElementCount(),
3622 "FPToUI source and dest vector length mismatch", &I);
3623
3624 visitInstruction(I);
3625}
3626
3627void Verifier::visitFPToSIInst(FPToSIInst &I) {
3628 // Get the source and destination types
3629 Type *SrcTy = I.getOperand(0)->getType();
3630 Type *DestTy = I.getType();
3631
3632 bool SrcVec = SrcTy->isVectorTy();
3633 bool DstVec = DestTy->isVectorTy();
3634
3635 Check(SrcVec == DstVec,
3636 "FPToSI source and dest must both be vector or scalar", &I);
3637 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3638 Check(DestTy->isIntOrIntVectorTy(),
3639 "FPToSI result must be integer or integer vector", &I);
3640
3641 if (SrcVec && DstVec)
3642 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3643 cast<VectorType>(DestTy)->getElementCount(),
3644 "FPToSI source and dest vector length mismatch", &I);
3645
3646 visitInstruction(I);
3647}
3648
3649void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3650 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3651 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3652 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3653 V);
3654
3655 if (SrcTy->isVectorTy()) {
3656 auto *VSrc = cast<VectorType>(SrcTy);
3657 auto *VDest = cast<VectorType>(DestTy);
3658 Check(VSrc->getElementCount() == VDest->getElementCount(),
3659 "PtrToAddr vector length mismatch", V);
3660 }
3661
3662 Type *AddrTy = DL.getAddressType(SrcTy);
3663 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3664}
3665
3666void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3667 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3668 visitInstruction(I);
3669}
3670
3671void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3672 // Get the source and destination types
3673 Type *SrcTy = I.getOperand(0)->getType();
3674 Type *DestTy = I.getType();
3675
3676 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3677
3678 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3679 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3680 &I);
3681
3682 if (SrcTy->isVectorTy()) {
3683 auto *VSrc = cast<VectorType>(SrcTy);
3684 auto *VDest = cast<VectorType>(DestTy);
3685 Check(VSrc->getElementCount() == VDest->getElementCount(),
3686 "PtrToInt Vector length mismatch", &I);
3687 }
3688
3689 visitInstruction(I);
3690}
3691
3692void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3693 // Get the source and destination types
3694 Type *SrcTy = I.getOperand(0)->getType();
3695 Type *DestTy = I.getType();
3696
3697 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3698 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3699
3700 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3701 &I);
3702 if (SrcTy->isVectorTy()) {
3703 auto *VSrc = cast<VectorType>(SrcTy);
3704 auto *VDest = cast<VectorType>(DestTy);
3705 Check(VSrc->getElementCount() == VDest->getElementCount(),
3706 "IntToPtr Vector length mismatch", &I);
3707 }
3708 visitInstruction(I);
3709}
3710
3711void Verifier::visitBitCastInst(BitCastInst &I) {
3712 Check(
3713 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3714 "Invalid bitcast", &I);
3715 visitInstruction(I);
3716}
3717
3718void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3719 Type *SrcTy = I.getOperand(0)->getType();
3720 Type *DestTy = I.getType();
3721
3722 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3723 &I);
3724 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3725 &I);
3727 "AddrSpaceCast must be between different address spaces", &I);
3728 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3729 Check(SrcVTy->getElementCount() ==
3730 cast<VectorType>(DestTy)->getElementCount(),
3731 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3732 visitInstruction(I);
3733}
3734
3735/// visitPHINode - Ensure that a PHI node is well formed.
3736///
3737void Verifier::visitPHINode(PHINode &PN) {
3738 // Ensure that the PHI nodes are all grouped together at the top of the block.
3739 // This can be tested by checking whether the instruction before this is
3740 // either nonexistent (because this is begin()) or is a PHI node. If not,
3741 // then there is some other instruction before a PHI.
3742 Check(&PN == &PN.getParent()->front() ||
3744 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3745
3746 // Check that a PHI doesn't yield a Token.
3747 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3748
3749 // Check that all of the values of the PHI node have the same type as the
3750 // result.
3751 for (Value *IncValue : PN.incoming_values()) {
3752 Check(PN.getType() == IncValue->getType(),
3753 "PHI node operands are not the same type as the result!", &PN);
3754 }
3755
3756 // All other PHI node constraints are checked in the visitBasicBlock method.
3757
3758 visitInstruction(PN);
3759}
3760
/// visitCallBase - Verify properties shared by every call-like instruction
/// (call, invoke, callbr): callee operand, argument count/type agreement with
/// the callee FunctionType, call-site attributes (speculatable, preallocated,
/// immarg, swifterror, ...), vararg attribute rules, and operand-bundle
/// well-formedness.
/// NOTE(review): several physical lines of this function were lost when the
/// file was extracted (a few Check(...) openers and two if-conditions); the
/// code below is otherwise preserved byte-for-byte -- restore the missing
/// lines from upstream before compiling.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // NOTE(review): initializer of Callee was lost in extraction.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  // Intrinsics are exempt: their signatures are fixed by the intrinsic
  // definition, not by the ABI.
  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      // NOTE(review): initializer of hasOB was lost in extraction.
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

  // NOTE(review): the guarding `if` that binds ID was lost in extraction.
    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      // NOTE(review): the `} else if (...)` opening the attachedcall branch
      // was lost in extraction.
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
          "inlinable function call in a function with "
          "debug info must have a !dbg location",
          Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4066
4067void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4068 StringRef Context) {
4069 Check(!Attrs.contains(Attribute::InAlloca),
4070 Twine("inalloca attribute not allowed in ") + Context);
4071 Check(!Attrs.contains(Attribute::InReg),
4072 Twine("inreg attribute not allowed in ") + Context);
4073 Check(!Attrs.contains(Attribute::SwiftError),
4074 Twine("swifterror attribute not allowed in ") + Context);
4075 Check(!Attrs.contains(Attribute::Preallocated),
4076 Twine("preallocated attribute not allowed in ") + Context);
4077 Check(!Attrs.contains(Attribute::ByRef),
4078 Twine("byref attribute not allowed in ") + Context);
4079}
4080
4081/// Two types are "congruent" if they are identical, or if they are both pointer
4082/// types with different pointee types and the same address space.
4083static bool isTypeCongruent(Type *L, Type *R) {
4084 if (L == R)
4085 return true;
4088 if (!PL || !PR)
4089 return false;
4090 return PL->getAddressSpace() == PR->getAddressSpace();
4091}
4092
4093static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4094 static const Attribute::AttrKind ABIAttrs[] = {
4095 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4096 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4097 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4098 Attribute::ByRef};
4099 AttrBuilder Copy(C);
4100 for (auto AK : ABIAttrs) {
4101 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4102 if (Attr.isValid())
4103 Copy.addAttribute(Attr);
4104 }
4105
4106 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4107 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4108 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4109 Attrs.hasParamAttr(I, Attribute::ByRef)))
4110 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4111 return Copy;
4112}
4113
4114void Verifier::verifyMustTailCall(CallInst &CI) {
4115 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4116
4117 Function *F = CI.getParent()->getParent();
4118 FunctionType *CallerTy = F->getFunctionType();
4119 FunctionType *CalleeTy = CI.getFunctionType();
4120 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4121 "cannot guarantee tail call due to mismatched varargs", &CI);
4122 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4123 "cannot guarantee tail call due to mismatched return types", &CI);
4124
4125 // - The calling conventions of the caller and callee must match.
4126 Check(F->getCallingConv() == CI.getCallingConv(),
4127 "cannot guarantee tail call due to mismatched calling conv", &CI);
4128
4129 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4130 // or a pointer bitcast followed by a ret instruction.
4131 // - The ret instruction must return the (possibly bitcasted) value
4132 // produced by the call or void.
4133 Value *RetVal = &CI;
4135
4136 // Handle the optional bitcast.
4137 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4138 Check(BI->getOperand(0) == RetVal,
4139 "bitcast following musttail call must use the call", BI);
4140 RetVal = BI;
4141 Next = BI->getNextNode();
4142 }
4143
4144 // Check the return.
4145 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4146 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4147 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4148 isa<UndefValue>(Ret->getReturnValue()),
4149 "musttail call result must be returned", Ret);
4150
4151 AttributeList CallerAttrs = F->getAttributes();
4152 AttributeList CalleeAttrs = CI.getAttributes();
4153 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4154 CI.getCallingConv() == CallingConv::Tail) {
4155 StringRef CCName =
4156 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4157
4158 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4159 // are allowed in swifttailcc call
4160 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4161 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4162 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4163 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4164 }
4165 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4166 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4167 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4168 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4169 }
4170 // - Varargs functions are not allowed
4171 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4172 " tail call for varargs function");
4173 return;
4174 }
4175
4176 // - The caller and callee prototypes must match. Pointer types of
4177 // parameters or return types may differ in pointee type, but not
4178 // address space.
4179 if (!CI.getIntrinsicID()) {
4180 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4181 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4182 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4183 Check(
4184 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4185 "cannot guarantee tail call due to mismatched parameter types", &CI);
4186 }
4187 }
4188
4189 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4190 // returned, preallocated, and inalloca, must match.
4191 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4192 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4193 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4194 Check(CallerABIAttrs == CalleeABIAttrs,
4195 "cannot guarantee tail call due to mismatched ABI impacting "
4196 "function attributes",
4197 &CI, CI.getOperand(I));
4198 }
4199}
4200
4201void Verifier::visitCallInst(CallInst &CI) {
4202 visitCallBase(CI);
4203
4204 if (CI.isMustTailCall())
4205 verifyMustTailCall(CI);
4206}
4207
4208void Verifier::visitInvokeInst(InvokeInst &II) {
4209 visitCallBase(II);
4210
4211 // Verify that the first non-PHI instruction of the unwind destination is an
4212 // exception handling instruction.
4213 Check(
4214 II.getUnwindDest()->isEHPad(),
4215 "The unwind destination does not have an exception handling instruction!",
4216 &II);
4217
4218 visitTerminator(II);
4219}
4220
4221/// visitUnaryOperator - Check the argument to the unary operator.
4222///
4223void Verifier::visitUnaryOperator(UnaryOperator &U) {
4224 Check(U.getType() == U.getOperand(0)->getType(),
4225 "Unary operators must have same type for"
4226 "operands and result!",
4227 &U);
4228
4229 switch (U.getOpcode()) {
4230 // Check that floating-point arithmetic operators are only used with
4231 // floating-point operands.
4232 case Instruction::FNeg:
4233 Check(U.getType()->isFPOrFPVectorTy(),
4234 "FNeg operator only works with float types!", &U);
4235 break;
4236 default:
4237 llvm_unreachable("Unknown UnaryOperator opcode!");
4238 }
4239
4240 visitInstruction(U);
4241}
4242
4243/// visitBinaryOperator - Check that both arguments to the binary operator are
4244/// of the same type!
4245///
4246void Verifier::visitBinaryOperator(BinaryOperator &B) {
4247 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4248 "Both operands to a binary operator are not of the same type!", &B);
4249
4250 switch (B.getOpcode()) {
4251 // Check that integer arithmetic operators are only used with
4252 // integral operands.
4253 case Instruction::Add:
4254 case Instruction::Sub:
4255 case Instruction::Mul:
4256 case Instruction::SDiv:
4257 case Instruction::UDiv:
4258 case Instruction::SRem:
4259 case Instruction::URem:
4260 Check(B.getType()->isIntOrIntVectorTy(),
4261 "Integer arithmetic operators only work with integral types!", &B);
4262 Check(B.getType() == B.getOperand(0)->getType(),
4263 "Integer arithmetic operators must have same type "
4264 "for operands and result!",
4265 &B);
4266 break;
4267 // Check that floating-point arithmetic operators are only used with
4268 // floating-point operands.
4269 case Instruction::FAdd:
4270 case Instruction::FSub:
4271 case Instruction::FMul:
4272 case Instruction::FDiv:
4273 case Instruction::FRem:
4274 Check(B.getType()->isFPOrFPVectorTy(),
4275 "Floating-point arithmetic operators only work with "
4276 "floating-point types!",
4277 &B);
4278 Check(B.getType() == B.getOperand(0)->getType(),
4279 "Floating-point arithmetic operators must have same type "
4280 "for operands and result!",
4281 &B);
4282 break;
4283 // Check that logical operators are only used with integral operands.
4284 case Instruction::And:
4285 case Instruction::Or:
4286 case Instruction::Xor:
4287 Check(B.getType()->isIntOrIntVectorTy(),
4288 "Logical operators only work with integral types!", &B);
4289 Check(B.getType() == B.getOperand(0)->getType(),
4290 "Logical operators must have same type for operands and result!", &B);
4291 break;
4292 case Instruction::Shl:
4293 case Instruction::LShr:
4294 case Instruction::AShr:
4295 Check(B.getType()->isIntOrIntVectorTy(),
4296 "Shifts only work with integral types!", &B);
4297 Check(B.getType() == B.getOperand(0)->getType(),
4298 "Shift return type must be same as operands!", &B);
4299 break;
4300 default:
4301 llvm_unreachable("Unknown BinaryOperator opcode!");
4302 }
4303
4304 visitInstruction(B);
4305}
4306
4307void Verifier::visitICmpInst(ICmpInst &IC) {
4308 // Check that the operands are the same type
4309 Type *Op0Ty = IC.getOperand(0)->getType();
4310 Type *Op1Ty = IC.getOperand(1)->getType();
4311 Check(Op0Ty == Op1Ty,
4312 "Both operands to ICmp instruction are not of the same type!", &IC);
4313 // Check that the operands are the right type
4314 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4315 "Invalid operand types for ICmp instruction", &IC);
4316 // Check that the predicate is valid.
4317 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4318
4319 visitInstruction(IC);
4320}
4321
4322void Verifier::visitFCmpInst(FCmpInst &FC) {
4323 // Check that the operands are the same type
4324 Type *Op0Ty = FC.getOperand(0)->getType();
4325 Type *Op1Ty = FC.getOperand(1)->getType();
4326 Check(Op0Ty == Op1Ty,
4327 "Both operands to FCmp instruction are not of the same type!", &FC);
4328 // Check that the operands are the right type
4329 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4330 &FC);
4331 // Check that the predicate is valid.
4332 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4333
4334 visitInstruction(FC);
4335}
4336
4337void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4339 "Invalid extractelement operands!", &EI);
4340 visitInstruction(EI);
4341}
4342
4343void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4344 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4345 IE.getOperand(2)),
4346 "Invalid insertelement operands!", &IE);
4347 visitInstruction(IE);
4348}
4349
4350void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4352 SV.getShuffleMask()),
4353 "Invalid shufflevector operands!", &SV);
4354 visitInstruction(SV);
4355}
4356
/// Verify a getelementptr: pointer-typed base, sized and index-compatible
/// source element type, integer indices, and (for vector GEPs) consistent
/// vector widths across result, base, and indices.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // Scalar type of the base strips a vector wrapper, so this accepts both
  // pointer and vector-of-pointer bases.
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Check(isa<PointerType>(TargetTy),
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
    // NOTE(review): the two concatenated literals below render as
    // "...scalable vectortype" — looks like a missing space; confirm against
    // any tests matching this diagnostic before changing it.
    Check(!STy->isScalableTy(),
          "getelementptr cannot target structure that contains scalable vector"
          "type",
          &GEP);
  }

  SmallVector<Value *, 16> Idxs(GEP.indices());
  Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
  // The indexed type must exist and match the GEP's declared result element
  // type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());

  Check(PtrTy && GEP.getResultElementType() == ElTy,
        "GEP is not of right type for indices!", &GEP, ElTy);

  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
    // Additional checks for vector GEPs.
    ElementCount GEPWidth = GEPVTy->getElementCount();
    if (GEP.getPointerOperandType()->isVectorTy())
      Check(
          GEPWidth ==
              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
          "Vector GEP result width doesn't match operand's", &GEP);
    // Every vector index must have the same element count as the result.
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
        ElementCount IndexWidth = IndexVTy->getElementCount();
        Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Check(IndexTy->isIntOrIntVectorTy(),
            "All GEP indices should be of integer type");
    }
  }

  // A GEP preserves the address space of its base pointer.
  Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
        "GEP address space doesn't match type", &GEP);

  visitInstruction(GEP);
}
4408
4409static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4410 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4411}
4412
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs; an odd count means a dangling bound.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Both bounds of each pair must be ConstantInts of matching type.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // noalias.addrspace ranges are always over i32 address-space numbers.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      // !range / !absolute_symbol bounds are typed like the value itself.
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol may use the full-set encoding.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Successive ranges must be disjoint, ascending, and non-adjacent
      // (adjacent ranges should have been merged into one).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With three or more ranges the list wraps around, so the first and last
  // ranges must also be disjoint and non-adjacent.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4477
4478void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4479 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4480 "precondition violation");
4481 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4482}
4483
4484void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4485 Type *Ty) {
4486 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4487 "precondition violation");
4488 verifyRangeLikeMetadata(I, Range, Ty,
4489 RangeLikeMetadataKind::NoaliasAddrspace);
4490}
4491
4492void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4493 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4494 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4495 Check(!(Size & (Size - 1)),
4496 "atomic memory access' operand must have a power-of-two size", Ty, I);
4497}
4498
4499void Verifier::visitLoadInst(LoadInst &LI) {
4501 Check(PTy, "Load operand must be a pointer.", &LI);
4502 Type *ElTy = LI.getType();
4503 if (MaybeAlign A = LI.getAlign()) {
4504 Check(A->value() <= Value::MaximumAlignment,
4505 "huge alignment values are unsupported", &LI);
4506 }
4507 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4508 if (LI.isAtomic()) {
4509 Check(LI.getOrdering() != AtomicOrdering::Release &&
4510 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4511 "Load cannot have Release ordering", &LI);
4512 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4514 "atomic load operand must have integer, pointer, floating point, "
4515 "or vector type!",
4516 ElTy, &LI);
4517
4518 checkAtomicMemAccessSize(ElTy, &LI);
4519 } else {
4521 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4522 }
4523
4524 visitInstruction(LI);
4525}
4526
4527void Verifier::visitStoreInst(StoreInst &SI) {
4528 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4529 Check(PTy, "Store operand must be a pointer.", &SI);
4530 Type *ElTy = SI.getOperand(0)->getType();
4531 if (MaybeAlign A = SI.getAlign()) {
4532 Check(A->value() <= Value::MaximumAlignment,
4533 "huge alignment values are unsupported", &SI);
4534 }
4535 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4536 if (SI.isAtomic()) {
4537 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4538 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4539 "Store cannot have Acquire ordering", &SI);
4540 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4542 "atomic store operand must have integer, pointer, floating point, "
4543 "or vector type!",
4544 ElTy, &SI);
4545 checkAtomicMemAccessSize(ElTy, &SI);
4546 } else {
4547 Check(SI.getSyncScopeID() == SyncScope::System,
4548 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4549 }
4550 visitInstruction(SI);
4551}
4552
4553/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4554void Verifier::verifySwiftErrorCall(CallBase &Call,
4555 const Value *SwiftErrorVal) {
4556 for (const auto &I : llvm::enumerate(Call.args())) {
4557 if (I.value() == SwiftErrorVal) {
4558 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4559 "swifterror value when used in a callsite should be marked "
4560 "with swifterror attribute",
4561 SwiftErrorVal, Call);
4562 }
4563 }
4564}
4565
4566void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4567 // Check that swifterror value is only used by loads, stores, or as
4568 // a swifterror argument.
4569 for (const User *U : SwiftErrorVal->users()) {
4571 isa<InvokeInst>(U),
4572 "swifterror value can only be loaded and stored from, or "
4573 "as a swifterror argument!",
4574 SwiftErrorVal, U);
4575 // If it is used by a store, check it is the second operand.
4576 if (auto StoreI = dyn_cast<StoreInst>(U))
4577 Check(StoreI->getOperand(1) == SwiftErrorVal,
4578 "swifterror value should be the second operand when used "
4579 "by stores",
4580 SwiftErrorVal, U);
4581 if (auto *Call = dyn_cast<CallBase>(U))
4582 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4583 }
4584}
4585
4586void Verifier::visitAllocaInst(AllocaInst &AI) {
4587 Type *Ty = AI.getAllocatedType();
4588 SmallPtrSet<Type*, 4> Visited;
4589 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4590 // Check if it's a target extension type that disallows being used on the
4591 // stack.
4593 "Alloca has illegal target extension type", &AI);
4595 "Alloca array size must have integer type", &AI);
4596 if (MaybeAlign A = AI.getAlign()) {
4597 Check(A->value() <= Value::MaximumAlignment,
4598 "huge alignment values are unsupported", &AI);
4599 }
4600
4601 if (AI.isSwiftError()) {
4602 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4604 "swifterror alloca must not be array allocation", &AI);
4605 verifySwiftErrorValue(&AI);
4606 }
4607
4608 if (TT.isAMDGPU()) {
4610 "alloca on amdgpu must be in addrspace(5)", &AI);
4611 }
4612
4613 visitInstruction(AI);
4614}
4615
4616void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4617 Type *ElTy = CXI.getOperand(1)->getType();
4618 Check(ElTy->isIntOrPtrTy(),
4619 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4620 checkAtomicMemAccessSize(ElTy, &CXI);
4621 visitInstruction(CXI);
4622}
4623
4624void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4625 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4626 "atomicrmw instructions cannot be unordered.", &RMWI);
4627 auto Op = RMWI.getOperation();
4628 Type *ElTy = RMWI.getOperand(1)->getType();
4629 if (Op == AtomicRMWInst::Xchg) {
4630 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4631 ElTy->isPointerTy(),
4632 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4633 " operand must have integer or floating point type!",
4634 &RMWI, ElTy);
4635 } else if (AtomicRMWInst::isFPOperation(Op)) {
4637 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4638 " operand must have floating-point or fixed vector of floating-point "
4639 "type!",
4640 &RMWI, ElTy);
4641 } else {
4642 Check(ElTy->isIntegerTy(),
4643 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4644 " operand must have integer type!",
4645 &RMWI, ElTy);
4646 }
4647 checkAtomicMemAccessSize(ElTy, &RMWI);
4649 "Invalid binary operation!", &RMWI);
4650 visitInstruction(RMWI);
4651}
4652
4653void Verifier::visitFenceInst(FenceInst &FI) {
4654 const AtomicOrdering Ordering = FI.getOrdering();
4655 Check(Ordering == AtomicOrdering::Acquire ||
4656 Ordering == AtomicOrdering::Release ||
4657 Ordering == AtomicOrdering::AcquireRelease ||
4658 Ordering == AtomicOrdering::SequentiallyConsistent,
4659 "fence instructions may only have acquire, release, acq_rel, or "
4660 "seq_cst ordering.",
4661 &FI);
4662 visitInstruction(FI);
4663}
4664
4665void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4667 EVI.getIndices()) == EVI.getType(),
4668 "Invalid ExtractValueInst operands!", &EVI);
4669
4670 visitInstruction(EVI);
4671}
4672
4673void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4675 IVI.getIndices()) ==
4676 IVI.getOperand(1)->getType(),
4677 "Invalid InsertValueInst operands!", &IVI);
4678
4679 visitInstruction(IVI);
4680}
4681
4682static Value *getParentPad(Value *EHPad) {
4683 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4684 return FPI->getParentPad();
4685
4686 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4687}
4688
/// Verify that every predecessor of the block containing the EH pad \p I
/// reaches it through a legal edge (invoke unwind edge, cleanupret, or
/// catchswitch) and that the edge respects funclet nesting rules.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  // The entry block has an implicit predecessor (function entry), so it can
  // never be an EH pad.
  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad block is reachable only from its owning catchswitch.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // nounwind intrinsic calls cannot actually unwind; tolerate their edge
      // without further funclet checks.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      // The invoke's funclet bundle (or token none) identifies which pad the
      // edge is unwinding out of.
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      // Check(false) reports the failure and returns from this function.
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      // Reaching token none before ToPadParent means the edge escaped into a
      // second, unrelated pad.
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4771
4772void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4773 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4774 // isn't a cleanup.
4775 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4776 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4777
4778 visitEHPadPredecessors(LPI);
4779
4780 if (!LandingPadResultTy)
4781 LandingPadResultTy = LPI.getType();
4782 else
4783 Check(LandingPadResultTy == LPI.getType(),
4784 "The landingpad instruction should have a consistent result type "
4785 "inside a function.",
4786 &LPI);
4787
4788 Function *F = LPI.getParent()->getParent();
4789 Check(F->hasPersonalityFn(),
4790 "LandingPadInst needs to be in a function with a personality.", &LPI);
4791
4792 // The landingpad instruction must be the first non-PHI instruction in the
4793 // block.
4794 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4795 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4796
4797 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4798 Constant *Clause = LPI.getClause(i);
4799 if (LPI.isCatch(i)) {
4800 Check(isa<PointerType>(Clause->getType()),
4801 "Catch operand does not have pointer type!", &LPI);
4802 } else {
4803 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4805 "Filter operand is not an array of constants!", &LPI);
4806 }
4807 }
4808
4809 visitInstruction(LPI);
4810}
4811
4812void Verifier::visitResumeInst(ResumeInst &RI) {
4814 "ResumeInst needs to be in a function with a personality.", &RI);
4815
4816 if (!LandingPadResultTy)
4817 LandingPadResultTy = RI.getValue()->getType();
4818 else
4819 Check(LandingPadResultTy == RI.getValue()->getType(),
4820 "The resume instruction should have a consistent result type "
4821 "inside a function.",
4822 &RI);
4823
4824 visitTerminator(RI);
4825}
4826
4827void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4828 BasicBlock *BB = CPI.getParent();
4829
4830 Function *F = BB->getParent();
4831 Check(F->hasPersonalityFn(),
4832 "CatchPadInst needs to be in a function with a personality.", &CPI);
4833
4835 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4836 CPI.getParentPad());
4837
4838 // The catchpad instruction must be the first non-PHI instruction in the
4839 // block.
4840 Check(&*BB->getFirstNonPHIIt() == &CPI,
4841 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4842
4843 visitEHPadPredecessors(CPI);
4844 visitFuncletPadInst(CPI);
4845}
4846
4847void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4848 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4849 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4850 CatchReturn.getOperand(0));
4851
4852 visitTerminator(CatchReturn);
4853}
4854
4855void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4856 BasicBlock *BB = CPI.getParent();
4857
4858 Function *F = BB->getParent();
4859 Check(F->hasPersonalityFn(),
4860 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4861
4862 // The cleanuppad instruction must be the first non-PHI instruction in the
4863 // block.
4864 Check(&*BB->getFirstNonPHIIt() == &CPI,
4865 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4866
4867 auto *ParentPad = CPI.getParentPad();
4868 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4869 "CleanupPadInst has an invalid parent.", &CPI);
4870
4871 visitEHPadPredecessors(CPI);
4872 visitFuncletPadInst(CPI);
4873}
4874
/// Verify the unwind-destination consistency of a funclet pad: every edge
/// that exits \p FPI (directly or through nested pads, found via a worklist
/// search) must unwind to the same place, and that place must agree with the
/// parent catchswitch's unwind destination when there is one.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First FPI-exiting use found, and the pad (or token none) it unwinds to;
  // all later exiting uses must match it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's exit destination must match its catchswitch's unwind
    // destination (or both must unwind to caller).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5034
/// Verify a catchswitch: personality present, first non-PHI in its block,
/// legal parent pad, a non-landingpad EH-pad unwind destination (if any),
/// and a non-empty handler list of catchpads.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // A catchswitch is either top-level (token none) or nested directly inside
  // another funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  // Every handler block must begin with the catchpad owned by this switch.
  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
5076
5077void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5079 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5080 CRI.getOperand(0));
5081
5082 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5083 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5084 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5085 "CleanupReturnInst must unwind to an EH block which is not a "
5086 "landingpad.",
5087 &CRI);
5088 }
5089
5090 visitTerminator(CRI);
5091}
5092
5093void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5094 Instruction *Op = cast<Instruction>(I.getOperand(i));
5095 // If the we have an invalid invoke, don't try to compute the dominance.
5096 // We already reject it in the invoke specific checks and the dominance
5097 // computation doesn't handle multiple edges.
5098 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5099 if (II->getNormalDest() == II->getUnwindDest())
5100 return;
5101 }
5102
5103 // Quick check whether the def has already been encountered in the same block.
5104 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5105 // uses are defined to happen on the incoming edge, not at the instruction.
5106 //
5107 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5108 // wrapping an SSA value, assert that we've already encountered it. See
5109 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5110 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5111 return;
5112
5113 const Use &U = I.getOperandUse(i);
5114 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5115}
5116
5117void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5118 Check(I.getType()->isPointerTy(),
5119 "dereferenceable, dereferenceable_or_null "
5120 "apply only to pointer types",
5121 &I);
5123 "dereferenceable, dereferenceable_or_null apply only to load"
5124 " and inttoptr instructions, use attributes for calls or invokes",
5125 &I);
5126 Check(MD->getNumOperands() == 1,
5127 "dereferenceable, dereferenceable_or_null "
5128 "take one operand!",
5129 &I);
5130 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5131 Check(CI && CI->getType()->isIntegerTy(64),
5132 "dereferenceable, "
5133 "dereferenceable_or_null metadata value must be an i64!",
5134 &I);
5135}
5136
5137void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5138 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5139 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5140 &I);
5141 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5142}
5143
5144void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5145 auto GetBranchingTerminatorNumOperands = [&]() {
5146 unsigned ExpectedNumOperands = 0;
5147 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5148 ExpectedNumOperands = BI->getNumSuccessors();
5149 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5150 ExpectedNumOperands = SI->getNumSuccessors();
5151 else if (isa<CallInst>(&I))
5152 ExpectedNumOperands = 1;
5153 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5154 ExpectedNumOperands = IBI->getNumDestinations();
5155 else if (isa<SelectInst>(&I))
5156 ExpectedNumOperands = 2;
5157 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5158 ExpectedNumOperands = CI->getNumSuccessors();
5159 return ExpectedNumOperands;
5160 };
5161 Check(MD->getNumOperands() >= 1,
5162 "!prof annotations should have at least 1 operand", MD);
5163 // Check first operand.
5164 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5166 "expected string with name of the !prof annotation", MD);
5167 MDString *MDS = cast<MDString>(MD->getOperand(0));
5168 StringRef ProfName = MDS->getString();
5169
5171 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5172 "'unknown' !prof should only appear on instructions on which "
5173 "'branch_weights' would",
5174 MD);
5175 verifyUnknownProfileMetadata(MD);
5176 return;
5177 }
5178
5179 Check(MD->getNumOperands() >= 2,
5180 "!prof annotations should have no less than 2 operands", MD);
5181
5182 // Check consistency of !prof branch_weights metadata.
5183 if (ProfName == MDProfLabels::BranchWeights) {
5184 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5185 if (isa<InvokeInst>(&I)) {
5186 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5187 "Wrong number of InvokeInst branch_weights operands", MD);
5188 } else {
5189 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5190 if (ExpectedNumOperands == 0)
5191 CheckFailed("!prof branch_weights are not allowed for this instruction",
5192 MD);
5193
5194 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5195 MD);
5196 }
5197 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5198 ++i) {
5199 auto &MDO = MD->getOperand(i);
5200 Check(MDO, "second operand should not be null", MD);
5202 "!prof brunch_weights operand is not a const int");
5203 }
5204 } else if (ProfName == MDProfLabels::ValueProfile) {
5205 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5206 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5207 Check(KindInt, "VP !prof missing kind argument", MD);
5208
5209 auto Kind = KindInt->getZExtValue();
5210 Check(Kind >= InstrProfValueKind::IPVK_First &&
5211 Kind <= InstrProfValueKind::IPVK_Last,
5212 "Invalid VP !prof kind", MD);
5213 Check(MD->getNumOperands() % 2 == 1,
5214 "VP !prof should have an even number "
5215 "of arguments after 'VP'",
5216 MD);
5217 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5218 Kind == InstrProfValueKind::IPVK_MemOPSize)
5220 "VP !prof indirect call or memop size expected to be applied to "
5221 "CallBase instructions only",
5222 MD);
5223 } else {
5224 CheckFailed("expected either branch_weights or VP profile name", MD);
5225 }
5226}
5227
5228void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5229 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5230 // DIAssignID metadata must be attached to either an alloca or some form of
5231 // store/memory-writing instruction.
5232 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5233 // possible store intrinsics.
5234 bool ExpectedInstTy =
5236 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5237 I, MD);
5238 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5239 // only be found as DbgAssignIntrinsic operands.
5240 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5241 for (auto *User : AsValue->users()) {
5243 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5244 MD, User);
5245 // All of the dbg.assign intrinsics should be in the same function as I.
5246 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5247 CheckDI(DAI->getFunction() == I.getFunction(),
5248 "dbg.assign not in same function as inst", DAI, &I);
5249 }
5250 }
5251 for (DbgVariableRecord *DVR :
5252 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5253 CheckDI(DVR->isDbgAssign(),
5254 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5255 CheckDI(DVR->getFunction() == I.getFunction(),
5256 "DVRAssign not in same function as inst", DVR, &I);
5257 }
5258}
5259
5260void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5262 "!mmra metadata attached to unexpected instruction kind", I, MD);
5263
5264 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5265 // list of tags such as !2 in the following example:
5266 // !0 = !{!"a", !"b"}
5267 // !1 = !{!"c", !"d"}
5268 // !2 = !{!0, !1}
5269 if (MMRAMetadata::isTagMD(MD))
5270 return;
5271
5272 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5273 for (const MDOperand &MDOp : MD->operands())
5274 Check(MMRAMetadata::isTagMD(MDOp.get()),
5275 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5276}
5277
5278void Verifier::visitCallStackMetadata(MDNode *MD) {
5279 // Call stack metadata should consist of a list of at least 1 constant int
5280 // (representing a hash of the location).
5281 Check(MD->getNumOperands() >= 1,
5282 "call stack metadata should have at least 1 operand", MD);
5283
5284 for (const auto &Op : MD->operands())
5286 "call stack metadata operand should be constant integer", Op);
5287}
5288
5289void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5290 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5291 Check(MD->getNumOperands() >= 1,
5292 "!memprof annotations should have at least 1 metadata operand "
5293 "(MemInfoBlock)",
5294 MD);
5295
5296 // Check each MIB
5297 for (auto &MIBOp : MD->operands()) {
5298 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5299 // The first operand of an MIB should be the call stack metadata.
5300 // There rest of the operands should be MDString tags, and there should be
5301 // at least one.
5302 Check(MIB->getNumOperands() >= 2,
5303 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5304
5305 // Check call stack metadata (first operand).
5306 Check(MIB->getOperand(0) != nullptr,
5307 "!memprof MemInfoBlock first operand should not be null", MIB);
5308 Check(isa<MDNode>(MIB->getOperand(0)),
5309 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5310 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5311 visitCallStackMetadata(StackMD);
5312
5313 // The next set of 1 or more operands should be MDString.
5314 unsigned I = 1;
5315 for (; I < MIB->getNumOperands(); ++I) {
5316 if (!isa<MDString>(MIB->getOperand(I))) {
5317 Check(I > 1,
5318 "!memprof MemInfoBlock second operand should be an MDString",
5319 MIB);
5320 break;
5321 }
5322 }
5323
5324 // Any remaining should be MDNode that are pairs of integers
5325 for (; I < MIB->getNumOperands(); ++I) {
5326 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5327 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5328 MIB);
5329 Check(OpNode->getNumOperands() == 2,
5330 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5331 "operands",
5332 MIB);
5333 // Check that all of Op's operands are ConstantInt.
5334 Check(llvm::all_of(OpNode->operands(),
5335 [](const MDOperand &Op) {
5336 return mdconst::hasa<ConstantInt>(Op);
5337 }),
5338 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5339 "ConstantInt operands",
5340 MIB);
5341 }
5342 }
5343}
5344
5345void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5346 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5347 // Verify the partial callstack annotated from memprof profiles. This callsite
5348 // is a part of a profiled allocation callstack.
5349 visitCallStackMetadata(MD);
5350}
5351
5352static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5353 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5354 return isa<ConstantInt>(VAL->getValue());
5355 return false;
5356}
5357
5358void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5359 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5360 &I);
5361 for (Metadata *Op : MD->operands()) {
5363 "The callee_type metadata must be a list of type metadata nodes", Op);
5364 auto *TypeMD = cast<MDNode>(Op);
5365 Check(TypeMD->getNumOperands() == 2,
5366 "Well-formed generalized type metadata must contain exactly two "
5367 "operands",
5368 Op);
5369 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5370 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5371 "The first operand of type metadata for functions must be zero", Op);
5372 Check(TypeMD->hasGeneralizedMDString(),
5373 "Only generalized type metadata can be part of the callee_type "
5374 "metadata list",
5375 Op);
5376 }
5377}
5378
5379void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5380 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5381 Check(Annotation->getNumOperands() >= 1,
5382 "annotation must have at least one operand");
5383 for (const MDOperand &Op : Annotation->operands()) {
5384 bool TupleOfStrings =
5385 isa<MDTuple>(Op.get()) &&
5386 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5387 return isa<MDString>(Annotation.get());
5388 });
5389 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5390 "operands must be a string or a tuple of strings");
5391 }
5392}
5393
5394void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5395 unsigned NumOps = MD->getNumOperands();
5396 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5397 MD);
5398 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5399 "first scope operand must be self-referential or string", MD);
5400 if (NumOps == 3)
5402 "third scope operand must be string (if used)", MD);
5403
5404 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5405 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5406
5407 unsigned NumDomainOps = Domain->getNumOperands();
5408 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5409 "domain must have one or two operands", Domain);
5410 Check(Domain->getOperand(0).get() == Domain ||
5411 isa<MDString>(Domain->getOperand(0)),
5412 "first domain operand must be self-referential or string", Domain);
5413 if (NumDomainOps == 2)
5414 Check(isa<MDString>(Domain->getOperand(1)),
5415 "second domain operand must be string (if used)", Domain);
5416}
5417
5418void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5419 for (const MDOperand &Op : MD->operands()) {
5420 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5421 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5422 visitAliasScopeMetadata(OpMD);
5423 }
5424}
5425
5426void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5427 auto IsValidAccessScope = [](const MDNode *MD) {
5428 return MD->getNumOperands() == 0 && MD->isDistinct();
5429 };
5430
5431 // It must be either an access scope itself...
5432 if (IsValidAccessScope(MD))
5433 return;
5434
5435 // ...or a list of access scopes.
5436 for (const MDOperand &Op : MD->operands()) {
5437 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5438 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5439 Check(IsValidAccessScope(OpMD),
5440 "Access scope list contains invalid access scope", MD);
5441 }
5442}
5443
5444void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5445 static const char *ValidArgs[] = {"address_is_null", "address",
5446 "read_provenance", "provenance"};
5447
5448 auto *SI = dyn_cast<StoreInst>(&I);
5449 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5450 Check(SI->getValueOperand()->getType()->isPointerTy(),
5451 "!captures metadata can only be applied to store with value operand of "
5452 "pointer type",
5453 &I);
5454 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5455 &I);
5456
5457 for (Metadata *Op : Captures->operands()) {
5458 auto *Str = dyn_cast<MDString>(Op);
5459 Check(Str, "!captures metadata must be a list of strings", &I);
5460 Check(is_contained(ValidArgs, Str->getString()),
5461 "invalid entry in !captures metadata", &I, Str);
5462 }
5463}
5464
5465void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5466 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5467 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5468 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5470 "expected integer constant", MD);
5471}
5472
5473/// verifyInstruction - Verify that an instruction is well formed.
5474///
5475void Verifier::visitInstruction(Instruction &I) {
5476 BasicBlock *BB = I.getParent();
5477 Check(BB, "Instruction not embedded in basic block!", &I);
5478
5479 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5480 for (User *U : I.users()) {
5481 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5482 "Only PHI nodes may reference their own value!", &I);
5483 }
5484 }
5485
5486 // Check that void typed values don't have names
5487 Check(!I.getType()->isVoidTy() || !I.hasName(),
5488 "Instruction has a name, but provides a void value!", &I);
5489
5490 // Check that the return value of the instruction is either void or a legal
5491 // value type.
5492 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5493 "Instruction returns a non-scalar type!", &I);
5494
5495 // Check that the instruction doesn't produce metadata. Calls are already
5496 // checked against the callee type.
5497 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5498 "Invalid use of metadata!", &I);
5499
5500 // Check that all uses of the instruction, if they are instructions
5501 // themselves, actually have parent basic blocks. If the use is not an
5502 // instruction, it is an error!
5503 for (Use &U : I.uses()) {
5504 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5505 Check(Used->getParent() != nullptr,
5506 "Instruction referencing"
5507 " instruction not embedded in a basic block!",
5508 &I, Used);
5509 else {
5510 CheckFailed("Use of instruction is not an instruction!", U);
5511 return;
5512 }
5513 }
5514
5515 // Get a pointer to the call base of the instruction if it is some form of
5516 // call.
5517 const CallBase *CBI = dyn_cast<CallBase>(&I);
5518
5519 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5520 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5521
5522 // Check to make sure that only first-class-values are operands to
5523 // instructions.
5524 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5525 Check(false, "Instruction operands must be first-class values!", &I);
5526 }
5527
5528 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5529 // This code checks whether the function is used as the operand of a
5530 // clang_arc_attachedcall operand bundle.
5531 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5532 int Idx) {
5533 return CBI && CBI->isOperandBundleOfType(
5535 };
5536
5537 // Check to make sure that the "address of" an intrinsic function is never
5538 // taken. Ignore cases where the address of the intrinsic function is used
5539 // as the argument of operand bundle "clang.arc.attachedcall" as those
5540 // cases are handled in verifyAttachedCallBundle.
5541 Check((!F->isIntrinsic() ||
5542 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5543 IsAttachedCallOperand(F, CBI, i)),
5544 "Cannot take the address of an intrinsic!", &I);
5545 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5546 F->getIntrinsicID() == Intrinsic::donothing ||
5547 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5548 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5549 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5550 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5551 F->getIntrinsicID() == Intrinsic::coro_resume ||
5552 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5553 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5554 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5555 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5556 F->getIntrinsicID() ==
5557 Intrinsic::experimental_patchpoint_void ||
5558 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5559 F->getIntrinsicID() == Intrinsic::fake_use ||
5560 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5561 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5562 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5563 IsAttachedCallOperand(F, CBI, i),
5564 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5565 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5566 "wasm.(re)throw",
5567 &I);
5568 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5569 &M, F, F->getParent());
5570 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5571 Check(OpBB->getParent() == BB->getParent(),
5572 "Referring to a basic block in another function!", &I);
5573 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5574 Check(OpArg->getParent() == BB->getParent(),
5575 "Referring to an argument in another function!", &I);
5576 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5577 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5578 &M, GV, GV->getParent());
5579 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5580 Check(OpInst->getFunction() == BB->getParent(),
5581 "Referring to an instruction in another function!", &I);
5582 verifyDominatesUse(I, i);
5583 } else if (isa<InlineAsm>(I.getOperand(i))) {
5584 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5585 "Cannot take the address of an inline asm!", &I);
5586 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5587 visitConstantExprsRecursively(CPA);
5588 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5589 if (CE->getType()->isPtrOrPtrVectorTy()) {
5590 // If we have a ConstantExpr pointer, we need to see if it came from an
5591 // illegal bitcast.
5592 visitConstantExprsRecursively(CE);
5593 }
5594 }
5595 }
5596
5597 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5598 Check(I.getType()->isFPOrFPVectorTy(),
5599 "fpmath requires a floating point result!", &I);
5600 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5601 if (ConstantFP *CFP0 =
5603 const APFloat &Accuracy = CFP0->getValueAPF();
5604 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5605 "fpmath accuracy must have float type", &I);
5606 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5607 "fpmath accuracy not a positive number!", &I);
5608 } else {
5609 Check(false, "invalid fpmath accuracy!", &I);
5610 }
5611 }
5612
5613 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5615 "Ranges are only for loads, calls and invokes!", &I);
5616 visitRangeMetadata(I, Range, I.getType());
5617 }
5618
5619 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5622 "noalias.addrspace are only for memory operations!", &I);
5623 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5624 }
5625
5626 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5628 "invariant.group metadata is only for loads and stores", &I);
5629 }
5630
5631 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5632 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5633 &I);
5635 "nonnull applies only to load instructions, use attributes"
5636 " for calls or invokes",
5637 &I);
5638 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5639 }
5640
5641 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5642 visitDereferenceableMetadata(I, MD);
5643
5644 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5645 visitDereferenceableMetadata(I, MD);
5646
5647 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5648 visitNofreeMetadata(I, MD);
5649
5650 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5651 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5652
5653 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5654 visitAliasScopeListMetadata(MD);
5655 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5656 visitAliasScopeListMetadata(MD);
5657
5658 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5659 visitAccessGroupMetadata(MD);
5660
5661 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5662 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5663 &I);
5665 "align applies only to load instructions, "
5666 "use attributes for calls or invokes",
5667 &I);
5668 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5669 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5670 Check(CI && CI->getType()->isIntegerTy(64),
5671 "align metadata value must be an i64!", &I);
5672 uint64_t Align = CI->getZExtValue();
5673 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5674 &I);
5675 Check(Align <= Value::MaximumAlignment,
5676 "alignment is larger that implementation defined limit", &I);
5677 }
5678
5679 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5680 visitProfMetadata(I, MD);
5681
5682 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5683 visitMemProfMetadata(I, MD);
5684
5685 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5686 visitCallsiteMetadata(I, MD);
5687
5688 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5689 visitCalleeTypeMetadata(I, MD);
5690
5691 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5692 visitDIAssignIDMetadata(I, MD);
5693
5694 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5695 visitMMRAMetadata(I, MMRA);
5696
5697 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5698 visitAnnotationMetadata(Annotation);
5699
5700 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5701 visitCapturesMetadata(I, Captures);
5702
5703 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5704 visitAllocTokenMetadata(I, MD);
5705
5706 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5707 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5708 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5709
5710 if (auto *DL = dyn_cast<DILocation>(N)) {
5711 if (DL->getAtomGroup()) {
5712 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5713 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5714 "Instructions enabled",
5715 DL, DL->getScope()->getSubprogram());
5716 }
5717 }
5718 }
5719
5721 I.getAllMetadata(MDs);
5722 for (auto Attachment : MDs) {
5723 unsigned Kind = Attachment.first;
5724 auto AllowLocs =
5725 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5726 ? AreDebugLocsAllowed::Yes
5727 : AreDebugLocsAllowed::No;
5728 visitMDNode(*Attachment.second, AllowLocs);
5729 }
5730
5731 InstsInThisBlock.insert(&I);
5732}
5733
5734/// Allow intrinsics to be verified in different ways.
5735void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5737 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5738 IF);
5739
5740 // Verify that the intrinsic prototype lines up with what the .td files
5741 // describe.
5742 FunctionType *IFTy = IF->getFunctionType();
5743 bool IsVarArg = IFTy->isVarArg();
5744
5748
5749 // Walk the descriptors to extract overloaded types.
5754 "Intrinsic has incorrect return type!", IF);
5756 "Intrinsic has incorrect argument type!", IF);
5757
5758 // Verify if the intrinsic call matches the vararg property.
5759 if (IsVarArg)
5761 "Intrinsic was not defined with variable arguments!", IF);
5762 else
5764 "Callsite was not defined with variable arguments!", IF);
5765
5766 // All descriptors should be absorbed by now.
5767 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5768
5769 // Now that we have the intrinsic ID and the actual argument types (and we
5770 // know they are legal for the intrinsic!) get the intrinsic name through the
5771 // usual means. This allows us to verify the mangling of argument types into
5772 // the name.
5773 const std::string ExpectedName =
5774 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5775 Check(ExpectedName == IF->getName(),
5776 "Intrinsic name not mangled correctly for type arguments! "
5777 "Should be: " +
5778 ExpectedName,
5779 IF);
5780
5781 // If the intrinsic takes MDNode arguments, verify that they are either global
5782 // or are local to *this* function.
5783 for (Value *V : Call.args()) {
5784 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5785 visitMetadataAsValue(*MD, Call.getCaller());
5786 if (auto *Const = dyn_cast<Constant>(V))
5787 Check(!Const->getType()->isX86_AMXTy(),
5788 "const x86_amx is not allowed in argument!");
5789 }
5790
5791 switch (ID) {
5792 default:
5793 break;
5794 case Intrinsic::assume: {
5795 if (Call.hasOperandBundles()) {
5797 Check(Cond && Cond->isOne(),
5798 "assume with operand bundles must have i1 true condition", Call);
5799 }
5800 for (auto &Elem : Call.bundle_op_infos()) {
5801 unsigned ArgCount = Elem.End - Elem.Begin;
5802 // Separate storage assumptions are special insofar as they're the only
5803 // operand bundles allowed on assumes that aren't parameter attributes.
5804 if (Elem.Tag->getKey() == "separate_storage") {
5805 Check(ArgCount == 2,
5806 "separate_storage assumptions should have 2 arguments", Call);
5807 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5808 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5809 "arguments to separate_storage assumptions should be pointers",
5810 Call);
5811 continue;
5812 }
5813 Check(Elem.Tag->getKey() == "ignore" ||
5814 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5815 "tags must be valid attribute names", Call);
5816 Attribute::AttrKind Kind =
5817 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5818 if (Kind == Attribute::Alignment) {
5819 Check(ArgCount <= 3 && ArgCount >= 2,
5820 "alignment assumptions should have 2 or 3 arguments", Call);
5821 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5822 "first argument should be a pointer", Call);
5823 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5824 "second argument should be an integer", Call);
5825 if (ArgCount == 3)
5826 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5827 "third argument should be an integer if present", Call);
5828 continue;
5829 }
5830 if (Kind == Attribute::Dereferenceable) {
5831 Check(ArgCount == 2,
5832 "dereferenceable assumptions should have 2 arguments", Call);
5833 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5834 "first argument should be a pointer", Call);
5835 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5836 "second argument should be an integer", Call);
5837 continue;
5838 }
5839 Check(ArgCount <= 2, "too many arguments", Call);
5840 if (Kind == Attribute::None)
5841 break;
5842 if (Attribute::isIntAttrKind(Kind)) {
5843 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5844 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5845 "the second argument should be a constant integral value", Call);
5846 } else if (Attribute::canUseAsParamAttr(Kind)) {
5847 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5848 } else if (Attribute::canUseAsFnAttr(Kind)) {
5849 Check((ArgCount) == 0, "this attribute has no argument", Call);
5850 }
5851 }
5852 break;
5853 }
5854 case Intrinsic::ucmp:
5855 case Intrinsic::scmp: {
5856 Type *SrcTy = Call.getOperand(0)->getType();
5857 Type *DestTy = Call.getType();
5858
5859 Check(DestTy->getScalarSizeInBits() >= 2,
5860 "result type must be at least 2 bits wide", Call);
5861
5862 bool IsDestTypeVector = DestTy->isVectorTy();
5863 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5864 "ucmp/scmp argument and result types must both be either vector or "
5865 "scalar types",
5866 Call);
5867 if (IsDestTypeVector) {
5868 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5869 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5870 Check(SrcVecLen == DestVecLen,
5871 "return type and arguments must have the same number of "
5872 "elements",
5873 Call);
5874 }
5875 break;
5876 }
5877 case Intrinsic::coro_id: {
5878 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5879 if (isa<ConstantPointerNull>(InfoArg))
5880 break;
5881 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5882 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5883 "info argument of llvm.coro.id must refer to an initialized "
5884 "constant");
5885 Constant *Init = GV->getInitializer();
5887 "info argument of llvm.coro.id must refer to either a struct or "
5888 "an array");
5889 break;
5890 }
5891 case Intrinsic::is_fpclass: {
5892 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5893 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5894 "unsupported bits for llvm.is.fpclass test mask");
5895 break;
5896 }
5897 case Intrinsic::fptrunc_round: {
5898 // Check the rounding mode
5899 Metadata *MD = nullptr;
5901 if (MAV)
5902 MD = MAV->getMetadata();
5903
5904 Check(MD != nullptr, "missing rounding mode argument", Call);
5905
5906 Check(isa<MDString>(MD),
5907 ("invalid value for llvm.fptrunc.round metadata operand"
5908 " (the operand should be a string)"),
5909 MD);
5910
5911 std::optional<RoundingMode> RoundMode =
5912 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5913 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5914 "unsupported rounding mode argument", Call);
5915 break;
5916 }
5917#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5918#include "llvm/IR/VPIntrinsics.def"
5919#undef BEGIN_REGISTER_VP_INTRINSIC
5920 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5921 break;
5922#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5923 case Intrinsic::INTRINSIC:
5924#include "llvm/IR/ConstrainedOps.def"
5925#undef INSTRUCTION
5926 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5927 break;
5928 case Intrinsic::dbg_declare: // llvm.dbg.declare
5929 case Intrinsic::dbg_value: // llvm.dbg.value
5930 case Intrinsic::dbg_assign: // llvm.dbg.assign
5931 case Intrinsic::dbg_label: // llvm.dbg.label
5932 // We no longer interpret debug intrinsics (the old variable-location
5933 // design). They're meaningless as far as LLVM is concerned we could make
5934 // it an error for them to appear, but it's possible we'll have users
5935 // converting back to intrinsics for the forseeable future (such as DXIL),
5936 // so tolerate their existance.
5937 break;
5938 case Intrinsic::memcpy:
5939 case Intrinsic::memcpy_inline:
5940 case Intrinsic::memmove:
5941 case Intrinsic::memset:
5942 case Intrinsic::memset_inline:
5943 break;
5944 case Intrinsic::experimental_memset_pattern: {
5945 const auto Memset = cast<MemSetPatternInst>(&Call);
5946 Check(Memset->getValue()->getType()->isSized(),
5947 "unsized types cannot be used as memset patterns", Call);
5948 break;
5949 }
5950 case Intrinsic::memcpy_element_unordered_atomic:
5951 case Intrinsic::memmove_element_unordered_atomic:
5952 case Intrinsic::memset_element_unordered_atomic: {
5953 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5954
5955 ConstantInt *ElementSizeCI =
5956 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5957 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5958 Check(ElementSizeVal.isPowerOf2(),
5959 "element size of the element-wise atomic memory intrinsic "
5960 "must be a power of 2",
5961 Call);
5962
5963 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5964 return Alignment && ElementSizeVal.ule(Alignment->value());
5965 };
5966 Check(IsValidAlignment(AMI->getDestAlign()),
5967 "incorrect alignment of the destination argument", Call);
5968 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5969 Check(IsValidAlignment(AMT->getSourceAlign()),
5970 "incorrect alignment of the source argument", Call);
5971 }
5972 break;
5973 }
5974 case Intrinsic::call_preallocated_setup: {
5975 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5976 bool FoundCall = false;
5977 for (User *U : Call.users()) {
5978 auto *UseCall = dyn_cast<CallBase>(U);
5979 Check(UseCall != nullptr,
5980 "Uses of llvm.call.preallocated.setup must be calls");
5981 Intrinsic::ID IID = UseCall->getIntrinsicID();
5982 if (IID == Intrinsic::call_preallocated_arg) {
5983 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5984 Check(AllocArgIndex != nullptr,
5985 "llvm.call.preallocated.alloc arg index must be a constant");
5986 auto AllocArgIndexInt = AllocArgIndex->getValue();
5987 Check(AllocArgIndexInt.sge(0) &&
5988 AllocArgIndexInt.slt(NumArgs->getValue()),
5989 "llvm.call.preallocated.alloc arg index must be between 0 and "
5990 "corresponding "
5991 "llvm.call.preallocated.setup's argument count");
5992 } else if (IID == Intrinsic::call_preallocated_teardown) {
5993 // nothing to do
5994 } else {
5995 Check(!FoundCall, "Can have at most one call corresponding to a "
5996 "llvm.call.preallocated.setup");
5997 FoundCall = true;
5998 size_t NumPreallocatedArgs = 0;
5999 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6000 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6001 ++NumPreallocatedArgs;
6002 }
6003 }
6004 Check(NumPreallocatedArgs != 0,
6005 "cannot use preallocated intrinsics on a call without "
6006 "preallocated arguments");
6007 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6008 "llvm.call.preallocated.setup arg size must be equal to number "
6009 "of preallocated arguments "
6010 "at call site",
6011 Call, *UseCall);
6012 // getOperandBundle() cannot be called if more than one of the operand
6013 // bundle exists. There is already a check elsewhere for this, so skip
6014 // here if we see more than one.
6015 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6016 1) {
6017 return;
6018 }
6019 auto PreallocatedBundle =
6020 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6021 Check(PreallocatedBundle,
6022 "Use of llvm.call.preallocated.setup outside intrinsics "
6023 "must be in \"preallocated\" operand bundle");
6024 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6025 "preallocated bundle must have token from corresponding "
6026 "llvm.call.preallocated.setup");
6027 }
6028 }
6029 break;
6030 }
6031 case Intrinsic::call_preallocated_arg: {
6032 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6033 Check(Token &&
6034 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6035 "llvm.call.preallocated.arg token argument must be a "
6036 "llvm.call.preallocated.setup");
6037 Check(Call.hasFnAttr(Attribute::Preallocated),
6038 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6039 "call site attribute");
6040 break;
6041 }
6042 case Intrinsic::call_preallocated_teardown: {
6043 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6044 Check(Token &&
6045 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6046 "llvm.call.preallocated.teardown token argument must be a "
6047 "llvm.call.preallocated.setup");
6048 break;
6049 }
6050 case Intrinsic::gcroot:
6051 case Intrinsic::gcwrite:
6052 case Intrinsic::gcread:
6053 if (ID == Intrinsic::gcroot) {
6054 AllocaInst *AI =
6056 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6058 "llvm.gcroot parameter #2 must be a constant.", Call);
6059 if (!AI->getAllocatedType()->isPointerTy()) {
6061 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6062 "or argument #2 must be a non-null constant.",
6063 Call);
6064 }
6065 }
6066
6067 Check(Call.getParent()->getParent()->hasGC(),
6068 "Enclosing function does not use GC.", Call);
6069 break;
6070 case Intrinsic::init_trampoline:
6072 "llvm.init_trampoline parameter #2 must resolve to a function.",
6073 Call);
6074 break;
6075 case Intrinsic::prefetch:
6076 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6077 "rw argument to llvm.prefetch must be 0-1", Call);
6078 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6079 "locality argument to llvm.prefetch must be 0-3", Call);
6080 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6081 "cache type argument to llvm.prefetch must be 0-1", Call);
6082 break;
6083 case Intrinsic::reloc_none: {
6085 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6086 "llvm.reloc.none argument must be a metadata string", &Call);
6087 break;
6088 }
6089 case Intrinsic::stackprotector:
6091 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6092 break;
6093 case Intrinsic::localescape: {
6094 BasicBlock *BB = Call.getParent();
6095 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6096 Call);
6097 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6098 Call);
6099 for (Value *Arg : Call.args()) {
6100 if (isa<ConstantPointerNull>(Arg))
6101 continue; // Null values are allowed as placeholders.
6102 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6103 Check(AI && AI->isStaticAlloca(),
6104 "llvm.localescape only accepts static allocas", Call);
6105 }
6106 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6107 SawFrameEscape = true;
6108 break;
6109 }
6110 case Intrinsic::localrecover: {
6112 Function *Fn = dyn_cast<Function>(FnArg);
6113 Check(Fn && !Fn->isDeclaration(),
6114 "llvm.localrecover first "
6115 "argument must be function defined in this module",
6116 Call);
6117 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6118 auto &Entry = FrameEscapeInfo[Fn];
6119 Entry.second = unsigned(
6120 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6121 break;
6122 }
6123
6124 case Intrinsic::experimental_gc_statepoint:
6125 if (auto *CI = dyn_cast<CallInst>(&Call))
6126 Check(!CI->isInlineAsm(),
6127 "gc.statepoint support for inline assembly unimplemented", CI);
6128 Check(Call.getParent()->getParent()->hasGC(),
6129 "Enclosing function does not use GC.", Call);
6130
6131 verifyStatepoint(Call);
6132 break;
6133 case Intrinsic::experimental_gc_result: {
6134 Check(Call.getParent()->getParent()->hasGC(),
6135 "Enclosing function does not use GC.", Call);
6136
6137 auto *Statepoint = Call.getArgOperand(0);
6138 if (isa<UndefValue>(Statepoint))
6139 break;
6140
6141 // Are we tied to a statepoint properly?
6142 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6143 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6144 Intrinsic::experimental_gc_statepoint,
6145 "gc.result operand #1 must be from a statepoint", Call,
6146 Call.getArgOperand(0));
6147
6148 // Check that result type matches wrapped callee.
6149 auto *TargetFuncType =
6150 cast<FunctionType>(StatepointCall->getParamElementType(2));
6151 Check(Call.getType() == TargetFuncType->getReturnType(),
6152 "gc.result result type does not match wrapped callee", Call);
6153 break;
6154 }
6155 case Intrinsic::experimental_gc_relocate: {
6156 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6157
6159 "gc.relocate must return a pointer or a vector of pointers", Call);
6160
6161 // Check that this relocate is correctly tied to the statepoint
6162
6163 // This is the case for a relocate on the unwinding path of an invoke statepoint
6164 if (LandingPadInst *LandingPad =
6166
6167 const BasicBlock *InvokeBB =
6168 LandingPad->getParent()->getUniquePredecessor();
6169
6170 // Landingpad relocates should have only one predecessor with invoke
6171 // statepoint terminator
6172 Check(InvokeBB, "safepoints should have unique landingpads",
6173 LandingPad->getParent());
6174 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6175 InvokeBB);
6177 "gc relocate should be linked to a statepoint", InvokeBB);
6178 } else {
6179 // In all other cases relocate should be tied to the statepoint directly.
6180 // This covers relocates on a normal return path of invoke statepoint and
6181 // relocates of a call statepoint.
6182 auto *Token = Call.getArgOperand(0);
6184 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6185 }
6186
6187 // Verify rest of the relocate arguments.
6188 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6189
6190 // Both the base and derived must be piped through the safepoint.
6193 "gc.relocate operand #2 must be integer offset", Call);
6194
6195 Value *Derived = Call.getArgOperand(2);
6196 Check(isa<ConstantInt>(Derived),
6197 "gc.relocate operand #3 must be integer offset", Call);
6198
6199 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6200 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6201
6202 // Check the bounds
6203 if (isa<UndefValue>(StatepointCall))
6204 break;
6205 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6206 .getOperandBundle(LLVMContext::OB_gc_live)) {
6207 Check(BaseIndex < Opt->Inputs.size(),
6208 "gc.relocate: statepoint base index out of bounds", Call);
6209 Check(DerivedIndex < Opt->Inputs.size(),
6210 "gc.relocate: statepoint derived index out of bounds", Call);
6211 }
6212
6213 // Relocated value must be either a pointer type or vector-of-pointer type,
6214 // but gc_relocate does not need to return the same pointer type as the
6215 // relocated pointer. It can be cast to the correct type later if it's
6216 // desired. However, they must have the same address space and 'vectorness'
6217 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6218 auto *ResultType = Call.getType();
6219 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6220 auto *BaseType = Relocate.getBasePtr()->getType();
6221
6222 Check(BaseType->isPtrOrPtrVectorTy(),
6223 "gc.relocate: relocated value must be a pointer", Call);
6224 Check(DerivedType->isPtrOrPtrVectorTy(),
6225 "gc.relocate: relocated value must be a pointer", Call);
6226
6227 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6228 "gc.relocate: vector relocates to vector and pointer to pointer",
6229 Call);
6230 Check(
6231 ResultType->getPointerAddressSpace() ==
6232 DerivedType->getPointerAddressSpace(),
6233 "gc.relocate: relocating a pointer shouldn't change its address space",
6234 Call);
6235
6236 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6237 Check(GC, "gc.relocate: calling function must have GCStrategy",
6238 Call.getFunction());
6239 if (GC) {
6240 auto isGCPtr = [&GC](Type *PTy) {
6241 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6242 };
6243 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6244 Check(isGCPtr(BaseType),
6245 "gc.relocate: relocated value must be a gc pointer", Call);
6246 Check(isGCPtr(DerivedType),
6247 "gc.relocate: relocated value must be a gc pointer", Call);
6248 }
6249 break;
6250 }
6251 case Intrinsic::experimental_patchpoint: {
6252 if (Call.getCallingConv() == CallingConv::AnyReg) {
6254 "patchpoint: invalid return type used with anyregcc", Call);
6255 }
6256 break;
6257 }
6258 case Intrinsic::eh_exceptioncode:
6259 case Intrinsic::eh_exceptionpointer: {
6261 "eh.exceptionpointer argument must be a catchpad", Call);
6262 break;
6263 }
6264 case Intrinsic::get_active_lane_mask: {
6266 "get_active_lane_mask: must return a "
6267 "vector",
6268 Call);
6269 auto *ElemTy = Call.getType()->getScalarType();
6270 Check(ElemTy->isIntegerTy(1),
6271 "get_active_lane_mask: element type is not "
6272 "i1",
6273 Call);
6274 break;
6275 }
6276 case Intrinsic::experimental_get_vector_length: {
6277 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6278 Check(!VF->isNegative() && !VF->isZero(),
6279 "get_vector_length: VF must be positive", Call);
6280 break;
6281 }
6282 case Intrinsic::masked_load: {
6283 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6284 Call);
6285
6287 Value *PassThru = Call.getArgOperand(2);
6288 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6289 Call);
6290 Check(PassThru->getType() == Call.getType(),
6291 "masked_load: pass through and return type must match", Call);
6292 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6293 cast<VectorType>(Call.getType())->getElementCount(),
6294 "masked_load: vector mask must be same length as return", Call);
6295 break;
6296 }
6297 case Intrinsic::masked_store: {
6298 Value *Val = Call.getArgOperand(0);
6300 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6301 Call);
6302 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6303 cast<VectorType>(Val->getType())->getElementCount(),
6304 "masked_store: vector mask must be same length as value", Call);
6305 break;
6306 }
6307
6308 case Intrinsic::experimental_guard: {
6309 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6311 "experimental_guard must have exactly one "
6312 "\"deopt\" operand bundle");
6313 break;
6314 }
6315
6316 case Intrinsic::experimental_deoptimize: {
6317 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6318 Call);
6320 "experimental_deoptimize must have exactly one "
6321 "\"deopt\" operand bundle");
6323 "experimental_deoptimize return type must match caller return type");
6324
6325 if (isa<CallInst>(Call)) {
6327 Check(RI,
6328 "calls to experimental_deoptimize must be followed by a return");
6329
6330 if (!Call.getType()->isVoidTy() && RI)
6331 Check(RI->getReturnValue() == &Call,
6332 "calls to experimental_deoptimize must be followed by a return "
6333 "of the value computed by experimental_deoptimize");
6334 }
6335
6336 break;
6337 }
6338 case Intrinsic::vastart: {
6340 "va_start called in a non-varargs function");
6341 break;
6342 }
6343 case Intrinsic::get_dynamic_area_offset: {
6344 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6345 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6346 IntTy->getBitWidth(),
6347 "get_dynamic_area_offset result type must be scalar integer matching "
6348 "alloca address space width",
6349 Call);
6350 break;
6351 }
6352 case Intrinsic::vector_reduce_and:
6353 case Intrinsic::vector_reduce_or:
6354 case Intrinsic::vector_reduce_xor:
6355 case Intrinsic::vector_reduce_add:
6356 case Intrinsic::vector_reduce_mul:
6357 case Intrinsic::vector_reduce_smax:
6358 case Intrinsic::vector_reduce_smin:
6359 case Intrinsic::vector_reduce_umax:
6360 case Intrinsic::vector_reduce_umin: {
6361 Type *ArgTy = Call.getArgOperand(0)->getType();
6362 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6363 "Intrinsic has incorrect argument type!");
6364 break;
6365 }
6366 case Intrinsic::vector_reduce_fmax:
6367 case Intrinsic::vector_reduce_fmin: {
6368 Type *ArgTy = Call.getArgOperand(0)->getType();
6369 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6370 "Intrinsic has incorrect argument type!");
6371 break;
6372 }
6373 case Intrinsic::vector_reduce_fadd:
6374 case Intrinsic::vector_reduce_fmul: {
6375 // Unlike the other reductions, the first argument is a start value. The
6376 // second argument is the vector to be reduced.
6377 Type *ArgTy = Call.getArgOperand(1)->getType();
6378 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6379 "Intrinsic has incorrect argument type!");
6380 break;
6381 }
6382 case Intrinsic::smul_fix:
6383 case Intrinsic::smul_fix_sat:
6384 case Intrinsic::umul_fix:
6385 case Intrinsic::umul_fix_sat:
6386 case Intrinsic::sdiv_fix:
6387 case Intrinsic::sdiv_fix_sat:
6388 case Intrinsic::udiv_fix:
6389 case Intrinsic::udiv_fix_sat: {
6390 Value *Op1 = Call.getArgOperand(0);
6391 Value *Op2 = Call.getArgOperand(1);
6393 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6394 "vector of ints");
6396 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6397 "vector of ints");
6398
6399 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6400 Check(Op3->getType()->isIntegerTy(),
6401 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6402 Check(Op3->getBitWidth() <= 32,
6403 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6404
6405 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6406 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6407 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6408 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6409 "the operands");
6410 } else {
6411 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6412 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6413 "to the width of the operands");
6414 }
6415 break;
6416 }
6417 case Intrinsic::lrint:
6418 case Intrinsic::llrint:
6419 case Intrinsic::lround:
6420 case Intrinsic::llround: {
6421 Type *ValTy = Call.getArgOperand(0)->getType();
6422 Type *ResultTy = Call.getType();
6423 auto *VTy = dyn_cast<VectorType>(ValTy);
6424 auto *RTy = dyn_cast<VectorType>(ResultTy);
6425 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6426 ExpectedName + ": argument must be floating-point or vector "
6427 "of floating-points, and result must be integer or "
6428 "vector of integers",
6429 &Call);
6430 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6431 ExpectedName + ": argument and result disagree on vector use", &Call);
6432 if (VTy) {
6433 Check(VTy->getElementCount() == RTy->getElementCount(),
6434 ExpectedName + ": argument must be same length as result", &Call);
6435 }
6436 break;
6437 }
6438 case Intrinsic::bswap: {
6439 Type *Ty = Call.getType();
6440 unsigned Size = Ty->getScalarSizeInBits();
6441 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6442 break;
6443 }
6444 case Intrinsic::invariant_start: {
6445 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6446 Check(InvariantSize &&
6447 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6448 "invariant_start parameter must be -1, 0 or a positive number",
6449 &Call);
6450 break;
6451 }
6452 case Intrinsic::matrix_multiply:
6453 case Intrinsic::matrix_transpose:
6454 case Intrinsic::matrix_column_major_load:
6455 case Intrinsic::matrix_column_major_store: {
6457 ConstantInt *Stride = nullptr;
6458 ConstantInt *NumRows;
6459 ConstantInt *NumColumns;
6460 VectorType *ResultTy;
6461 Type *Op0ElemTy = nullptr;
6462 Type *Op1ElemTy = nullptr;
6463 switch (ID) {
6464 case Intrinsic::matrix_multiply: {
6465 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6466 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6467 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6469 ->getNumElements() ==
6470 NumRows->getZExtValue() * N->getZExtValue(),
6471 "First argument of a matrix operation does not match specified "
6472 "shape!");
6474 ->getNumElements() ==
6475 N->getZExtValue() * NumColumns->getZExtValue(),
6476 "Second argument of a matrix operation does not match specified "
6477 "shape!");
6478
6479 ResultTy = cast<VectorType>(Call.getType());
6480 Op0ElemTy =
6481 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6482 Op1ElemTy =
6483 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6484 break;
6485 }
6486 case Intrinsic::matrix_transpose:
6487 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6488 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6489 ResultTy = cast<VectorType>(Call.getType());
6490 Op0ElemTy =
6491 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6492 break;
6493 case Intrinsic::matrix_column_major_load: {
6495 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6496 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6497 ResultTy = cast<VectorType>(Call.getType());
6498 break;
6499 }
6500 case Intrinsic::matrix_column_major_store: {
6502 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6503 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6504 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6505 Op0ElemTy =
6506 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6507 break;
6508 }
6509 default:
6510 llvm_unreachable("unexpected intrinsic");
6511 }
6512
6513 Check(ResultTy->getElementType()->isIntegerTy() ||
6514 ResultTy->getElementType()->isFloatingPointTy(),
6515 "Result type must be an integer or floating-point type!", IF);
6516
6517 if (Op0ElemTy)
6518 Check(ResultTy->getElementType() == Op0ElemTy,
6519 "Vector element type mismatch of the result and first operand "
6520 "vector!",
6521 IF);
6522
6523 if (Op1ElemTy)
6524 Check(ResultTy->getElementType() == Op1ElemTy,
6525 "Vector element type mismatch of the result and second operand "
6526 "vector!",
6527 IF);
6528
6530 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6531 "Result of a matrix operation does not fit in the returned vector!");
6532
6533 if (Stride) {
6534 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6535 IF);
6536 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6537 "Stride must be greater or equal than the number of rows!", IF);
6538 }
6539
6540 break;
6541 }
6542 case Intrinsic::vector_splice: {
6544 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6545 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6546 if (Call.getParent() && Call.getParent()->getParent()) {
6547 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6548 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6549 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6550 }
6551 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6552 (Idx >= 0 && Idx < KnownMinNumElements),
6553 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6554 "known minimum number of elements in the vector. For scalable "
6555 "vectors the minimum number of elements is determined from "
6556 "vscale_range.",
6557 &Call);
6558 break;
6559 }
6560 case Intrinsic::stepvector: {
6562 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6563 VecTy->getScalarSizeInBits() >= 8,
6564 "stepvector only supported for vectors of integers "
6565 "with a bitwidth of at least 8.",
6566 &Call);
6567 break;
6568 }
6569 case Intrinsic::experimental_vector_match: {
6570 Value *Op1 = Call.getArgOperand(0);
6571 Value *Op2 = Call.getArgOperand(1);
6573
6574 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6575 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6576 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6577
6578 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6580 "Second operand must be a fixed length vector.", &Call);
6581 Check(Op1Ty->getElementType()->isIntegerTy(),
6582 "First operand must be a vector of integers.", &Call);
6583 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6584 "First two operands must have the same element type.", &Call);
6585 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6586 "First operand and mask must have the same number of elements.",
6587 &Call);
6588 Check(MaskTy->getElementType()->isIntegerTy(1),
6589 "Mask must be a vector of i1's.", &Call);
6590 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6591 &Call);
6592 break;
6593 }
6594 case Intrinsic::vector_insert: {
6595 Value *Vec = Call.getArgOperand(0);
6596 Value *SubVec = Call.getArgOperand(1);
6597 Value *Idx = Call.getArgOperand(2);
6598 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6599
6600 VectorType *VecTy = cast<VectorType>(Vec->getType());
6601 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6602
6603 ElementCount VecEC = VecTy->getElementCount();
6604 ElementCount SubVecEC = SubVecTy->getElementCount();
6605 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6606 "vector_insert parameters must have the same element "
6607 "type.",
6608 &Call);
6609 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6610 "vector_insert index must be a constant multiple of "
6611 "the subvector's known minimum vector length.");
6612
6613 // If this insertion is not the 'mixed' case where a fixed vector is
6614 // inserted into a scalable vector, ensure that the insertion of the
6615 // subvector does not overrun the parent vector.
6616 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6617 Check(IdxN < VecEC.getKnownMinValue() &&
6618 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6619 "subvector operand of vector_insert would overrun the "
6620 "vector being inserted into.");
6621 }
6622 break;
6623 }
6624 case Intrinsic::vector_extract: {
6625 Value *Vec = Call.getArgOperand(0);
6626 Value *Idx = Call.getArgOperand(1);
6627 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6628
6629 VectorType *ResultTy = cast<VectorType>(Call.getType());
6630 VectorType *VecTy = cast<VectorType>(Vec->getType());
6631
6632 ElementCount VecEC = VecTy->getElementCount();
6633 ElementCount ResultEC = ResultTy->getElementCount();
6634
6635 Check(ResultTy->getElementType() == VecTy->getElementType(),
6636 "vector_extract result must have the same element "
6637 "type as the input vector.",
6638 &Call);
6639 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6640 "vector_extract index must be a constant multiple of "
6641 "the result type's known minimum vector length.");
6642
6643 // If this extraction is not the 'mixed' case where a fixed vector is
6644 // extracted from a scalable vector, ensure that the extraction does not
6645 // overrun the parent vector.
6646 if (VecEC.isScalable() == ResultEC.isScalable()) {
6647 Check(IdxN < VecEC.getKnownMinValue() &&
6648 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6649 "vector_extract would overrun.");
6650 }
6651 break;
6652 }
6653 case Intrinsic::vector_partial_reduce_fadd:
6654 case Intrinsic::vector_partial_reduce_add: {
6657
6658 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6659 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6660
6661 Check((VecWidth % AccWidth) == 0,
6662 "Invalid vector widths for partial "
6663 "reduction. The width of the input vector "
6664 "must be a positive integer multiple of "
6665 "the width of the accumulator vector.");
6666 break;
6667 }
6668 case Intrinsic::experimental_noalias_scope_decl: {
6669 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6670 break;
6671 }
6672 case Intrinsic::preserve_array_access_index:
6673 case Intrinsic::preserve_struct_access_index:
6674 case Intrinsic::aarch64_ldaxr:
6675 case Intrinsic::aarch64_ldxr:
6676 case Intrinsic::arm_ldaex:
6677 case Intrinsic::arm_ldrex: {
6678 Type *ElemTy = Call.getParamElementType(0);
6679 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6680 &Call);
6681 break;
6682 }
6683 case Intrinsic::aarch64_stlxr:
6684 case Intrinsic::aarch64_stxr:
6685 case Intrinsic::arm_stlex:
6686 case Intrinsic::arm_strex: {
6687 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6688 Check(ElemTy,
6689 "Intrinsic requires elementtype attribute on second argument.",
6690 &Call);
6691 break;
6692 }
6693 case Intrinsic::aarch64_prefetch: {
6694 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6695 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6696 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6697 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6698 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6699 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6700 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6701 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6702 break;
6703 }
6704 case Intrinsic::callbr_landingpad: {
6705 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6706 Check(CBR, "intrinstic requires callbr operand", &Call);
6707 if (!CBR)
6708 break;
6709
6710 const BasicBlock *LandingPadBB = Call.getParent();
6711 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6712 if (!PredBB) {
6713 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6714 break;
6715 }
6716 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6717 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6718 &Call);
6719 break;
6720 }
6721 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6722 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6723 "block in indirect destination list",
6724 &Call);
6725 const Instruction &First = *LandingPadBB->begin();
6726 Check(&First == &Call, "No other instructions may proceed intrinsic",
6727 &Call);
6728 break;
6729 }
6730 case Intrinsic::amdgcn_cs_chain: {
6731 auto CallerCC = Call.getCaller()->getCallingConv();
6732 switch (CallerCC) {
6733 case CallingConv::AMDGPU_CS:
6734 case CallingConv::AMDGPU_CS_Chain:
6735 case CallingConv::AMDGPU_CS_ChainPreserve:
6736 break;
6737 default:
6738 CheckFailed("Intrinsic can only be used from functions with the "
6739 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6740 "calling conventions",
6741 &Call);
6742 break;
6743 }
6744
6745 Check(Call.paramHasAttr(2, Attribute::InReg),
6746 "SGPR arguments must have the `inreg` attribute", &Call);
6747 Check(!Call.paramHasAttr(3, Attribute::InReg),
6748 "VGPR arguments must not have the `inreg` attribute", &Call);
6749
6750 auto *Next = Call.getNextNode();
6751 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6752 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6753 Intrinsic::amdgcn_unreachable;
6754 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6755 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6756 break;
6757 }
6758 case Intrinsic::amdgcn_init_exec_from_input: {
6759 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6760 Check(Arg && Arg->hasInRegAttr(),
6761 "only inreg arguments to the parent function are valid as inputs to "
6762 "this intrinsic",
6763 &Call);
6764 break;
6765 }
6766 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6767 auto CallerCC = Call.getCaller()->getCallingConv();
6768 switch (CallerCC) {
6769 case CallingConv::AMDGPU_CS_Chain:
6770 case CallingConv::AMDGPU_CS_ChainPreserve:
6771 break;
6772 default:
6773 CheckFailed("Intrinsic can only be used from functions with the "
6774 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6775 "calling conventions",
6776 &Call);
6777 break;
6778 }
6779
6780 unsigned InactiveIdx = 1;
6781 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6782 "Value for inactive lanes must not have the `inreg` attribute",
6783 &Call);
6784 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6785 "Value for inactive lanes must be a function argument", &Call);
6786 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6787 "Value for inactive lanes must be a VGPR function argument", &Call);
6788 break;
6789 }
6790 case Intrinsic::amdgcn_call_whole_wave: {
6792 Check(F, "Indirect whole wave calls are not allowed", &Call);
6793
6794 CallingConv::ID CC = F->getCallingConv();
6795 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6796 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6797 &Call);
6798
6799 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6800
6801 Check(Call.arg_size() == F->arg_size(),
6802 "Call argument count must match callee argument count", &Call);
6803
6804 // The first argument of the call is the callee, and the first argument of
6805 // the callee is the active mask. The rest of the arguments must match.
6806 Check(F->arg_begin()->getType()->isIntegerTy(1),
6807 "Callee must have i1 as its first argument", &Call);
6808 for (auto [CallArg, FuncArg] :
6809 drop_begin(zip_equal(Call.args(), F->args()))) {
6810 Check(CallArg->getType() == FuncArg.getType(),
6811 "Argument types must match", &Call);
6812
6813 // Check that inreg attributes match between call site and function
6814 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6815 FuncArg.hasInRegAttr(),
6816 "Argument inreg attributes must match", &Call);
6817 }
6818 break;
6819 }
6820 case Intrinsic::amdgcn_s_prefetch_data: {
6821 Check(
6824 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6825 break;
6826 }
6827 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6828 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6829 Value *Src0 = Call.getArgOperand(0);
6830 Value *Src1 = Call.getArgOperand(1);
6831
6832 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6833 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6834 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6835 Call.getArgOperand(3));
6836 Check(BLGP <= 4, "invalid value for blgp format", Call,
6837 Call.getArgOperand(4));
6838
6839 // AMDGPU::MFMAScaleFormats values
6840 auto getFormatNumRegs = [](unsigned FormatVal) {
6841 switch (FormatVal) {
6842 case 0:
6843 case 1:
6844 return 8u;
6845 case 2:
6846 case 3:
6847 return 6u;
6848 case 4:
6849 return 4u;
6850 default:
6851 llvm_unreachable("invalid format value");
6852 }
6853 };
6854
6855 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6856 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6857 return false;
6858 unsigned NumElts = Ty->getNumElements();
6859 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6860 };
6861
6862 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6863 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6864 Check(isValidSrcASrcBVector(Src0Ty),
6865 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6866 Check(isValidSrcASrcBVector(Src1Ty),
6867 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6868
6869 // Permit excess registers for the format.
6870 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6871 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6872 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6873 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6874 break;
6875 }
6876 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6877 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6878 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6879 Value *Src0 = Call.getArgOperand(1);
6880 Value *Src1 = Call.getArgOperand(3);
6881
6882 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6883 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6884 Check(FmtA <= 4, "invalid value for matrix format", Call,
6885 Call.getArgOperand(0));
6886 Check(FmtB <= 4, "invalid value for matrix format", Call,
6887 Call.getArgOperand(2));
6888
6889 // AMDGPU::MatrixFMT values
6890 auto getFormatNumRegs = [](unsigned FormatVal) {
6891 switch (FormatVal) {
6892 case 0:
6893 case 1:
6894 return 16u;
6895 case 2:
6896 case 3:
6897 return 12u;
6898 case 4:
6899 return 8u;
6900 default:
6901 llvm_unreachable("invalid format value");
6902 }
6903 };
6904
6905 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6906 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6907 return false;
6908 unsigned NumElts = Ty->getNumElements();
6909 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6910 };
6911
6912 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6913 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6914 Check(isValidSrcASrcBVector(Src0Ty),
6915 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6916 Check(isValidSrcASrcBVector(Src1Ty),
6917 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6918
6919 // Permit excess registers for the format.
6920 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6921 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6922 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6923 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6924 break;
6925 }
6926 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6927 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6928 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6929 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6930 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6931 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6932 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6933 Value *PtrArg = Call.getArgOperand(0);
6934 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6936 "cooperative atomic intrinsics require a generic or global pointer",
6937 &Call, PtrArg);
6938
6939 // Last argument must be a MD string
6941 MDNode *MD = cast<MDNode>(Op->getMetadata());
6942 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6943 "cooperative atomic intrinsics require that the last argument is a "
6944 "metadata string",
6945 &Call, Op);
6946 break;
6947 }
6948 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6949 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6950 Value *V = Call.getArgOperand(0);
6951 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6952 Check(RegCount % 8 == 0,
6953 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6954 break;
6955 }
6956 case Intrinsic::experimental_convergence_entry:
6957 case Intrinsic::experimental_convergence_anchor:
6958 break;
6959 case Intrinsic::experimental_convergence_loop:
6960 break;
6961 case Intrinsic::ptrmask: {
6962 Type *Ty0 = Call.getArgOperand(0)->getType();
6963 Type *Ty1 = Call.getArgOperand(1)->getType();
6965 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6966 "of pointers",
6967 &Call);
6968 Check(
6969 Ty0->isVectorTy() == Ty1->isVectorTy(),
6970 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6971 &Call);
6972 if (Ty0->isVectorTy())
6973 Check(cast<VectorType>(Ty0)->getElementCount() ==
6974 cast<VectorType>(Ty1)->getElementCount(),
6975 "llvm.ptrmask intrinsic arguments must have the same number of "
6976 "elements",
6977 &Call);
6978 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6979 "llvm.ptrmask intrinsic second argument bitwidth must match "
6980 "pointer index type size of first argument",
6981 &Call);
6982 break;
6983 }
6984 case Intrinsic::thread_pointer: {
6986 DL.getDefaultGlobalsAddressSpace(),
6987 "llvm.thread.pointer intrinsic return type must be for the globals "
6988 "address space",
6989 &Call);
6990 break;
6991 }
6992 case Intrinsic::threadlocal_address: {
6993 const Value &Arg0 = *Call.getArgOperand(0);
6994 Check(isa<GlobalValue>(Arg0),
6995 "llvm.threadlocal.address first argument must be a GlobalValue");
6996 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6997 "llvm.threadlocal.address operand isThreadLocal() must be true");
6998 break;
6999 }
7000 case Intrinsic::lifetime_start:
7001 case Intrinsic::lifetime_end: {
7002 Value *Ptr = Call.getArgOperand(0);
7004 "llvm.lifetime.start/end can only be used on alloca or poison",
7005 &Call);
7006 break;
7007 }
7008 };
7009
7010 // Verify that there aren't any unmediated control transfers between funclets.
7012 Function *F = Call.getParent()->getParent();
7013 if (F->hasPersonalityFn() &&
7014 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7015 // Run EH funclet coloring on-demand and cache results for other intrinsic
7016 // calls in this function
7017 if (BlockEHFuncletColors.empty())
7018 BlockEHFuncletColors = colorEHFunclets(*F);
7019
7020 // Check for catch-/cleanup-pad in first funclet block
7021 bool InEHFunclet = false;
7022 BasicBlock *CallBB = Call.getParent();
7023 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7024 assert(CV.size() > 0 && "Uncolored block");
7025 for (BasicBlock *ColorFirstBB : CV)
7026 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7027 It != ColorFirstBB->end())
7029 InEHFunclet = true;
7030
7031 // Check for funclet operand bundle
7032 bool HasToken = false;
7033 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7035 HasToken = true;
7036
7037 // This would cause silent code truncation in WinEHPrepare
7038 if (InEHFunclet)
7039 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7040 }
7041 }
7042}
7043
7044/// Carefully grab the subprogram from a local scope.
7045///
7046/// This carefully grabs the subprogram from a local scope, avoiding the
7047/// built-in assertions that would typically fire.
7049 if (!LocalScope)
7050 return nullptr;
7051
7052 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7053 return SP;
7054
7055 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7056 return getSubprogram(LB->getRawScope());
7057
7058 // Just return null; broken scope chains are checked elsewhere.
7059 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7060 return nullptr;
7061}
7062
7063void Verifier::visit(DbgLabelRecord &DLR) {
7065 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7066
7067 // Ignore broken !dbg attachments; they're checked elsewhere.
7068 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7069 if (!isa<DILocation>(N))
7070 return;
7071
7072 BasicBlock *BB = DLR.getParent();
7073 Function *F = BB ? BB->getParent() : nullptr;
7074
7075 // The scopes for variables and !dbg attachments must agree.
7076 DILabel *Label = DLR.getLabel();
7077 DILocation *Loc = DLR.getDebugLoc();
7078 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7079
7080 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7081 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7082 if (!LabelSP || !LocSP)
7083 return;
7084
7085 CheckDI(LabelSP == LocSP,
7086 "mismatched subprogram between #dbg_label label and !dbg attachment",
7087 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7088 Loc->getScope()->getSubprogram());
7089}
7090
7091void Verifier::visit(DbgVariableRecord &DVR) {
7092 BasicBlock *BB = DVR.getParent();
7093 Function *F = BB->getParent();
7094
7095 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7096 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7097 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7098 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7099 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7100
7101 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7102 // DIArgList, or an empty MDNode (which is a legacy representation for an
7103 // "undef" location).
7104 auto *MD = DVR.getRawLocation();
7105 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7106 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7107 "invalid #dbg record address/value", &DVR, MD, BB, F);
7108 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7109 visitValueAsMetadata(*VAM, F);
7110 if (DVR.isDbgDeclare()) {
7111 // Allow integers here to support inttoptr salvage.
7112 Type *Ty = VAM->getValue()->getType();
7113 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7114 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7115 F);
7116 }
7117 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7118 visitDIArgList(*AL, F);
7119 }
7120
7122 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7123 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7124
7126 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7127 F);
7128 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7129
7130 if (DVR.isDbgAssign()) {
7132 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7133 F);
7134 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7135 AreDebugLocsAllowed::No);
7136
7137 const auto *RawAddr = DVR.getRawAddress();
7138 // Similarly to the location above, the address for an assign
7139 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7140 // represents an undef address.
7141 CheckDI(
7142 isa<ValueAsMetadata>(RawAddr) ||
7143 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7144 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7145 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7146 visitValueAsMetadata(*VAM, F);
7147
7149 "invalid #dbg_assign address expression", &DVR,
7150 DVR.getRawAddressExpression(), BB, F);
7151 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7152
7153 // All of the linked instructions should be in the same function as DVR.
7154 for (Instruction *I : at::getAssignmentInsts(&DVR))
7155 CheckDI(DVR.getFunction() == I->getFunction(),
7156 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7157 }
7158
7159 // This check is redundant with one in visitLocalVariable().
7160 DILocalVariable *Var = DVR.getVariable();
7161 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7162 BB, F);
7163
7164 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7165 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7166 &DVR, DLNode, BB, F);
7167 DILocation *Loc = DVR.getDebugLoc();
7168
7169 // The scopes for variables and !dbg attachments must agree.
7170 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7171 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7172 if (!VarSP || !LocSP)
7173 return; // Broken scope chains are checked elsewhere.
7174
7175 CheckDI(VarSP == LocSP,
7176 "mismatched subprogram between #dbg record variable and DILocation",
7177 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7178 Loc->getScope()->getSubprogram(), BB, F);
7179
7180 verifyFnArgs(DVR);
7181}
7182
7183void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7184 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7185 auto *RetTy = cast<VectorType>(VPCast->getType());
7186 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7187 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7188 "VP cast intrinsic first argument and result vector lengths must be "
7189 "equal",
7190 *VPCast);
7191
7192 switch (VPCast->getIntrinsicID()) {
7193 default:
7194 llvm_unreachable("Unknown VP cast intrinsic");
7195 case Intrinsic::vp_trunc:
7196 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7197 "llvm.vp.trunc intrinsic first argument and result element type "
7198 "must be integer",
7199 *VPCast);
7200 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7201 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7202 "larger than the bit size of the return type",
7203 *VPCast);
7204 break;
7205 case Intrinsic::vp_zext:
7206 case Intrinsic::vp_sext:
7207 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7208 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7209 "element type must be integer",
7210 *VPCast);
7211 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7212 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7213 "argument must be smaller than the bit size of the return type",
7214 *VPCast);
7215 break;
7216 case Intrinsic::vp_fptoui:
7217 case Intrinsic::vp_fptosi:
7218 case Intrinsic::vp_lrint:
7219 case Intrinsic::vp_llrint:
7220 Check(
7221 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7222 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7223 "type must be floating-point and result element type must be integer",
7224 *VPCast);
7225 break;
7226 case Intrinsic::vp_uitofp:
7227 case Intrinsic::vp_sitofp:
7228 Check(
7229 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7230 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7231 "type must be integer and result element type must be floating-point",
7232 *VPCast);
7233 break;
7234 case Intrinsic::vp_fptrunc:
7235 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7236 "llvm.vp.fptrunc intrinsic first argument and result element type "
7237 "must be floating-point",
7238 *VPCast);
7239 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7240 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7241 "larger than the bit size of the return type",
7242 *VPCast);
7243 break;
7244 case Intrinsic::vp_fpext:
7245 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7246 "llvm.vp.fpext intrinsic first argument and result element type "
7247 "must be floating-point",
7248 *VPCast);
7249 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7250 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7251 "smaller than the bit size of the return type",
7252 *VPCast);
7253 break;
7254 case Intrinsic::vp_ptrtoint:
7255 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7256 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7257 "pointer and result element type must be integer",
7258 *VPCast);
7259 break;
7260 case Intrinsic::vp_inttoptr:
7261 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7262 "llvm.vp.inttoptr intrinsic first argument element type must be "
7263 "integer and result element type must be pointer",
7264 *VPCast);
7265 break;
7266 }
7267 }
7268
7269 switch (VPI.getIntrinsicID()) {
7270 case Intrinsic::vp_fcmp: {
7271 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7273 "invalid predicate for VP FP comparison intrinsic", &VPI);
7274 break;
7275 }
7276 case Intrinsic::vp_icmp: {
7277 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7279 "invalid predicate for VP integer comparison intrinsic", &VPI);
7280 break;
7281 }
7282 case Intrinsic::vp_is_fpclass: {
7283 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7284 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7285 "unsupported bits for llvm.vp.is.fpclass test mask");
7286 break;
7287 }
7288 case Intrinsic::experimental_vp_splice: {
7289 VectorType *VecTy = cast<VectorType>(VPI.getType());
7290 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7291 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7292 if (VPI.getParent() && VPI.getParent()->getParent()) {
7293 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7294 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7295 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7296 }
7297 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7298 (Idx >= 0 && Idx < KnownMinNumElements),
7299 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7300 "known minimum number of elements in the vector. For scalable "
7301 "vectors the minimum number of elements is determined from "
7302 "vscale_range.",
7303 &VPI);
7304 break;
7305 }
7306 }
7307}
7308
7309void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7310 unsigned NumOperands = FPI.getNonMetadataArgCount();
7311 bool HasRoundingMD =
7313
7314 // Add the expected number of metadata operands.
7315 NumOperands += (1 + HasRoundingMD);
7316
7317 // Compare intrinsics carry an extra predicate metadata operand.
7319 NumOperands += 1;
7320 Check((FPI.arg_size() == NumOperands),
7321 "invalid arguments for constrained FP intrinsic", &FPI);
7322
7323 switch (FPI.getIntrinsicID()) {
7324 case Intrinsic::experimental_constrained_lrint:
7325 case Intrinsic::experimental_constrained_llrint: {
7326 Type *ValTy = FPI.getArgOperand(0)->getType();
7327 Type *ResultTy = FPI.getType();
7328 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7329 "Intrinsic does not support vectors", &FPI);
7330 break;
7331 }
7332
7333 case Intrinsic::experimental_constrained_lround:
7334 case Intrinsic::experimental_constrained_llround: {
7335 Type *ValTy = FPI.getArgOperand(0)->getType();
7336 Type *ResultTy = FPI.getType();
7337 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7338 "Intrinsic does not support vectors", &FPI);
7339 break;
7340 }
7341
7342 case Intrinsic::experimental_constrained_fcmp:
7343 case Intrinsic::experimental_constrained_fcmps: {
7344 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7346 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7347 break;
7348 }
7349
7350 case Intrinsic::experimental_constrained_fptosi:
7351 case Intrinsic::experimental_constrained_fptoui: {
7352 Value *Operand = FPI.getArgOperand(0);
7353 ElementCount SrcEC;
7354 Check(Operand->getType()->isFPOrFPVectorTy(),
7355 "Intrinsic first argument must be floating point", &FPI);
7356 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7357 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7358 }
7359
7360 Operand = &FPI;
7361 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7362 "Intrinsic first argument and result disagree on vector use", &FPI);
7363 Check(Operand->getType()->isIntOrIntVectorTy(),
7364 "Intrinsic result must be an integer", &FPI);
7365 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7366 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7367 "Intrinsic first argument and result vector lengths must be equal",
7368 &FPI);
7369 }
7370 break;
7371 }
7372
7373 case Intrinsic::experimental_constrained_sitofp:
7374 case Intrinsic::experimental_constrained_uitofp: {
7375 Value *Operand = FPI.getArgOperand(0);
7376 ElementCount SrcEC;
7377 Check(Operand->getType()->isIntOrIntVectorTy(),
7378 "Intrinsic first argument must be integer", &FPI);
7379 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7380 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7381 }
7382
7383 Operand = &FPI;
7384 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7385 "Intrinsic first argument and result disagree on vector use", &FPI);
7386 Check(Operand->getType()->isFPOrFPVectorTy(),
7387 "Intrinsic result must be a floating point", &FPI);
7388 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7389 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7390 "Intrinsic first argument and result vector lengths must be equal",
7391 &FPI);
7392 }
7393 break;
7394 }
7395
7396 case Intrinsic::experimental_constrained_fptrunc:
7397 case Intrinsic::experimental_constrained_fpext: {
7398 Value *Operand = FPI.getArgOperand(0);
7399 Type *OperandTy = Operand->getType();
7400 Value *Result = &FPI;
7401 Type *ResultTy = Result->getType();
7402 Check(OperandTy->isFPOrFPVectorTy(),
7403 "Intrinsic first argument must be FP or FP vector", &FPI);
7404 Check(ResultTy->isFPOrFPVectorTy(),
7405 "Intrinsic result must be FP or FP vector", &FPI);
7406 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7407 "Intrinsic first argument and result disagree on vector use", &FPI);
7408 if (OperandTy->isVectorTy()) {
7409 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7410 cast<VectorType>(ResultTy)->getElementCount(),
7411 "Intrinsic first argument and result vector lengths must be equal",
7412 &FPI);
7413 }
7414 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7415 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7416 "Intrinsic first argument's type must be larger than result type",
7417 &FPI);
7418 } else {
7419 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7420 "Intrinsic first argument's type must be smaller than result type",
7421 &FPI);
7422 }
7423 break;
7424 }
7425
7426 default:
7427 break;
7428 }
7429
7430 // If a non-metadata argument is passed in a metadata slot then the
7431 // error will be caught earlier when the incorrect argument doesn't
7432 // match the specification in the intrinsic call table. Thus, no
7433 // argument type check is needed here.
7434
7435 Check(FPI.getExceptionBehavior().has_value(),
7436 "invalid exception behavior argument", &FPI);
7437 if (HasRoundingMD) {
7438 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7439 &FPI);
7440 }
7441}
7442
7443void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7444 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7445 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7446
7447 // We don't know whether this intrinsic verified correctly.
7448 if (!V || !E || !E->isValid())
7449 return;
7450
7451 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7452 auto Fragment = E->getFragmentInfo();
7453 if (!Fragment)
7454 return;
7455
7456 // The frontend helps out GDB by emitting the members of local anonymous
7457 // unions as artificial local variables with shared storage. When SROA splits
7458 // the storage for artificial local variables that are smaller than the entire
7459 // union, the overhang piece will be outside of the allotted space for the
7460 // variable and this check fails.
7461 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7462 if (V->isArtificial())
7463 return;
7464
7465 verifyFragmentExpression(*V, *Fragment, &DVR);
7466}
7467
7468template <typename ValueOrMetadata>
7469void Verifier::verifyFragmentExpression(const DIVariable &V,
7471 ValueOrMetadata *Desc) {
7472 // If there's no size, the type is broken, but that should be checked
7473 // elsewhere.
7474 auto VarSize = V.getSizeInBits();
7475 if (!VarSize)
7476 return;
7477
7478 unsigned FragSize = Fragment.SizeInBits;
7479 unsigned FragOffset = Fragment.OffsetInBits;
7480 CheckDI(FragSize + FragOffset <= *VarSize,
7481 "fragment is larger than or outside of variable", Desc, &V);
7482 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7483}
7484
7485void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7486 // This function does not take the scope of noninlined function arguments into
7487 // account. Don't run it if current function is nodebug, because it may
7488 // contain inlined debug intrinsics.
7489 if (!HasDebugInfo)
7490 return;
7491
7492 // For performance reasons only check non-inlined ones.
7493 if (DVR.getDebugLoc()->getInlinedAt())
7494 return;
7495
7496 DILocalVariable *Var = DVR.getVariable();
7497 CheckDI(Var, "#dbg record without variable");
7498
7499 unsigned ArgNo = Var->getArg();
7500 if (!ArgNo)
7501 return;
7502
7503 // Verify there are no duplicate function argument debug info entries.
7504 // These will cause hard-to-debug assertions in the DWARF backend.
7505 if (DebugFnArgs.size() < ArgNo)
7506 DebugFnArgs.resize(ArgNo, nullptr);
7507
7508 auto *Prev = DebugFnArgs[ArgNo - 1];
7509 DebugFnArgs[ArgNo - 1] = Var;
7510 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7511 Prev, Var);
7512}
7513
7514void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7515 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7516
7517 // We don't know whether this intrinsic verified correctly.
7518 if (!E || !E->isValid())
7519 return;
7520
7522 Value *VarValue = DVR.getVariableLocationOp(0);
7523 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7524 return;
7525 // We allow EntryValues for swift async arguments, as they have an
7526 // ABI-guarantee to be turned into a specific register.
7527 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7528 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7529 return;
7530 }
7531
7532 CheckDI(!E->isEntryValue(),
7533 "Entry values are only allowed in MIR unless they target a "
7534 "swiftasync Argument",
7535 &DVR);
7536}
7537
7538void Verifier::verifyCompileUnits() {
7539 // When more than one Module is imported into the same context, such as during
7540 // an LTO build before linking the modules, ODR type uniquing may cause types
7541 // to point to a different CU. This check does not make sense in this case.
7542 if (M.getContext().isODRUniquingDebugTypes())
7543 return;
7544 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7545 SmallPtrSet<const Metadata *, 2> Listed;
7546 if (CUs)
7547 Listed.insert_range(CUs->operands());
7548 for (const auto *CU : CUVisited)
7549 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7550 CUVisited.clear();
7551}
7552
7553void Verifier::verifyDeoptimizeCallingConvs() {
7554 if (DeoptimizeDeclarations.empty())
7555 return;
7556
7557 const Function *First = DeoptimizeDeclarations[0];
7558 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7559 Check(First->getCallingConv() == F->getCallingConv(),
7560 "All llvm.experimental.deoptimize declarations must have the same "
7561 "calling convention",
7562 First, F);
7563 }
7564}
7565
7566void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7567 const OperandBundleUse &BU) {
7568 FunctionType *FTy = Call.getFunctionType();
7569
7570 Check((FTy->getReturnType()->isPointerTy() ||
7571 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7572 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7573 "function returning a pointer or a non-returning function that has a "
7574 "void return type",
7575 Call);
7576
7577 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7578 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7579 "an argument",
7580 Call);
7581
7582 auto *Fn = cast<Function>(BU.Inputs.front());
7583 Intrinsic::ID IID = Fn->getIntrinsicID();
7584
7585 if (IID) {
7586 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7587 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7588 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7589 "invalid function argument", Call);
7590 } else {
7591 StringRef FnName = Fn->getName();
7592 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7593 FnName == "objc_claimAutoreleasedReturnValue" ||
7594 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7595 "invalid function argument", Call);
7596 }
7597}
7598
7599void Verifier::verifyNoAliasScopeDecl() {
7600 if (NoAliasScopeDecls.empty())
7601 return;
7602
7603 // only a single scope must be declared at a time.
7604 for (auto *II : NoAliasScopeDecls) {
7605 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7606 "Not a llvm.experimental.noalias.scope.decl ?");
7607 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7609 Check(ScopeListMV != nullptr,
7610 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7611 "argument",
7612 II);
7613
7614 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7615 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7616 Check(ScopeListMD->getNumOperands() == 1,
7617 "!id.scope.list must point to a list with a single scope", II);
7618 visitAliasScopeListMetadata(ScopeListMD);
7619 }
7620
7621 // Only check the domination rule when requested. Once all passes have been
7622 // adapted this option can go away.
7624 return;
7625
7626 // Now sort the intrinsics based on the scope MDNode so that declarations of
7627 // the same scopes are next to each other.
7628 auto GetScope = [](IntrinsicInst *II) {
7629 const auto *ScopeListMV = cast<MetadataAsValue>(
7631 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7632 };
7633
7634 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7635 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7636 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7637 return GetScope(Lhs) < GetScope(Rhs);
7638 };
7639
7640 llvm::sort(NoAliasScopeDecls, Compare);
7641
7642 // Go over the intrinsics and check that for the same scope, they are not
7643 // dominating each other.
7644 auto ItCurrent = NoAliasScopeDecls.begin();
7645 while (ItCurrent != NoAliasScopeDecls.end()) {
7646 auto CurScope = GetScope(*ItCurrent);
7647 auto ItNext = ItCurrent;
7648 do {
7649 ++ItNext;
7650 } while (ItNext != NoAliasScopeDecls.end() &&
7651 GetScope(*ItNext) == CurScope);
7652
7653 // [ItCurrent, ItNext) represents the declarations for the same scope.
7654 // Ensure they are not dominating each other.. but only if it is not too
7655 // expensive.
7656 if (ItNext - ItCurrent < 32)
7657 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7658 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7659 if (I != J)
7660 Check(!DT.dominates(I, J),
7661 "llvm.experimental.noalias.scope.decl dominates another one "
7662 "with the same scope",
7663 I);
7664 ItCurrent = ItNext;
7665 }
7666}
7667
//===----------------------------------------------------------------------===//
// Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//
7671
7673 Function &F = const_cast<Function &>(f);
7674
7675 // Don't use a raw_null_ostream. Printing IR is expensive.
7676 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7677
7678 // Note that this function's return value is inverted from what you would
7679 // expect of a function called "verify".
7680 return !V.verify(F);
7681}
7682
7684 bool *BrokenDebugInfo) {
7685 // Don't use a raw_null_ostream. Printing IR is expensive.
7686 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7687
7688 bool Broken = false;
7689 for (const Function &F : M)
7690 Broken |= !V.verify(F);
7691
7692 Broken |= !V.verify();
7693 if (BrokenDebugInfo)
7694 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7695 // Note that this function's return value is inverted from what you would
7696 // expect of a function called "verify".
7697 return Broken;
7698}
7699
7700namespace {
7701
7702struct VerifierLegacyPass : public FunctionPass {
7703 static char ID;
7704
7705 std::unique_ptr<Verifier> V;
7706 bool FatalErrors = true;
7707
7708 VerifierLegacyPass() : FunctionPass(ID) {
7710 }
7711 explicit VerifierLegacyPass(bool FatalErrors)
7712 : FunctionPass(ID),
7713 FatalErrors(FatalErrors) {
7715 }
7716
7717 bool doInitialization(Module &M) override {
7718 V = std::make_unique<Verifier>(
7719 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7720 return false;
7721 }
7722
7723 bool runOnFunction(Function &F) override {
7724 if (!V->verify(F) && FatalErrors) {
7725 errs() << "in function " << F.getName() << '\n';
7726 report_fatal_error("Broken function found, compilation aborted!");
7727 }
7728 return false;
7729 }
7730
7731 bool doFinalization(Module &M) override {
7732 bool HasErrors = false;
7733 for (Function &F : M)
7734 if (F.isDeclaration())
7735 HasErrors |= !V->verify(F);
7736
7737 HasErrors |= !V->verify();
7738 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7739 report_fatal_error("Broken module found, compilation aborted!");
7740 return false;
7741 }
7742
7743 void getAnalysisUsage(AnalysisUsage &AU) const override {
7744 AU.setPreservesAll();
7745 }
7746};
7747
7748} // end anonymous namespace
7749
7750/// Helper to issue failure from the TBAA verification
7751template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7752 if (Diagnostic)
7753 return Diagnostic->CheckFailed(Args...);
7754}
7755
// Report a TBAA verification failure and bail out of the enclosing (bool-
// returning) verifier function when condition \p C does not hold.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7763
7764/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7765/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7766/// struct-type node describing an aggregate data structure (like a struct).
7767TBAAVerifier::TBAABaseNodeSummary
7768TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7769 bool IsNewFormat) {
7770 if (BaseNode->getNumOperands() < 2) {
7771 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7772 return {true, ~0u};
7773 }
7774
7775 auto Itr = TBAABaseNodes.find(BaseNode);
7776 if (Itr != TBAABaseNodes.end())
7777 return Itr->second;
7778
7779 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7780 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7781 (void)InsertResult;
7782 assert(InsertResult.second && "We just checked!");
7783 return Result;
7784}
7785
7786TBAAVerifier::TBAABaseNodeSummary
7787TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7788 const MDNode *BaseNode, bool IsNewFormat) {
7789 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7790
7791 if (BaseNode->getNumOperands() == 2) {
7792 // Scalar nodes can only be accessed at offset 0.
7793 return isValidScalarTBAANode(BaseNode)
7794 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7795 : InvalidNode;
7796 }
7797
7798 if (IsNewFormat) {
7799 if (BaseNode->getNumOperands() % 3 != 0) {
7800 CheckFailed("Access tag nodes must have the number of operands that is a "
7801 "multiple of 3!", BaseNode);
7802 return InvalidNode;
7803 }
7804 } else {
7805 if (BaseNode->getNumOperands() % 2 != 1) {
7806 CheckFailed("Struct tag nodes must have an odd number of operands!",
7807 BaseNode);
7808 return InvalidNode;
7809 }
7810 }
7811
7812 // Check the type size field.
7813 if (IsNewFormat) {
7814 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7815 BaseNode->getOperand(1));
7816 if (!TypeSizeNode) {
7817 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7818 return InvalidNode;
7819 }
7820 }
7821
7822 // Check the type name field. In the new format it can be anything.
7823 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7824 CheckFailed("Struct tag nodes have a string as their first operand",
7825 BaseNode);
7826 return InvalidNode;
7827 }
7828
7829 bool Failed = false;
7830
7831 std::optional<APInt> PrevOffset;
7832 unsigned BitWidth = ~0u;
7833
7834 // We've already checked that BaseNode is not a degenerate root node with one
7835 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7836 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7837 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7838 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7839 Idx += NumOpsPerField) {
7840 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7841 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7842 if (!isa<MDNode>(FieldTy)) {
7843 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7844 Failed = true;
7845 continue;
7846 }
7847
7848 auto *OffsetEntryCI =
7850 if (!OffsetEntryCI) {
7851 CheckFailed("Offset entries must be constants!", I, BaseNode);
7852 Failed = true;
7853 continue;
7854 }
7855
7856 if (BitWidth == ~0u)
7857 BitWidth = OffsetEntryCI->getBitWidth();
7858
7859 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7860 CheckFailed(
7861 "Bitwidth between the offsets and struct type entries must match", I,
7862 BaseNode);
7863 Failed = true;
7864 continue;
7865 }
7866
7867 // NB! As far as I can tell, we generate a non-strictly increasing offset
7868 // sequence only from structs that have zero size bit fields. When
7869 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7870 // pick the field lexically the latest in struct type metadata node. This
7871 // mirrors the actual behavior of the alias analysis implementation.
7872 bool IsAscending =
7873 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7874
7875 if (!IsAscending) {
7876 CheckFailed("Offsets must be increasing!", I, BaseNode);
7877 Failed = true;
7878 }
7879
7880 PrevOffset = OffsetEntryCI->getValue();
7881
7882 if (IsNewFormat) {
7883 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7884 BaseNode->getOperand(Idx + 2));
7885 if (!MemberSizeNode) {
7886 CheckFailed("Member size entries must be constants!", I, BaseNode);
7887 Failed = true;
7888 continue;
7889 }
7890 }
7891 }
7892
7893 return Failed ? InvalidNode
7894 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7895}
7896
7897static bool IsRootTBAANode(const MDNode *MD) {
7898 return MD->getNumOperands() < 2;
7899}
7900
7901static bool IsScalarTBAANodeImpl(const MDNode *MD,
7903 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7904 return false;
7905
7906 if (!isa<MDString>(MD->getOperand(0)))
7907 return false;
7908
7909 if (MD->getNumOperands() == 3) {
7911 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7912 return false;
7913 }
7914
7915 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7916 return Parent && Visited.insert(Parent).second &&
7917 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7918}
7919
7920bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7921 auto ResultIt = TBAAScalarNodes.find(MD);
7922 if (ResultIt != TBAAScalarNodes.end())
7923 return ResultIt->second;
7924
7925 SmallPtrSet<const MDNode *, 4> Visited;
7926 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7927 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7928 (void)InsertResult;
7929 assert(InsertResult.second && "Just checked!");
7930
7931 return Result;
7932}
7933
7934/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7935/// Offset in place to be the offset within the field node returned.
7936///
7937/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7938MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7939 const MDNode *BaseNode,
7940 APInt &Offset,
7941 bool IsNewFormat) {
7942 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7943
7944 // Scalar nodes have only one possible "field" -- their parent in the access
7945 // hierarchy. Offset must be zero at this point, but our caller is supposed
7946 // to check that.
7947 if (BaseNode->getNumOperands() == 2)
7948 return cast<MDNode>(BaseNode->getOperand(1));
7949
7950 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7951 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7952 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7953 Idx += NumOpsPerField) {
7954 auto *OffsetEntryCI =
7955 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7956 if (OffsetEntryCI->getValue().ugt(Offset)) {
7957 if (Idx == FirstFieldOpNo) {
7958 CheckFailed("Could not find TBAA parent in struct type node", I,
7959 BaseNode, &Offset);
7960 return nullptr;
7961 }
7962
7963 unsigned PrevIdx = Idx - NumOpsPerField;
7964 auto *PrevOffsetEntryCI =
7965 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7966 Offset -= PrevOffsetEntryCI->getValue();
7967 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7968 }
7969 }
7970
7971 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7972 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7973 BaseNode->getOperand(LastIdx + 1));
7974 Offset -= LastOffsetEntryCI->getValue();
7975 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7976}
7977
7979 if (!Type || Type->getNumOperands() < 3)
7980 return false;
7981
7982 // In the new format type nodes shall have a reference to the parent type as
7983 // its first operand.
7984 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7985}
7986
7988 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7989 MD);
7990
7991 if (I)
7995 "This instruction shall not have a TBAA access tag!", I);
7996
7997 bool IsStructPathTBAA =
7998 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7999
8000 CheckTBAA(IsStructPathTBAA,
8001 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8002 I);
8003
8004 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8005 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8006
8007 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8008
8009 if (IsNewFormat) {
8010 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8011 "Access tag metadata must have either 4 or 5 operands", I, MD);
8012 } else {
8013 CheckTBAA(MD->getNumOperands() < 5,
8014 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8015 }
8016
8017 // Check the access size field.
8018 if (IsNewFormat) {
8019 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8020 MD->getOperand(3));
8021 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8022 }
8023
8024 // Check the immutability flag.
8025 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8026 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8027 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8028 MD->getOperand(ImmutabilityFlagOpNo));
8029 CheckTBAA(IsImmutableCI,
8030 "Immutability tag on struct tag metadata must be a constant", I,
8031 MD);
8032 CheckTBAA(
8033 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8034 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8035 MD);
8036 }
8037
8038 CheckTBAA(BaseNode && AccessType,
8039 "Malformed struct tag metadata: base and access-type "
8040 "should be non-null and point to Metadata nodes",
8041 I, MD, BaseNode, AccessType);
8042
8043 if (!IsNewFormat) {
8044 CheckTBAA(isValidScalarTBAANode(AccessType),
8045 "Access type node must be a valid scalar type", I, MD,
8046 AccessType);
8047 }
8048
8050 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8051
8052 APInt Offset = OffsetCI->getValue();
8053 bool SeenAccessTypeInPath = false;
8054
8055 SmallPtrSet<MDNode *, 4> StructPath;
8056
8057 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8058 BaseNode =
8059 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8060 if (!StructPath.insert(BaseNode).second) {
8061 CheckFailed("Cycle detected in struct path", I, MD);
8062 return false;
8063 }
8064
8065 bool Invalid;
8066 unsigned BaseNodeBitWidth;
8067 std::tie(Invalid, BaseNodeBitWidth) =
8068 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8069
8070 // If the base node is invalid in itself, then we've already printed all the
8071 // errors we wanted to print.
8072 if (Invalid)
8073 return false;
8074
8075 SeenAccessTypeInPath |= BaseNode == AccessType;
8076
8077 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8078 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8079 MD, &Offset);
8080
8081 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8082 (BaseNodeBitWidth == 0 && Offset == 0) ||
8083 (IsNewFormat && BaseNodeBitWidth == ~0u),
8084 "Access bit-width not the same as description bit-width", I, MD,
8085 BaseNodeBitWidth, Offset.getBitWidth());
8086
8087 if (IsNewFormat && SeenAccessTypeInPath)
8088 break;
8089 }
8090
8091 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8092 MD);
8093 return true;
8094}
8095
8096char VerifierLegacyPass::ID = 0;
8097INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8098
8100 return new VerifierLegacyPass(FatalErrors);
8101}
8102
8103AnalysisKey VerifierAnalysis::Key;
8110
8115
8117 auto Res = AM.getResult<VerifierAnalysis>(M);
8118 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8119 report_fatal_error("Broken module found, compilation aborted!");
8120
8121 return PreservedAnalyses::all();
8122}
8123
8125 auto res = AM.getResult<VerifierAnalysis>(F);
8126 if (res.IRBroken && FatalErrors)
8127 report_fatal_error("Broken function found, compilation aborted!");
8128
8129 return PreservedAnalyses::all();
8130}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1073
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1060
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1063
Constant * getDeactivationSymbol() const
Definition Constants.h:1082
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1066
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:245
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:246
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142