LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "assign";
195 break;
197 *OS << "end";
198 break;
200 *OS << "any";
201 break;
202 };
203 }
204
205 void Write(const Metadata *MD) {
206 if (!MD)
207 return;
208 MD->print(*OS, MST, &M);
209 *OS << '\n';
210 }
211
212 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
213 Write(MD.get());
214 }
215
216 void Write(const NamedMDNode *NMD) {
217 if (!NMD)
218 return;
219 NMD->print(*OS, MST);
220 *OS << '\n';
221 }
222
223 void Write(Type *T) {
224 if (!T)
225 return;
226 *OS << ' ' << *T;
227 }
228
229 void Write(const Comdat *C) {
230 if (!C)
231 return;
232 *OS << *C;
233 }
234
235 void Write(const APInt *AI) {
236 if (!AI)
237 return;
238 *OS << *AI << '\n';
239 }
240
241 void Write(const unsigned i) { *OS << i << '\n'; }
242
243 // NOLINTNEXTLINE(readability-identifier-naming)
244 void Write(const Attribute *A) {
245 if (!A)
246 return;
247 *OS << A->getAsString() << '\n';
248 }
249
250 // NOLINTNEXTLINE(readability-identifier-naming)
251 void Write(const AttributeSet *AS) {
252 if (!AS)
253 return;
254 *OS << AS->getAsString() << '\n';
255 }
256
257 // NOLINTNEXTLINE(readability-identifier-naming)
258 void Write(const AttributeList *AL) {
259 if (!AL)
260 return;
261 AL->print(*OS);
262 }
263
264 void Write(Printable P) { *OS << P << '\n'; }
265
266 template <typename T> void Write(ArrayRef<T> Vs) {
267 for (const T &V : Vs)
268 Write(V);
269 }
270
271 template <typename T1, typename... Ts>
272 void WriteTs(const T1 &V1, const Ts &... Vs) {
273 Write(V1);
274 WriteTs(Vs...);
275 }
276
277 template <typename... Ts> void WriteTs() {}
278
279public:
280 /// A check failed, so printout out the condition and the message.
281 ///
282 /// This provides a nice place to put a breakpoint if you want to see why
283 /// something is not correct.
284 void CheckFailed(const Twine &Message) {
285 if (OS)
286 *OS << Message << '\n';
287 Broken = true;
288 }
289
290 /// A check failed (with values to print).
291 ///
292 /// This calls the Message-only version so that the above is easier to set a
293 /// breakpoint on.
294 template <typename T1, typename... Ts>
295 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
296 CheckFailed(Message);
297 if (OS)
298 WriteTs(V1, Vs...);
299 }
300
301 /// A debug info check failed.
302 void DebugInfoCheckFailed(const Twine &Message) {
303 if (OS)
304 *OS << Message << '\n';
306 BrokenDebugInfo = true;
307 }
308
309 /// A debug info check failed (with values to print).
310 template <typename T1, typename... Ts>
311 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
312 const Ts &... Vs) {
313 DebugInfoCheckFailed(Message);
314 if (OS)
315 WriteTs(V1, Vs...);
316 }
317};
318
319namespace {
320
321class Verifier : public InstVisitor<Verifier>, VerifierSupport {
322 friend class InstVisitor<Verifier>;
323 DominatorTree DT;
324
325 /// When verifying a basic block, keep track of all of the
326 /// instructions we have seen so far.
327 ///
328 /// This allows us to do efficient dominance checks for the case when an
329 /// instruction has an operand that is an instruction in the same block.
330 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
331
332 /// Keep track of the metadata nodes that have been checked already.
334
335 /// Keep track which DISubprogram is attached to which function.
337
338 /// Track all DICompileUnits visited.
340
341 /// The result type for a landingpad.
342 Type *LandingPadResultTy;
343
344 /// Whether we've seen a call to @llvm.localescape in this function
345 /// already.
346 bool SawFrameEscape;
347
348 /// Whether the current function has a DISubprogram attached to it.
349 bool HasDebugInfo = false;
350
351 /// Stores the count of how many objects were passed to llvm.localescape for a
352 /// given function and the largest index passed to llvm.localrecover.
354
355 // Maps catchswitches and cleanuppads that unwind to siblings to the
356 // terminators that indicate the unwind, used to detect cycles therein.
358
359 /// Cache which blocks are in which funclet, if an EH funclet personality is
360 /// in use. Otherwise empty.
361 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
362
363 /// Cache of constants visited in search of ConstantExprs.
364 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
365
366 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
367 SmallVector<const Function *, 4> DeoptimizeDeclarations;
368
369 /// Cache of attribute lists verified.
370 SmallPtrSet<const void *, 32> AttributeListsVisited;
371
372 // Verify that this GlobalValue is only used in this module.
373 // This map is used to avoid visiting uses twice. We can arrive at a user
374 // twice, if they have multiple operands. In particular for very large
375 // constant expressions, we can arrive at a particular user many times.
376 SmallPtrSet<const Value *, 32> GlobalValueVisited;
377
378 // Keeps track of duplicate function argument debug info.
380
381 TBAAVerifier TBAAVerifyHelper;
382 ConvergenceVerifier ConvergenceVerifyHelper;
383
384 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
385
386 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
387
388public:
389 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
390 const Module &M)
391 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
392 SawFrameEscape(false), TBAAVerifyHelper(this) {
393 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
394 }
395
396 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
397
398 bool verify(const Function &F) {
399 llvm::TimeTraceScope timeScope("Verifier");
400 assert(F.getParent() == &M &&
401 "An instance of this class only works with a specific module!");
402
403 // First ensure the function is well-enough formed to compute dominance
404 // information, and directly compute a dominance tree. We don't rely on the
405 // pass manager to provide this as it isolates us from a potentially
406 // out-of-date dominator tree and makes it significantly more complex to run
407 // this code outside of a pass manager.
408 // FIXME: It's really gross that we have to cast away constness here.
409 if (!F.empty())
410 DT.recalculate(const_cast<Function &>(F));
411
412 for (const BasicBlock &BB : F) {
413 if (!BB.empty() && BB.back().isTerminator())
414 continue;
415
416 if (OS) {
417 *OS << "Basic Block in function '" << F.getName()
418 << "' does not have terminator!\n";
419 BB.printAsOperand(*OS, true, MST);
420 *OS << "\n";
421 }
422 return false;
423 }
424
425 auto FailureCB = [this](const Twine &Message) {
426 this->CheckFailed(Message);
427 };
428 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
429
430 Broken = false;
431 // FIXME: We strip const here because the inst visitor strips const.
432 visit(const_cast<Function &>(F));
433 verifySiblingFuncletUnwinds();
434
435 if (ConvergenceVerifyHelper.sawTokens())
436 ConvergenceVerifyHelper.verify(DT);
437
438 InstsInThisBlock.clear();
439 DebugFnArgs.clear();
440 LandingPadResultTy = nullptr;
441 SawFrameEscape = false;
442 SiblingFuncletInfo.clear();
443 verifyNoAliasScopeDecl();
444 NoAliasScopeDecls.clear();
445
446 return !Broken;
447 }
448
449 /// Verify the module that this instance of \c Verifier was initialized with.
450 bool verify() {
451 Broken = false;
452
453 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
454 for (const Function &F : M)
455 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
456 DeoptimizeDeclarations.push_back(&F);
457
458 // Now that we've visited every function, verify that we never asked to
459 // recover a frame index that wasn't escaped.
460 verifyFrameRecoverIndices();
461 for (const GlobalVariable &GV : M.globals())
462 visitGlobalVariable(GV);
463
464 for (const GlobalAlias &GA : M.aliases())
465 visitGlobalAlias(GA);
466
467 for (const GlobalIFunc &GI : M.ifuncs())
468 visitGlobalIFunc(GI);
469
470 for (const NamedMDNode &NMD : M.named_metadata())
471 visitNamedMDNode(NMD);
472
473 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
474 visitComdat(SMEC.getValue());
475
476 visitModuleFlags();
477 visitModuleIdents();
478 visitModuleCommandLines();
479 visitModuleErrnoTBAA();
480
481 verifyCompileUnits();
482
483 verifyDeoptimizeCallingConvs();
484 DISubprogramAttachments.clear();
485 return !Broken;
486 }
487
488private:
489 /// Whether a metadata node is allowed to be, or contain, a DILocation.
490 enum class AreDebugLocsAllowed { No, Yes };
491
492 /// Metadata that should be treated as a range, with slightly different
493 /// requirements.
494 enum class RangeLikeMetadataKind {
495 Range, // MD_range
496 AbsoluteSymbol, // MD_absolute_symbol
497 NoaliasAddrspace // MD_noalias_addrspace
498 };
499
500 // Verification methods...
501 void visitGlobalValue(const GlobalValue &GV);
502 void visitGlobalVariable(const GlobalVariable &GV);
503 void visitGlobalAlias(const GlobalAlias &GA);
504 void visitGlobalIFunc(const GlobalIFunc &GI);
505 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
506 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
507 const GlobalAlias &A, const Constant &C);
508 void visitNamedMDNode(const NamedMDNode &NMD);
509 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
510 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
511 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
512 void visitDIArgList(const DIArgList &AL, Function *F);
513 void visitComdat(const Comdat &C);
514 void visitModuleIdents();
515 void visitModuleCommandLines();
516 void visitModuleErrnoTBAA();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
519 DenseMap<const MDString *, const MDNode *> &SeenIDs,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitNofreeMetadata(Instruction &I, MDNode *MD);
530 void visitProfMetadata(Instruction &I, MDNode *MD);
531 void visitCallStackMetadata(MDNode *MD);
532 void visitMemProfMetadata(Instruction &I, MDNode *MD);
533 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
534 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
535 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
536 void visitMMRAMetadata(Instruction &I, MDNode *MD);
537 void visitAnnotationMetadata(MDNode *Annotation);
538 void visitAliasScopeMetadata(const MDNode *MD);
539 void visitAliasScopeListMetadata(const MDNode *MD);
540 void visitAccessGroupMetadata(const MDNode *MD);
541 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
542 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
543
544 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
545#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
546#include "llvm/IR/Metadata.def"
547 void visitDIScope(const DIScope &N);
548 void visitDIVariable(const DIVariable &N);
549 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
550 void visitDITemplateParameter(const DITemplateParameter &N);
551
552 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
553
554 void visit(DbgLabelRecord &DLR);
555 void visit(DbgVariableRecord &DVR);
556 // InstVisitor overrides...
557 using InstVisitor<Verifier>::visit;
558 void visitDbgRecords(Instruction &I);
559 void visit(Instruction &I);
560
561 void visitTruncInst(TruncInst &I);
562 void visitZExtInst(ZExtInst &I);
563 void visitSExtInst(SExtInst &I);
564 void visitFPTruncInst(FPTruncInst &I);
565 void visitFPExtInst(FPExtInst &I);
566 void visitFPToUIInst(FPToUIInst &I);
567 void visitFPToSIInst(FPToSIInst &I);
568 void visitUIToFPInst(UIToFPInst &I);
569 void visitSIToFPInst(SIToFPInst &I);
570 void visitIntToPtrInst(IntToPtrInst &I);
571 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
572 void visitPtrToAddrInst(PtrToAddrInst &I);
573 void visitPtrToIntInst(PtrToIntInst &I);
574 void visitBitCastInst(BitCastInst &I);
575 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
576 void visitPHINode(PHINode &PN);
577 void visitCallBase(CallBase &Call);
578 void visitUnaryOperator(UnaryOperator &U);
579 void visitBinaryOperator(BinaryOperator &B);
580 void visitICmpInst(ICmpInst &IC);
581 void visitFCmpInst(FCmpInst &FC);
582 void visitExtractElementInst(ExtractElementInst &EI);
583 void visitInsertElementInst(InsertElementInst &EI);
584 void visitShuffleVectorInst(ShuffleVectorInst &EI);
585 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
586 void visitCallInst(CallInst &CI);
587 void visitInvokeInst(InvokeInst &II);
588 void visitGetElementPtrInst(GetElementPtrInst &GEP);
589 void visitLoadInst(LoadInst &LI);
590 void visitStoreInst(StoreInst &SI);
591 void verifyDominatesUse(Instruction &I, unsigned i);
592 void visitInstruction(Instruction &I);
593 void visitTerminator(Instruction &I);
594 void visitBranchInst(BranchInst &BI);
595 void visitReturnInst(ReturnInst &RI);
596 void visitSwitchInst(SwitchInst &SI);
597 void visitIndirectBrInst(IndirectBrInst &BI);
598 void visitCallBrInst(CallBrInst &CBI);
599 void visitSelectInst(SelectInst &SI);
600 void visitUserOp1(Instruction &I);
601 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
602 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
603 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
604 void visitVPIntrinsic(VPIntrinsic &VPI);
605 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
606 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
607 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
608 void visitFenceInst(FenceInst &FI);
609 void visitAllocaInst(AllocaInst &AI);
610 void visitExtractValueInst(ExtractValueInst &EVI);
611 void visitInsertValueInst(InsertValueInst &IVI);
612 void visitEHPadPredecessors(Instruction &I);
613 void visitLandingPadInst(LandingPadInst &LPI);
614 void visitResumeInst(ResumeInst &RI);
615 void visitCatchPadInst(CatchPadInst &CPI);
616 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
617 void visitCleanupPadInst(CleanupPadInst &CPI);
618 void visitFuncletPadInst(FuncletPadInst &FPI);
619 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
620 void visitCleanupReturnInst(CleanupReturnInst &CRI);
621
622 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
623 void verifySwiftErrorValue(const Value *SwiftErrorVal);
624 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
625 void verifyMustTailCall(CallInst &CI);
626 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
627 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
628 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
629 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
630 const Value *V);
631 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
632 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
633 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
634 void verifyUnknownProfileMetadata(MDNode *MD);
635 void visitConstantExprsRecursively(const Constant *EntryC);
636 void visitConstantExpr(const ConstantExpr *CE);
637 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
638 void verifyInlineAsmCall(const CallBase &Call);
639 void verifyStatepoint(const CallBase &Call);
640 void verifyFrameRecoverIndices();
641 void verifySiblingFuncletUnwinds();
642
643 void verifyFragmentExpression(const DbgVariableRecord &I);
644 template <typename ValueOrMetadata>
645 void verifyFragmentExpression(const DIVariable &V,
647 ValueOrMetadata *Desc);
648 void verifyFnArgs(const DbgVariableRecord &DVR);
649 void verifyNotEntryValue(const DbgVariableRecord &I);
650
651 /// Module-level debug info verification...
652 void verifyCompileUnits();
653
654 /// Module-level verification that all @llvm.experimental.deoptimize
655 /// declarations share the same calling convention.
656 void verifyDeoptimizeCallingConvs();
657
658 void verifyAttachedCallBundle(const CallBase &Call,
659 const OperandBundleUse &BU);
660
661 /// Verify the llvm.experimental.noalias.scope.decl declarations
662 void verifyNoAliasScopeDecl();
663};
664
665} // end anonymous namespace
666
667/// We know that cond should be true, if not print an error message.
668#define Check(C, ...) \
669 do { \
670 if (!(C)) { \
671 CheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
675
676/// We know that a debug info condition should be true, if not print
677/// an error message.
678#define CheckDI(C, ...) \
679 do { \
680 if (!(C)) { \
681 DebugInfoCheckFailed(__VA_ARGS__); \
682 return; \
683 } \
684 } while (false)
685
686void Verifier::visitDbgRecords(Instruction &I) {
687 if (!I.DebugMarker)
688 return;
689 CheckDI(I.DebugMarker->MarkedInstr == &I,
690 "Instruction has invalid DebugMarker", &I);
691 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
692 "PHI Node must not have any attached DbgRecords", &I);
693 for (DbgRecord &DR : I.getDbgRecordRange()) {
694 CheckDI(DR.getMarker() == I.DebugMarker,
695 "DbgRecord had invalid DebugMarker", &I, &DR);
696 if (auto *Loc =
698 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
699 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
700 visit(*DVR);
701 // These have to appear after `visit` for consistency with existing
702 // intrinsic behaviour.
703 verifyFragmentExpression(*DVR);
704 verifyNotEntryValue(*DVR);
705 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
706 visit(*DLR);
707 }
708 }
709}
710
711void Verifier::visit(Instruction &I) {
712 visitDbgRecords(I);
713 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
714 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
716}
717
718// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
719static void forEachUser(const Value *User,
721 llvm::function_ref<bool(const Value *)> Callback) {
722 if (!Visited.insert(User).second)
723 return;
724
726 while (!WorkList.empty()) {
727 const Value *Cur = WorkList.pop_back_val();
728 if (!Visited.insert(Cur).second)
729 continue;
730 if (Callback(Cur))
731 append_range(WorkList, Cur->materialized_users());
732 }
733}
734
735void Verifier::visitGlobalValue(const GlobalValue &GV) {
737 "Global is external, but doesn't have external or weak linkage!", &GV);
738
739 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
740 if (const MDNode *Associated =
741 GO->getMetadata(LLVMContext::MD_associated)) {
742 Check(Associated->getNumOperands() == 1,
743 "associated metadata must have one operand", &GV, Associated);
744 const Metadata *Op = Associated->getOperand(0).get();
745 Check(Op, "associated metadata must have a global value", GO, Associated);
746
747 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
748 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
749 if (VM) {
750 Check(isa<PointerType>(VM->getValue()->getType()),
751 "associated value must be pointer typed", GV, Associated);
752
753 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
754 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
755 "associated metadata must point to a GlobalObject", GO, Stripped);
756 Check(Stripped != GO,
757 "global values should not associate to themselves", GO,
758 Associated);
759 }
760 }
761
762 // FIXME: Why is getMetadata on GlobalValue protected?
763 if (const MDNode *AbsoluteSymbol =
764 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
765 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
766 DL.getIntPtrType(GO->getType()),
767 RangeLikeMetadataKind::AbsoluteSymbol);
768 }
769 }
770
772 "Only global variables can have appending linkage!", &GV);
773
774 if (GV.hasAppendingLinkage()) {
775 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
776 Check(GVar && GVar->getValueType()->isArrayTy(),
777 "Only global arrays can have appending linkage!", GVar);
778 }
779
780 if (GV.isDeclarationForLinker())
781 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
782
783 if (GV.hasDLLExportStorageClass()) {
785 "dllexport GlobalValue must have default or protected visibility",
786 &GV);
787 }
788 if (GV.hasDLLImportStorageClass()) {
790 "dllimport GlobalValue must have default visibility", &GV);
791 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
792 &GV);
793
794 Check((GV.isDeclaration() &&
797 "Global is marked as dllimport, but not external", &GV);
798 }
799
800 if (GV.isImplicitDSOLocal())
801 Check(GV.isDSOLocal(),
802 "GlobalValue with local linkage or non-default "
803 "visibility must be dso_local!",
804 &GV);
805
806 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
807 if (const Instruction *I = dyn_cast<Instruction>(V)) {
808 if (!I->getParent() || !I->getParent()->getParent())
809 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
810 I);
811 else if (I->getParent()->getParent()->getParent() != &M)
812 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
813 I->getParent()->getParent(),
814 I->getParent()->getParent()->getParent());
815 return false;
816 } else if (const Function *F = dyn_cast<Function>(V)) {
817 if (F->getParent() != &M)
818 CheckFailed("Global is used by function in a different module", &GV, &M,
819 F, F->getParent());
820 return false;
821 }
822 return true;
823 });
824}
825
826void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
827 Type *GVType = GV.getValueType();
828
829 if (MaybeAlign A = GV.getAlign()) {
830 Check(A->value() <= Value::MaximumAlignment,
831 "huge alignment values are unsupported", &GV);
832 }
833
834 if (GV.hasInitializer()) {
835 Check(GV.getInitializer()->getType() == GVType,
836 "Global variable initializer type does not match global "
837 "variable type!",
838 &GV);
840 "Global variable initializer must be sized", &GV);
841 visitConstantExprsRecursively(GV.getInitializer());
842 // If the global has common linkage, it must have a zero initializer and
843 // cannot be constant.
844 if (GV.hasCommonLinkage()) {
846 "'common' global must have a zero initializer!", &GV);
847 Check(!GV.isConstant(), "'common' global may not be marked constant!",
848 &GV);
849 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
850 }
851 }
852
853 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
854 GV.getName() == "llvm.global_dtors")) {
856 "invalid linkage for intrinsic global variable", &GV);
858 "invalid uses of intrinsic global variable", &GV);
859
860 // Don't worry about emitting an error for it not being an array,
861 // visitGlobalValue will complain on appending non-array.
862 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
863 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
864 PointerType *FuncPtrTy =
865 PointerType::get(Context, DL.getProgramAddressSpace());
866 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
867 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
868 STy->getTypeAtIndex(1) == FuncPtrTy,
869 "wrong type for intrinsic global variable", &GV);
870 Check(STy->getNumElements() == 3,
871 "the third field of the element type is mandatory, "
872 "specify ptr null to migrate from the obsoleted 2-field form");
873 Type *ETy = STy->getTypeAtIndex(2);
874 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
875 &GV);
876 }
877 }
878
879 if (GV.hasName() && (GV.getName() == "llvm.used" ||
880 GV.getName() == "llvm.compiler.used")) {
882 "invalid linkage for intrinsic global variable", &GV);
884 "invalid uses of intrinsic global variable", &GV);
885
886 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
887 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
888 Check(PTy, "wrong type for intrinsic global variable", &GV);
889 if (GV.hasInitializer()) {
890 const Constant *Init = GV.getInitializer();
891 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
892 Check(InitArray, "wrong initializer for intrinsic global variable",
893 Init);
894 for (Value *Op : InitArray->operands()) {
895 Value *V = Op->stripPointerCasts();
898 Twine("invalid ") + GV.getName() + " member", V);
899 Check(V->hasName(),
900 Twine("members of ") + GV.getName() + " must be named", V);
901 }
902 }
903 }
904 }
905
906 // Visit any debug info attachments.
908 GV.getMetadata(LLVMContext::MD_dbg, MDs);
909 for (auto *MD : MDs) {
910 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
911 visitDIGlobalVariableExpression(*GVE);
912 else
913 CheckDI(false, "!dbg attachment of global variable must be a "
914 "DIGlobalVariableExpression");
915 }
916
917 // Scalable vectors cannot be global variables, since we don't know
918 // the runtime size.
919 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
920
921 // Check if it is or contains a target extension type that disallows being
922 // used as a global.
924 "Global @" + GV.getName() + " has illegal target extension type",
925 GVType);
926
927 if (!GV.hasInitializer()) {
928 visitGlobalValue(GV);
929 return;
930 }
931
932 // Walk any aggregate initializers looking for bitcasts between address spaces
933 visitConstantExprsRecursively(GV.getInitializer());
934
935 visitGlobalValue(GV);
936}
937
938void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
939 SmallPtrSet<const GlobalAlias*, 4> Visited;
940 Visited.insert(&GA);
941 visitAliaseeSubExpr(Visited, GA, C);
942}
943
/// Verify one step of a GlobalAlias's aliasee expression. \p Visited holds
/// every alias reached so far (used to detect cycles), \p GA is the alias
/// being verified, and \p C is the constant sub-expression under inspection.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
    cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    // Aliases may point at other aliases, but the chain must be acyclic
    // (Visited.insert() returning false means GA2 was already seen) and must
    // not pass through an interposable alias.
    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  // Constant expressions inside the aliasee get the usual constant-expr
  // verification (e.g. address-space cast checks).
  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse: follow aliases through to their own aliasee, and descend into
  // any other constant operand.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
982
/// Verify a GlobalAlias: linkage restrictions, a non-null aliasee whose type
/// matches the alias, and a well-formed aliasee expression.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  // The aliasee is either a global directly, or a constant expression over
  // one; nothing else is allowed.
  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle detection, interposable targets, ...).
  visitAliaseeSubExpr(GA, *Aliasee);

  // The alias itself is also subject to the common GlobalValue checks.
  visitGlobalValue(GA);
}
1000
/// Verify a GlobalIFunc: metadata restrictions, linkage, and a resolver that
/// is a defined function whose value has the expected pointer type.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // An ifunc may carry metadata, but debug-info and profile attachments
    // are explicitly disallowed on it.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver value must be a pointer in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1035
1036void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1037 // There used to be various other llvm.dbg.* nodes, but we don't support
1038 // upgrading them and we want to reserve the namespace for future uses.
1039 if (NMD.getName().starts_with("llvm.dbg."))
1040 CheckDI(NMD.getName() == "llvm.dbg.cu",
1041 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1042 for (const MDNode *MD : NMD.operands()) {
1043 if (NMD.getName() == "llvm.dbg.cu")
1044 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1045
1046 if (!MD)
1047 continue;
1048
1049 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1050 }
1051}
1052
/// Verify a generic MDNode and recurse into its operands. \p AllowLocs
/// controls whether DILocation operands are permitted at this position.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the class-specific visitor; plain MDTuples have no
  // specialized checks of their own.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local values must never appear inside module-level metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1107
1108void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1109 Check(MD.getValue(), "Expected valid value", &MD);
1110 Check(!MD.getValue()->getType()->isMetadataTy(),
1111 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1112
1113 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1114 if (!L)
1115 return;
1116
1117 Check(F, "function-local metadata used outside a function", L);
1118
1119 // If this was an instruction, bb, or argument, verify that it is in the
1120 // function that we expect.
1121 Function *ActualF = nullptr;
1122 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1123 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1124 ActualF = I->getParent()->getParent();
1125 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1126 ActualF = BB->getParent();
1127 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1128 ActualF = A->getParent();
1129 assert(ActualF && "Unimplemented function local metadata case!");
1130
1131 Check(ActualF == F, "function-local metadata used in wrong function", L);
1132}
1133
1134void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1135 for (const ValueAsMetadata *VAM : AL.getArgs())
1136 visitValueAsMetadata(*VAM, F);
1137}
1138
1139void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1140 Metadata *MD = MDV.getMetadata();
1141 if (auto *N = dyn_cast<MDNode>(MD)) {
1142 visitMDNode(*N, AreDebugLocsAllowed::No);
1143 return;
1144 }
1145
1146 // Only visit each node once. Metadata can be mutually recursive, so this
1147 // avoids infinite recursion here, as well as being an optimization.
1148 if (!MDNodes.insert(MD).second)
1149 return;
1150
1151 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1152 visitValueAsMetadata(*V, F);
1153
1154 if (auto *AL = dyn_cast<DIArgList>(MD))
1155 visitDIArgList(*AL, F);
1156}
1157
1158static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1159static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1160static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1161static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1162
1163void Verifier::visitDILocation(const DILocation &N) {
1164 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1165 "location requires a valid scope", &N, N.getRawScope());
1166 if (auto *IA = N.getRawInlinedAt())
1167 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1168 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1169 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1170}
1171
/// Verify a GenericDINode: it must carry a non-zero DWARF tag.
void Verifier::visitGenericDINode(const GenericDINode &N) {
  CheckDI(N.getTag(), "invalid tag", &N);
}
1175
1176void Verifier::visitDIScope(const DIScope &N) {
1177 if (auto *F = N.getRawFile())
1178 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1179}
1180
/// Verify a DISubrangeType: correct tag, and each of base type, bounds,
/// stride, bias, and size must have an allowed metadata kind.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  // Bounds, stride, and bias may each be a constant, or be computed at
  // runtime via a DIVariable or DIExpression.
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1208
/// Verify a DISubrange: correct tag, at most one of count/upperBound, and
/// allowed metadata kinds for count, bounds, and stride.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are alternative ways of bounding the range; at most
  // one of the two may be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  // A constant count of -1 is permitted (see the >= -1 comparison below).
  auto Count = N.getCount();
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1236
1237void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1238 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1239 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1240 "GenericSubrange can have any one of count or upperBound", &N);
1241 auto *CBound = N.getRawCountNode();
1242 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1243 "Count must be signed constant or DIVariable or DIExpression", &N);
1244 auto *LBound = N.getRawLowerBound();
1245 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1246 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1247 "LowerBound must be signed constant or DIVariable or DIExpression",
1248 &N);
1249 auto *UBound = N.getRawUpperBound();
1250 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1251 "UpperBound must be signed constant or DIVariable or DIExpression",
1252 &N);
1253 auto *Stride = N.getRawStride();
1254 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1255 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1256 "Stride must be signed constant or DIVariable or DIExpression", &N);
1257}
1258
/// Verify a DIEnumerator: it must carry the DW_TAG_enumerator tag.
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
}
1262
/// Verify a DIBasicType: one of the accepted tags, and a constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1273
/// Verify a DIFixedPointType: runs the common DIBasicType checks, then the
/// fixed-point specific ones — encoding, kind, and the kind-dependent
/// factor / numerator / denominator fields.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1292
1293void Verifier::visitDIStringType(const DIStringType &N) {
1294 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1295 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1296 &N);
1297}
1298
/// Verify a DIDerivedType: accepted tags, tag-specific extraData payloads,
/// set-type base types, scope/base-type references, DWARF address-space
/// applicability, and the size field.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // What extraData may hold depends on the node's tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept a constant, a string, an ObjC property, or a one-element tuple
    // wrapping a constant; anything else is rejected below.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // A set's base type must be an enumeration, a subrange, or one of the
  // listed basic-type encodings.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // An explicit DWARF address space is only meaningful on pointer-like tags.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1384
1385/// Detect mutually exclusive flags.
1386static bool hasConflictingReferenceFlags(unsigned Flags) {
1387 return ((Flags & DINode::FlagLValueReference) &&
1388 (Flags & DINode::FlagRValueReference)) ||
1389 ((Flags & DINode::FlagTypePassByValue) &&
1390 (Flags & DINode::FlagTypePassByReference));
1391}
1392
1393void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1394 auto *Params = dyn_cast<MDTuple>(&RawParams);
1395 CheckDI(Params, "invalid template params", &N, &RawParams);
1396 for (Metadata *Op : Params->operands()) {
1397 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1398 &N, Params, Op);
1399 }
1400}
1401
/// Verify a DICompositeType: accepted tags, member lists, vector shape,
/// template parameters, array-only fields, and the size field.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Reject the retired DIBlockByRefStruct flag bit if it ever reappears.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // A vector composite is exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  // Conversely, an array type is required to name its element type.
  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1476
/// Verify a DISubroutineType: correct tag and a type array whose entries are
/// all type references.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1488
1489void Verifier::visitDIFile(const DIFile &N) {
1490 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1491 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1492 if (Checksum) {
1493 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1494 "invalid checksum kind", &N);
1495 size_t Size;
1496 switch (Checksum->Kind) {
1497 case DIFile::CSK_MD5:
1498 Size = 32;
1499 break;
1500 case DIFile::CSK_SHA1:
1501 Size = 40;
1502 break;
1503 case DIFile::CSK_SHA256:
1504 Size = 64;
1505 break;
1506 }
1507 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1508 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1509 "invalid checksum", &N);
1510 }
1511}
1512
/// Verify a DICompileUnit: distinctness, file, emission kind, and each of the
/// optional node lists (enums, retained types, globals, imports, macros).
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Each list is optional, but when present it must be an MDTuple whose
  // operands all have the expected node kind.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Besides types, subprogram *declarations* (not definitions) may be
      // retained here.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record that this compile unit has been visited.
  CUVisited.insert(&N);
}
1566
/// Verify a DISubprogram: tag, scope/file/type references, template params,
/// declaration link, retained nodes, and the definition-vs-declaration
/// invariants around the compile unit.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration link must point at a subprogram declaration, never at a
  // definition.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Classify the retained node via visitRetainedNode; the lambdas select
      // which node kinds are acceptable in this list.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect =
          DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);

      // Every retained node must live in a local scope belonging to this
      // very subprogram.
      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);
      CheckDI(
          RetainedNodeScope->getSubprogram() == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1647
1648void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1649 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1650 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1651 "invalid local scope", &N, N.getRawScope());
1652 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1653 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1654}
1655
1656void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1657 visitDILexicalBlockBase(N);
1658
1659 CheckDI(N.getLine() || !N.getColumn(),
1660 "cannot have column info without line info", &N);
1661}
1662
/// Verify a DILexicalBlockFile: only the shared lexical-block-base checks
/// (tag and a valid local scope) apply.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1666
1667void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1668 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1669 if (auto *S = N.getRawScope())
1670 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1671 if (auto *S = N.getRawDecl())
1672 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1673}
1674
1675void Verifier::visitDINamespace(const DINamespace &N) {
1676 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1677 if (auto *S = N.getRawScope())
1678 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1679}
1680
1681void Verifier::visitDIMacro(const DIMacro &N) {
1682 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1683 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1684 "invalid macinfo type", &N);
1685 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1686 if (!N.getValue().empty()) {
1687 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1688 }
1689}
1690
1691void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1692 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1693 "invalid macinfo type", &N);
1694 if (auto *F = N.getRawFile())
1695 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1696
1697 if (auto *Array = N.getRawElements()) {
1698 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1699 for (Metadata *Op : N.getElements()->operands()) {
1700 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1701 }
1702 }
1703}
1704
/// Verify a DIModule: correct tag and a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1709
/// Common check for all template parameters: the type field, when present,
/// must be a type reference.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1713
/// Verify a DITemplateTypeParameter: the shared template-parameter checks
/// plus the type-parameter tag.
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
1720
1721void Verifier::visitDITemplateValueParameter(
1722 const DITemplateValueParameter &N) {
1723 visitDITemplateParameter(N);
1724
1725 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1726 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1727 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1728 "invalid tag", &N);
1729}
1730
1731void Verifier::visitDIVariable(const DIVariable &N) {
1732 if (auto *S = N.getRawScope())
1733 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1734 if (auto *F = N.getRawFile())
1735 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1736}
1737
/// Verify a DIGlobalVariable: the common variable checks, tag and type
/// references, and the optional static-data-member declaration link.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1752
1753void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1754 // Checks common to all variables.
1755 visitDIVariable(N);
1756
1757 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1758 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1759 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1760 "local variable requires a valid scope", &N, N.getRawScope());
1761 if (auto Ty = N.getType())
1762 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1763}
1764
1765void Verifier::visitDIAssignID(const DIAssignID &N) {
1766 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1767 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1768}
1769
1770void Verifier::visitDILabel(const DILabel &N) {
1771 if (auto *S = N.getRawScope())
1772 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1773 if (auto *F = N.getRawFile())
1774 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1775
1776 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1777 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1778 "label requires a valid scope", &N, N.getRawScope());
1779}
1780
1781void Verifier::visitDIExpression(const DIExpression &N) {
1782 CheckDI(N.isValid(), "invalid expression", &N);
1783}
1784
1785void Verifier::visitDIGlobalVariableExpression(
1786 const DIGlobalVariableExpression &GVE) {
1787 CheckDI(GVE.getVariable(), "missing variable");
1788 if (auto *Var = GVE.getVariable())
1789 visitDIGlobalVariable(*Var);
1790 if (auto *Expr = GVE.getExpression()) {
1791 visitDIExpression(*Expr);
1792 if (auto Fragment = Expr->getFragmentInfo())
1793 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1794 }
1795}
1796
1797void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1798 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1799 if (auto *T = N.getRawType())
1800 CheckDI(isType(T), "invalid type ref", &N, T);
1801 if (auto *F = N.getRawFile())
1802 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1803}
1804
1805void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1806 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1807 N.getTag() == dwarf::DW_TAG_imported_declaration,
1808 "invalid tag", &N);
1809 if (auto *S = N.getRawScope())
1810 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1811 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1812 N.getRawEntity());
1813}
1814
1815void Verifier::visitComdat(const Comdat &C) {
1816 // In COFF the Module is invalid if the GlobalValue has private linkage.
1817 // Entities with private linkage don't have entries in the symbol table.
1818 if (TT.isOSBinFormatCOFF())
1819 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1820 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1821 GV);
1822}
1823
1824void Verifier::visitModuleIdents() {
1825 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1826 if (!Idents)
1827 return;
1828
1829 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1830 // Scan each llvm.ident entry and make sure that this requirement is met.
1831 for (const MDNode *N : Idents->operands()) {
1832 Check(N->getNumOperands() == 1,
1833 "incorrect number of operands in llvm.ident metadata", N);
1834 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1835 ("invalid value for llvm.ident metadata entry operand"
1836 "(the operand should be a string)"),
1837 N->getOperand(0));
1838 }
1839}
1840
1841void Verifier::visitModuleCommandLines() {
1842 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1843 if (!CommandLines)
1844 return;
1845
1846 // llvm.commandline takes a list of metadata entry. Each entry has only one
1847 // string. Scan each llvm.commandline entry and make sure that this
1848 // requirement is met.
1849 for (const MDNode *N : CommandLines->operands()) {
1850 Check(N->getNumOperands() == 1,
1851 "incorrect number of operands in llvm.commandline metadata", N);
1852 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1853 ("invalid value for llvm.commandline metadata entry operand"
1854 "(the operand should be a string)"),
1855 N->getOperand(0));
1856 }
1857}
1858
1859void Verifier::visitModuleErrnoTBAA() {
1860 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1861 if (!ErrnoTBAA)
1862 return;
1863
1864 Check(ErrnoTBAA->getNumOperands() >= 1,
1865 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1866
1867 for (const MDNode *N : ErrnoTBAA->operands())
1868 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1869}
1870
// Verify the module flags list ("llvm.module.flags"): per-flag structure (via
// visitModuleFlag), the pairing of the AArch64 pauthabi platform/version
// flags, and all "require"-type constraints collected during the scan.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  // uint64_t(-1) is the sentinel for "flag not present".
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(MDN, SeenIDs, Requirements);
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        if (const auto *PAP =
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        if (const auto *PAV =
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // The two pauthabi flags only make sense together: reject one without the
  // other.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    // The required flag must exist in this module...
    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    // ...and carry exactly the required value.
    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
1921
// Validate one "llvm.module.flags" entry, a (behavior, ID, value) triple.
// "require"-type values are appended to Requirements for later validation;
// all other behaviors must have unique IDs, tracked in SeenIDs. Well-known
// flag IDs get additional payload checks at the end.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    // 'min' merges by taking the minimum, which is only well-defined for
    // non-negative constant integers.
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
        "invalid value for 'max' module flag (expected constant integer)",
        Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  // Additional payload validation for well-known flag IDs.
  if (ID->getString() == "wchar_size") {
    ConstantInt *Value
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    ConstantInt *Value =
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the "CG Profile" node is a (caller, callee, count)
    // triple; each is validated individually.
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
2029
2030void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2031 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2032 if (!FuncMDO)
2033 return;
2034 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2035 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2036 "expected a Function or null", FuncMDO);
2037 };
2038 auto Node = dyn_cast_or_null<MDNode>(MDO);
2039 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2040 CheckFunction(Node->getOperand(0));
2041 CheckFunction(Node->getOperand(1));
2042 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2043 Check(Count && Count->getType()->isIntegerTy(),
2044 "expected an integer constant", Node->getOperand(2));
2045}
2046
// Verify attribute payload well-formedness: known boolean string attributes
// must have a value of "", "true", or "false" (checked via the generated
// ATTRIBUTE_STRBOOL table in Attributes.inc), and enum/int attribute kinds
// must agree on whether they carry an integer argument.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Integer attributes must carry an argument; non-integer ones must not.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2072
2073// VerifyParameterAttrs - Check the given attributes for an argument or return
2074// value of the specified type. The value V is printed in error messages.
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Enforces: attributes legal on parameters, pairwise incompatibilities,
// type-incompatible attributes, and payload sanity for pointer-payload
// attributes (byval/byref/inalloca/preallocated), initializes, nofpclass,
// and range.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  // Every attribute must either be a string attribute or one that may legally
  // appear on a parameter at all.
  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' may only be combined with 'range'.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject attributes that cannot apply to this particular parameter type.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    // At least one test bit, and no bits outside the fcAllFlags mask.
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
        "Range bit width must match type bit width!", V);
  }
}
2239
2240void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2241 const Value *V) {
2242 if (Attrs.hasFnAttr(Attr)) {
2243 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2244 unsigned N;
2245 if (S.getAsInteger(10, N))
2246 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2247 }
2248}
2249
2250// Check parameter attributes against a function type.
2251// The value V is printed in error messages.
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Also verifies: the attribute list belongs to this Module's context,
// return-value attributes, per-parameter attributes (including the
// one-per-function restrictions on nest/returned/sret/swift*), and a long
// list of function-level attribute constraints (optnone, AArch64 SME/ZA
// state strings, allocsize/allockind, vscale_range, frame-pointer, pointer
// authentication strings, VFABI variants, denormal modes, modular-format).
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Context checks are memoized per attribute list so repeated lists are only
  // validated once.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  // Flags for the attributes that may appear on at most one parameter.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  // Track the widest fixed-vector parameter/return width seen.
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Function-level attribute checks start here.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!", V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
        V);

  if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!", V);
  }

  // 'writable' anywhere requires that argument memory be writable per the
  // function's memory effects.
  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!",
        V);

  if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!",
          V);
  }

  // At most one AArch64 ZA-state attribute may be present.
  Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
         Attrs.hasFnAttr("aarch64_inout_za") +
         Attrs.hasFnAttr("aarch64_out_za") +
         Attrs.hasFnAttr("aarch64_preserves_za") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  // Likewise for the ZT0-state attributes.
  Check((Attrs.hasFnAttr("aarch64_new_zt0") +
         Attrs.hasFnAttr("aarch64_in_zt0") +
         Attrs.hasFnAttr("aarch64_inout_zt0") +
         Attrs.hasFnAttr("aarch64_out_zt0") +
         Attrs.hasFnAttr("aarch64_preserves_zt0") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    // Both allocsize indices must name integer parameters in range.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                    " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args->first))
      return;

    if (Args->second && !CheckParam("number of elements", *Args->second))
      return;
  }

  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
    // The named variant (when defined in this module) must share the
    // alloc-family, be allockind("zeroed"), and match this signature.
    Function *Variant = M.getFunction(S);
    if (Variant) {
      Attribute Family = Attrs.getFnAttr("alloc-family");
      Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'");

      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
                 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'");

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature");
    }
  }

  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    // Minimum must be a non-zero power of two; maximum, when present, must be
    // a power of two no smaller than the minimum.
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);
    else if (!isPowerOf2_32(VScaleMin))
      CheckFailed("'vscale_range' minimum must be power-of-two value", V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
    else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
      CheckFailed("'vscale_range' maximum must be power-of-two value", V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
        FP != "non-leaf-no-reserve")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  // These string attributes must carry base-10 unsigned integer payloads.
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  if (Attrs.hasFnAttr("patchable-function-entry-section"))
    Check(!Attrs.getFnAttr("patchable-function-entry-section")
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty");
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);

  if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf")
      CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key")
      CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
                  V);
    // The key is meaningless without the main sign-return-address attribute.
    if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
      CheckFailed(
          "'sign-return-address-key' present without `sign-return-address`");
    }
  }

  if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-target-enforcement' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
                  V);
  }

  if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
    StringRef S = A.getValueAsString();
    // The mangled VFABI name must demangle against this function type.
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
    if (!Info)
      CheckFailed("invalid name for a VFABI variant: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
                  V);
  }

  if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
    StringRef S = A.getValueAsString();
    S.split(Args, ',');
    Check(Args.size() >= 5,
          "modular-format attribute requires at least 5 arguments", V);
    // The third argument is the 1-based index of the first format argument;
    // for varargs functions the index one past the last fixed parameter is
    // also legal.
    unsigned FirstArgIdx;
    Check(!Args[2].getAsInteger(10, FirstArgIdx),
          "modular-format attribute first arg index is not an integer", V);
    unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
    Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
          "modular-format attribute first arg index is out of bounds", V);
  }
}
2610void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2611 Check(MD->getNumOperands() == 2,
2612 "'unknown' !prof should have a single additional operand", MD);
2613 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2614 Check(PassName != nullptr,
2615 "'unknown' !prof should have an additional operand of type "
2616 "string");
2617 Check(!PassName->getString().empty(),
2618 "the 'unknown' !prof operand should not be an empty string");
2619}
2620
// Verify function-level metadata attachments. Currently checks two kinds:
// !prof (entry-count style annotations, including the "unknown" form) and
// !kcfi_type (a single constant-integer operand).
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
          verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // !kcfi_type: exactly one non-null operand wrapping a constant integer.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2670
/// Iteratively (worklist-based, not recursive) visit a constant and every
/// constant reachable through its operands, exactly once each; GlobalValues
/// terminate the walk but are checked to live in the module being verified.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // ConstantExprVisited dedups across calls, so shared subtrees are visited
  // only once per verifier run.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2707
2708void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2709 if (CE->getOpcode() == Instruction::BitCast)
2710 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2711 CE->getType()),
2712 "Invalid bitcast", CE);
2713 else if (CE->getOpcode() == Instruction::PtrToAddr)
2714 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2715}
2716
/// Validate a ptrauth signed-pointer constant: the base must be a pointer of
/// the constant's own type, the key an i32, and the discriminator an i64.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");
}
2733
2734bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2735 // There shouldn't be more attribute sets than there are parameters plus the
2736 // function and return value.
2737 return Attrs.getNumAttrSets() <= Params + 2;
2738}
2739
/// Check an inline-asm call site's parsed constraints against its arguments:
/// indirect constraints need pointer operands, elementtype is only legal on
/// indirect constraints, and label constraints must pair with callbr dests.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    // Label constraints consume no argument; just count them for the
    // callbr-destination check below.
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2781
/// Verify that statepoint intrinsic is well formed.
///
/// Operand layout checked here: [0]=id, [1]=num patch bytes, [2]=callee
/// (with elementtype), [3]=num call args, [4]=flags, [5..]=call args,
/// followed by (deprecated, must-be-zero) transition and deopt arg counts.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // The callee's function type comes from its elementtype attribute, not the
  // (opaque) pointer type.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    // NOTE(review): this tests 'Call' (the statepoint itself) rather than
    // 'UserCall'; that looks suspicious — confirm whether the gc.relocate
    // branch is intended to key off the user.
    } else if (isa<GCRelocateInst>(Call)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times.  It's non-optimal, but it is legal.  It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer.  This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about.  See example statepoint.ll in the verifier subdirectory
}
2905
2906void Verifier::verifyFrameRecoverIndices() {
2907 for (auto &Counts : FrameEscapeInfo) {
2908 Function *F = Counts.first;
2909 unsigned EscapedObjectCount = Counts.second.first;
2910 unsigned MaxRecoveredIndex = Counts.second.second;
2911 Check(MaxRecoveredIndex <= EscapedObjectCount,
2912 "all indices passed to llvm.localrecover must be less than the "
2913 "number of arguments passed to llvm.localescape in the parent "
2914 "function",
2915 F);
2916 }
2917}
2918
2919static Instruction *getSuccPad(Instruction *Terminator) {
2920 BasicBlock *UnwindDest;
2921 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2922 UnwindDest = II->getUnwindDest();
2923 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2924 UnwindDest = CSI->getUnwindDest();
2925 else
2926 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2927 return &*UnwindDest->getFirstNonPHIIt();
2928}
2929
/// Walk the unwind-successor chains recorded in SiblingFuncletInfo and reject
/// any cycle, i.e. a set of sibling EH pads that unwind to one another.
/// 'Active' holds the pads on the chain currently being walked; 'Visited'
/// holds every pad already proven cycle-free across chains.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and their distinct terminators) forming the
          // cycle so the diagnostic can print the whole loop.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2972
// visitFunction - Verify that a function is ok.
//
// Checks, in order: signature/argument agreement, attributes, calling
// convention restrictions, per-argument type rules, metadata attachments
// (declaration vs. definition rules), intrinsic usage, a few intrinsic
// signatures, and finally that every !dbg location leads back to the
// function's own DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable frame, so their arguments may not be
  // referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata/token/x86_amx-typed parameters are reserved for intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Bail out early once broken debug info has been diagnosed; further
      // walking would only repeat the same failure.
      if (BrokenDebugInfo)
        return;
    }
}
3310
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Ensures the block has a terminator, that each PHI has exactly one entry per
// predecessor (with consistent values for duplicated predecessors), that every
// instruction's parent pointer is this block, and that no DbgRecords trail
// the terminator.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    // Sorting predecessors and PHI entries lets us compare the two lists
    // pairwise below.
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3370
3371void Verifier::visitTerminator(Instruction &I) {
3372 // Ensure that terminators only exist at the end of the basic block.
3373 Check(&I == I.getParent()->getTerminator(),
3374 "Terminator found in the middle of a basic block!", I.getParent());
3375 visitInstruction(I);
3376}
3377
// A conditional branch must be driven by a boolean (i1) condition.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional()) {
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3385
3386void Verifier::visitReturnInst(ReturnInst &RI) {
3387 Function *F = RI.getParent()->getParent();
3388 unsigned N = RI.getNumOperands();
3389 if (F->getReturnType()->isVoidTy())
3390 Check(N == 0,
3391 "Found return instr that returns non-void in Function of void "
3392 "return type!",
3393 &RI, F->getReturnType());
3394 else
3395 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3396 "Function return type does not match operand "
3397 "type of return inst!",
3398 &RI, F->getReturnType());
3399
3400 // Check to make sure that the return value has necessary properties for
3401 // terminators...
3402 visitTerminator(RI);
3403}
3404
3405void Verifier::visitSwitchInst(SwitchInst &SI) {
3406 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3407 // Check to make sure that all of the constants in the switch instruction
3408 // have the same type as the switched-on value.
3409 Type *SwitchTy = SI.getCondition()->getType();
3410 SmallPtrSet<ConstantInt*, 32> Constants;
3411 for (auto &Case : SI.cases()) {
3412 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3413 "Case value is not a constant integer.", &SI);
3414 Check(Case.getCaseValue()->getType() == SwitchTy,
3415 "Switch constants must all be same type as switch value!", &SI);
3416 Check(Constants.insert(Case.getCaseValue()).second,
3417 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3418 }
3419
3420 visitTerminator(SI);
3421}
3422
// The indirectbr address operand and all of its destinations must be
// pointer-typed.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3432
3433void Verifier::visitCallBrInst(CallBrInst &CBI) {
3434 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3435 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3436 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3437
3438 verifyInlineAsmCall(CBI);
3439 visitTerminator(CBI);
3440}
3441
3442void Verifier::visitSelectInst(SelectInst &SI) {
3443 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3444 SI.getOperand(2)),
3445 "Invalid operands for select instruction!", &SI);
3446
3447 Check(SI.getTrueValue()->getType() == SI.getType(),
3448 "Select values must have same type as select instruction!", &SI);
3449 visitInstruction(SI);
3450}
3451
3452/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3453/// a pass, if any exist, it's an error.
3454///
3455void Verifier::visitUserOp1(Instruction &I) {
3456 Check(false, "User-defined operators should not live outside of a pass!", &I);
3457}
3458
3459void Verifier::visitTruncInst(TruncInst &I) {
3460 // Get the source and destination types
3461 Type *SrcTy = I.getOperand(0)->getType();
3462 Type *DestTy = I.getType();
3463
3464 // Get the size of the types in bits, we'll need this later
3465 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3466 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3467
3468 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3469 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3470 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3471 "trunc source and destination must both be a vector or neither", &I);
3472 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3473
3474 visitInstruction(I);
3475}
3476
3477void Verifier::visitZExtInst(ZExtInst &I) {
3478 // Get the source and destination types
3479 Type *SrcTy = I.getOperand(0)->getType();
3480 Type *DestTy = I.getType();
3481
3482 // Get the size of the types in bits, we'll need this later
3483 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3484 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3485 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3486 "zext source and destination must both be a vector or neither", &I);
3487 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3488 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3489
3490 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3491
3492 visitInstruction(I);
3493}
3494
3495void Verifier::visitSExtInst(SExtInst &I) {
3496 // Get the source and destination types
3497 Type *SrcTy = I.getOperand(0)->getType();
3498 Type *DestTy = I.getType();
3499
3500 // Get the size of the types in bits, we'll need this later
3501 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3502 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3503
3504 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3505 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3506 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3507 "sext source and destination must both be a vector or neither", &I);
3508 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3509
3510 visitInstruction(I);
3511}
3512
3513void Verifier::visitFPTruncInst(FPTruncInst &I) {
3514 // Get the source and destination types
3515 Type *SrcTy = I.getOperand(0)->getType();
3516 Type *DestTy = I.getType();
3517 // Get the size of the types in bits, we'll need this later
3518 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3519 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3520
3521 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3522 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3523 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3524 "fptrunc source and destination must both be a vector or neither", &I);
3525 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3526
3527 visitInstruction(I);
3528}
3529
3530void Verifier::visitFPExtInst(FPExtInst &I) {
3531 // Get the source and destination types
3532 Type *SrcTy = I.getOperand(0)->getType();
3533 Type *DestTy = I.getType();
3534
3535 // Get the size of the types in bits, we'll need this later
3536 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3537 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3538
3539 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3540 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3541 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3542 "fpext source and destination must both be a vector or neither", &I);
3543 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3544
3545 visitInstruction(I);
3546}
3547
3548void Verifier::visitUIToFPInst(UIToFPInst &I) {
3549 // Get the source and destination types
3550 Type *SrcTy = I.getOperand(0)->getType();
3551 Type *DestTy = I.getType();
3552
3553 bool SrcVec = SrcTy->isVectorTy();
3554 bool DstVec = DestTy->isVectorTy();
3555
3556 Check(SrcVec == DstVec,
3557 "UIToFP source and dest must both be vector or scalar", &I);
3558 Check(SrcTy->isIntOrIntVectorTy(),
3559 "UIToFP source must be integer or integer vector", &I);
3560 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3561 &I);
3562
3563 if (SrcVec && DstVec)
3564 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3565 cast<VectorType>(DestTy)->getElementCount(),
3566 "UIToFP source and dest vector length mismatch", &I);
3567
3568 visitInstruction(I);
3569}
3570
3571void Verifier::visitSIToFPInst(SIToFPInst &I) {
3572 // Get the source and destination types
3573 Type *SrcTy = I.getOperand(0)->getType();
3574 Type *DestTy = I.getType();
3575
3576 bool SrcVec = SrcTy->isVectorTy();
3577 bool DstVec = DestTy->isVectorTy();
3578
3579 Check(SrcVec == DstVec,
3580 "SIToFP source and dest must both be vector or scalar", &I);
3581 Check(SrcTy->isIntOrIntVectorTy(),
3582 "SIToFP source must be integer or integer vector", &I);
3583 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3584 &I);
3585
3586 if (SrcVec && DstVec)
3587 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3588 cast<VectorType>(DestTy)->getElementCount(),
3589 "SIToFP source and dest vector length mismatch", &I);
3590
3591 visitInstruction(I);
3592}
3593
3594void Verifier::visitFPToUIInst(FPToUIInst &I) {
3595 // Get the source and destination types
3596 Type *SrcTy = I.getOperand(0)->getType();
3597 Type *DestTy = I.getType();
3598
3599 bool SrcVec = SrcTy->isVectorTy();
3600 bool DstVec = DestTy->isVectorTy();
3601
3602 Check(SrcVec == DstVec,
3603 "FPToUI source and dest must both be vector or scalar", &I);
3604 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3605 Check(DestTy->isIntOrIntVectorTy(),
3606 "FPToUI result must be integer or integer vector", &I);
3607
3608 if (SrcVec && DstVec)
3609 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3610 cast<VectorType>(DestTy)->getElementCount(),
3611 "FPToUI source and dest vector length mismatch", &I);
3612
3613 visitInstruction(I);
3614}
3615
3616void Verifier::visitFPToSIInst(FPToSIInst &I) {
3617 // Get the source and destination types
3618 Type *SrcTy = I.getOperand(0)->getType();
3619 Type *DestTy = I.getType();
3620
3621 bool SrcVec = SrcTy->isVectorTy();
3622 bool DstVec = DestTy->isVectorTy();
3623
3624 Check(SrcVec == DstVec,
3625 "FPToSI source and dest must both be vector or scalar", &I);
3626 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3627 Check(DestTy->isIntOrIntVectorTy(),
3628 "FPToSI result must be integer or integer vector", &I);
3629
3630 if (SrcVec && DstVec)
3631 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3632 cast<VectorType>(DestTy)->getElementCount(),
3633 "FPToSI source and dest vector length mismatch", &I);
3634
3635 visitInstruction(I);
3636}
3637
3638void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3639 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3640 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3641 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3642 V);
3643
3644 if (SrcTy->isVectorTy()) {
3645 auto *VSrc = cast<VectorType>(SrcTy);
3646 auto *VDest = cast<VectorType>(DestTy);
3647 Check(VSrc->getElementCount() == VDest->getElementCount(),
3648 "PtrToAddr vector length mismatch", V);
3649 }
3650
3651 Type *AddrTy = DL.getAddressType(SrcTy);
3652 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3653}
3654
3655void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3656 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3657 visitInstruction(I);
3658}
3659
3660void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3661 // Get the source and destination types
3662 Type *SrcTy = I.getOperand(0)->getType();
3663 Type *DestTy = I.getType();
3664
3665 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3666
3667 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3668 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3669 &I);
3670
3671 if (SrcTy->isVectorTy()) {
3672 auto *VSrc = cast<VectorType>(SrcTy);
3673 auto *VDest = cast<VectorType>(DestTy);
3674 Check(VSrc->getElementCount() == VDest->getElementCount(),
3675 "PtrToInt Vector length mismatch", &I);
3676 }
3677
3678 visitInstruction(I);
3679}
3680
3681void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3682 // Get the source and destination types
3683 Type *SrcTy = I.getOperand(0)->getType();
3684 Type *DestTy = I.getType();
3685
3686 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3687 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3688
3689 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3690 &I);
3691 if (SrcTy->isVectorTy()) {
3692 auto *VSrc = cast<VectorType>(SrcTy);
3693 auto *VDest = cast<VectorType>(DestTy);
3694 Check(VSrc->getElementCount() == VDest->getElementCount(),
3695 "IntToPtr Vector length mismatch", &I);
3696 }
3697 visitInstruction(I);
3698}
3699
3700void Verifier::visitBitCastInst(BitCastInst &I) {
3701 Check(
3702 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3703 "Invalid bitcast", &I);
3704 visitInstruction(I);
3705}
3706
// Verify an addrspacecast: both ends must be pointers (or pointer vectors);
// for vector casts the element counts must agree.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  // When the source is a vector of pointers, the destination must have the
  // same number of elements.
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3723
/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.  (No instruction location is
  // attached here; only the PHI value itself is reported.)
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3749
/// Common verification for every call-like instruction (call, invoke, callbr):
/// checks the callee type, argument arity and types, attributes (including
/// per-parameter ABI attributes such as inalloca/swifterror/immarg/
/// preallocated), operand bundles, and intrinsic-specific rules.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    // Unsized types have no ABI alignment to validate.
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      // immarg operands must be compile-time immediates.
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      // Exactly one of {preallocated bundle, musttail} must be present.
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
          "inlinable function call in a function with "
          "debug info must have a !dbg location",
          Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4055
4056void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4057 StringRef Context) {
4058 Check(!Attrs.contains(Attribute::InAlloca),
4059 Twine("inalloca attribute not allowed in ") + Context);
4060 Check(!Attrs.contains(Attribute::InReg),
4061 Twine("inreg attribute not allowed in ") + Context);
4062 Check(!Attrs.contains(Attribute::SwiftError),
4063 Twine("swifterror attribute not allowed in ") + Context);
4064 Check(!Attrs.contains(Attribute::Preallocated),
4065 Twine("preallocated attribute not allowed in ") + Context);
4066 Check(!Attrs.contains(Attribute::ByRef),
4067 Twine("byref attribute not allowed in ") + Context);
4068}
4069
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  // Identical types are trivially congruent.
  if (L == R)
    return true;
  // Otherwise both must be pointers; congruence then only requires the same
  // address space.
  if (!PL || !PR)
    return false;
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4081
4082static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4083 static const Attribute::AttrKind ABIAttrs[] = {
4084 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4085 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4086 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4087 Attribute::ByRef};
4088 AttrBuilder Copy(C);
4089 for (auto AK : ABIAttrs) {
4090 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4091 if (Attr.isValid())
4092 Copy.addAttribute(Attr);
4093 }
4094
4095 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4096 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4097 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4098 Attrs.hasParamAttr(I, Attribute::ByRef)))
4099 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4100 return Copy;
4101}
4102
/// Enforce the structural rules for a 'musttail' call: no inline asm, matching
/// varargs-ness and calling conventions, congruent return types, the call must
/// be immediately followed by (an optional bitcast and) a ret of its result,
/// and ABI-impacting parameter attributes must match between caller and
/// callee.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return: must exist and return the call's value (or undef/void).
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
            isa<UndefValue>(Ret->getReturnValue()),
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  // tailcc/swifttailcc guarantee tail calls, so they get a dedicated, stricter
  // attribute policy and skip the prototype-matching rules below.
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4189
4190void Verifier::visitCallInst(CallInst &CI) {
4191 visitCallBase(CI);
4192
4193 if (CI.isMustTailCall())
4194 verifyMustTailCall(CI);
4195}
4196
4197void Verifier::visitInvokeInst(InvokeInst &II) {
4198 visitCallBase(II);
4199
4200 // Verify that the first non-PHI instruction of the unwind destination is an
4201 // exception handling instruction.
4202 Check(
4203 II.getUnwindDest()->isEHPad(),
4204 "The unwind destination does not have an exception handling instruction!",
4205 &II);
4206
4207 visitTerminator(II);
4208}
4209
4210/// visitUnaryOperator - Check the argument to the unary operator.
4211///
4212void Verifier::visitUnaryOperator(UnaryOperator &U) {
4213 Check(U.getType() == U.getOperand(0)->getType(),
4214 "Unary operators must have same type for"
4215 "operands and result!",
4216 &U);
4217
4218 switch (U.getOpcode()) {
4219 // Check that floating-point arithmetic operators are only used with
4220 // floating-point operands.
4221 case Instruction::FNeg:
4222 Check(U.getType()->isFPOrFPVectorTy(),
4223 "FNeg operator only works with float types!", &U);
4224 break;
4225 default:
4226 llvm_unreachable("Unknown UnaryOperator opcode!");
4227 }
4228
4229 visitInstruction(U);
4230}
4231
4232/// visitBinaryOperator - Check that both arguments to the binary operator are
4233/// of the same type!
4234///
// Verify the structural rules shared by all BinaryOperators: both operands
// have identical types, and the operand/result types belong to the class
// (integer, floating-point) that the opcode requires.  Each Check reports a
// verifier failure (and bails out of this visit) when its condition is false.
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    // Every BinaryOperator opcode is handled above; reaching here indicates a
    // bug in the verifier (or a new opcode), not malformed input IR.
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
4295
4296void Verifier::visitICmpInst(ICmpInst &IC) {
4297 // Check that the operands are the same type
4298 Type *Op0Ty = IC.getOperand(0)->getType();
4299 Type *Op1Ty = IC.getOperand(1)->getType();
4300 Check(Op0Ty == Op1Ty,
4301 "Both operands to ICmp instruction are not of the same type!", &IC);
4302 // Check that the operands are the right type
4303 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4304 "Invalid operand types for ICmp instruction", &IC);
4305 // Check that the predicate is valid.
4306 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4307
4308 visitInstruction(IC);
4309}
4310
4311void Verifier::visitFCmpInst(FCmpInst &FC) {
4312 // Check that the operands are the same type
4313 Type *Op0Ty = FC.getOperand(0)->getType();
4314 Type *Op1Ty = FC.getOperand(1)->getType();
4315 Check(Op0Ty == Op1Ty,
4316 "Both operands to FCmp instruction are not of the same type!", &FC);
4317 // Check that the operands are the right type
4318 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4319 &FC);
4320 // Check that the predicate is valid.
4321 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4322
4323 visitInstruction(FC);
4324}
4325
// Verify an extractelement; the detailed legality rules (vector operand,
// integer index) are delegated to ExtractElementInst::isValidOperands.
// NOTE(review): the Check's condition line was lost in extraction and is
// reconstructed from upstream — confirm against the original file.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
  Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
        "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
4331
// Verify an insertelement; the detailed legality rules (vector operand,
// matching scalar type, integer index) are delegated to
// InsertElementInst::isValidOperands.
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                           IE.getOperand(2)),
        "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
4338
// Verify a shufflevector; operand/mask legality is delegated to
// ShuffleVectorInst::isValidOperands.
// NOTE(review): the Check's first line was lost in extraction and is
// reconstructed from upstream — confirm against the original file.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
  Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
                                           SV.getShuffleMask()),
        "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
4345
// Verify a getelementptr: pointer-typed base, sized source element type,
// integer indices that actually index the source type, a result type that
// matches the indexed type, and consistent vector widths / address spaces.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  // The base operand must be a pointer or a vector of pointers (the scalar
  // type of a vector-of-pointers is the pointer type).
  Check(isa<PointerType>(TargetTy),
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
    // Offsets into a struct containing scalable vectors are not computable.
    Check(!STy->isScalableTy(),
          "getelementptr cannot target structure that contains scalable vector"
          "type",
          &GEP);
  }

  SmallVector<Value *, 16> Idxs(GEP.indices());
  Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
  // getIndexedType returns null when the index list does not legally walk the
  // source element type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());

  // The declared result element type must agree with the indexed type.
  Check(PtrTy && GEP.getResultElementType() == ElTy,
        "GEP is not of right type for indices!", &GEP, ElTy);

  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
    // Additional checks for vector GEPs: the result width must match a vector
    // base, and every vector index must have that same width.
    ElementCount GEPWidth = GEPVTy->getElementCount();
    if (GEP.getPointerOperandType()->isVectorTy())
      Check(
          GEPWidth ==
              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
          "Vector GEP result width doesn't match operand's", &GEP);
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
        ElementCount IndexWidth = IndexVTy->getElementCount();
        Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Check(IndexTy->isIntOrIntVectorTy(),
            "All GEP indices should be of integer type");
    }
  }

  // The result pointer must live in the same address space as the base.
  Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
        "GEP address space doesn't match type", &GEP);

  visitInstruction(GEP);
}
4397
4398static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4399 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4400}
4401
4402/// Verify !range and !absolute_symbol metadata. These have the same
4403/// restrictions, except !absolute_symbol allows the full set.
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs, so the count must be even and
  // non-zero.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address-space numbers are always expressed as i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      // !range / !absolute_symbol bounds must match the value's scalar type.
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // An empty range is never allowed; a full range (low == high == min/max)
    // is only allowed for !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive pairs must be disjoint, ascending, and non-adjacent
      // (adjacent pairs should have been merged into one range).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // With 3+ ranges the list can wrap around the signed domain, so also
    // check the first range against the last one.
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4466
// Verify an instruction's !range metadata; the caller guarantees Range is the
// node actually attached to I.  Shared rules live in verifyRangeLikeMetadata.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4472
// Verify an instruction's !noalias.addrspace metadata; the caller guarantees
// Range is the node actually attached to I.  Shared rules live in
// verifyRangeLikeMetadata (with i32 bounds enforced for this kind).
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4480
4481void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4482 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4483 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4484 Check(!(Size & (Size - 1)),
4485 "atomic memory access' operand must have a power-of-two size", Ty, I);
4486}
4487
// Verify a load: pointer operand, supported alignment, sized result type, and
// — for atomic loads — legal ordering, element type, and access size.
// NOTE(review): a few lines were lost in extraction and are reconstructed
// from upstream (pointer-operand fetch, FP-scalar clause, sync-scope check) —
// confirm against the original file.
void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Release semantics make no sense on a load.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, floating point, "
          "or vector type!",
          ElTy, &LI);

    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
    // Sync scopes only have meaning on atomic accesses.
    Check(LI.getSyncScopeID() == SyncScope::System,
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4515
// Verify a store: pointer destination, supported alignment, sized value type,
// and — for atomic stores — legal ordering, element type, and access size.
// NOTE(review): the FP-scalar clause of the atomic type check was lost in
// extraction and is reconstructed from upstream — confirm against the
// original file.
void Verifier::visitStoreInst(StoreInst &SI) {
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Check(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = SI.getOperand(0)->getType();
  if (MaybeAlign A = SI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
  }
  Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Acquire semantics make no sense on a store.
    Check(SI.getOrdering() != AtomicOrdering::Acquire &&
              SI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Store cannot have Acquire ordering", &SI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isFloatingPointTy(),
          "atomic store operand must have integer, pointer, floating point, "
          "or vector type!",
          ElTy, &SI);
    checkAtomicMemAccessSize(ElTy, &SI);
  } else {
    // Sync scopes only have meaning on atomic accesses.
    Check(SI.getSyncScopeID() == SyncScope::System,
          "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
4541
4542/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4543void Verifier::verifySwiftErrorCall(CallBase &Call,
4544 const Value *SwiftErrorVal) {
4545 for (const auto &I : llvm::enumerate(Call.args())) {
4546 if (I.value() == SwiftErrorVal) {
4547 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4548 "swifterror value when used in a callsite should be marked "
4549 "with swifterror attribute",
4550 SwiftErrorVal, Call);
4551 }
4552 }
4553}
4554
// Verify every user of a swifterror value (alloca or argument): only loads,
// stores (as the pointer operand), and calls/invokes (with the swifterror
// attribute, checked by verifySwiftErrorCall) may use it.
// NOTE(review): the first line of the user-kind Check was lost in extraction
// and is reconstructed from upstream — confirm against the original file.
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
    Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
              isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4574
// Verify an alloca: sized allocated type, legal target-extension type, integer
// array size, supported alignment, swifterror constraints, and AMDGPU's
// address-space requirement.
// NOTE(review): several Check lines were lost in extraction and are
// reconstructed from upstream — confirm against the original file.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
  Check(!Ty->containsNonLocalTargetExtType(),
        "Alloca has illegal target extension type", &AI);
  Check(AI.getArraySize()->getType()->isIntegerTy(),
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // swifterror slots hold exactly one pointer.
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
    Check(!AI.isArrayAllocation(),
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
    Check(AI.getAddressSpace() == DL.getAllocaAddrSpace(),
          "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4604
4605void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4606 Type *ElTy = CXI.getOperand(1)->getType();
4607 Check(ElTy->isIntOrPtrTy(),
4608 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4609 checkAtomicMemAccessSize(ElTy, &CXI);
4610 visitInstruction(CXI);
4611}
4612
// Verify an atomicrmw: ordering is at least monotonic, the operand type
// matches what the particular operation requires (xchg: int/FP/pointer;
// FP ops: FP or fixed FP vector; others: integer), and the access size rules
// hold.
// NOTE(review): the FP-operation Check line and the binop-range Check line
// were lost in extraction and are reconstructed from upstream — confirm
// against the original file.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
    Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have floating-point or fixed vector of floating-point "
              "type!",
          &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
  Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4641
4642void Verifier::visitFenceInst(FenceInst &FI) {
4643 const AtomicOrdering Ordering = FI.getOrdering();
4644 Check(Ordering == AtomicOrdering::Acquire ||
4645 Ordering == AtomicOrdering::Release ||
4646 Ordering == AtomicOrdering::AcquireRelease ||
4647 Ordering == AtomicOrdering::SequentiallyConsistent,
4648 "fence instructions may only have acquire, release, acq_rel, or "
4649 "seq_cst ordering.",
4650 &FI);
4651 visitInstruction(FI);
4652}
4653
// Verify an extractvalue: indexing the aggregate operand's type by the
// instruction's index list must yield exactly the instruction's result type.
// NOTE(review): the Check's first line was lost in extraction and is
// reconstructed from upstream — confirm against the original file.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
  Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
                                         EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4661
// Verify an insertvalue: indexing the aggregate operand's type by the
// instruction's index list must yield the type of the value being inserted
// (operand 1).
// NOTE(review): the Check's first line was lost in extraction and is
// reconstructed from upstream — confirm against the original file.
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
  Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
                                         IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4670
4671static Value *getParentPad(Value *EHPad) {
4672 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4673 return FPI->getParentPad();
4674
4675 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4676}
4677
/// Verify that every predecessor of the block holding EH pad I reaches it via
/// a legal edge for that pad kind: invoke unwind edges for landingpads,
/// catchswitch dispatch for catchpads, and well-formed unwind edges (invoke,
/// cleanupret, or catchswitch) for the funclet-pad family, including the
/// nesting rules for unwinding out of nested pads.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad may only be entered from its owning catchswitch, and that
    // catchswitch's unwind destination must be a different block.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Nounwind intrinsics that cannot lower to a real call never produce an
      // unwind edge worth validating against the funclet nesting.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4760
// Verify a landingpad: it must carry at least one clause or be a cleanup,
// live in a function with a personality, be the first non-PHI instruction of
// its block, use a result type consistent with the function's other
// landingpads/resumes, and have well-typed catch/filter clauses.
// NOTE(review): the filter-clause Check's first line was lost in extraction
// and is reconstructed from upstream — confirm against the original file.
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // LandingPadResultTy records the first landingpad/resume type seen in this
  // function; every later one must match it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
      Check(isa<ArrayType>(Clause->getType()),
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4800
// Verify a resume: the function must have a personality, and the resumed
// value's type must agree with the function's landingpad result type.
// NOTE(review): the personality Check's first line was lost in extraction and
// is reconstructed from upstream — confirm against the original file.
void Verifier::visitResumeInst(ResumeInst &RI) {
  Check(RI.getFunction()->hasPersonalityFn(),
        "ResumeInst needs to be in a function with a personality.", &RI);

  // Share the per-function result-type slot with visitLandingPadInst so the
  // two instruction kinds stay mutually consistent.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4815
// Verify a catchpad: requires a personality function, a catchswitch parent
// pad, and first-non-PHI placement; remaining checks are shared with the
// other funclet pads.
// NOTE(review): the parent-pad Check's first line was lost in extraction and
// is reconstructed from upstream — confirm against the original file.
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

  Check(isa<CatchSwitchInst>(CPI.getParentPad()),
        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4835
// Verify a catchret: its token operand must be a catchpad.
void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
  Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
        "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
        CatchReturn.getOperand(0));

  visitTerminator(CatchReturn);
}
4843
// Verify a cleanuppad: requires a personality function, first-non-PHI
// placement, and a parent pad that is either "none" or another funclet pad;
// remaining checks are shared with the other funclet pads.
void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CleanupPadInst needs to be in a function with a personality.", &CPI);

  // The cleanuppad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);

  auto *ParentPad = CPI.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CleanupPadInst has an invalid parent.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4863
/// Verify the unwind-destination consistency rules for a funclet pad
/// (catchpad or cleanuppad): every unwind edge that exits FPI must agree on a
/// single unwind destination, and for a catchpad that destination must match
/// the parent catchswitch's.  Nested cleanuppads are explored via a worklist,
/// since their unwind destination is only discoverable through their uses.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First exiting user/destination found for FPI; later exits must match.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      // A catchpad's exits must unwind exactly where its parent catchswitch
      // does ("none" when the catchswitch unwinds to caller).
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5023
// Verify a catchswitch: requires a personality function, first-non-PHI
// placement, a legal parent pad, a non-landingpad EH-pad unwind destination
// (when present), and a non-empty handler list where every handler block
// begins with a catchpad.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
5065
// Verify a cleanupret: its token operand must be a cleanuppad, and any unwind
// destination must start with a non-landingpad EH pad.
// NOTE(review): the first Check's condition line was lost in extraction and
// is reconstructed from upstream — confirm against the original file.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  Check(isa<CleanupPadInst>(CRI.getOperand(0)),
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5081
5082void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5083 Instruction *Op = cast<Instruction>(I.getOperand(i));
5084 // If the we have an invalid invoke, don't try to compute the dominance.
5085 // We already reject it in the invoke specific checks and the dominance
5086 // computation doesn't handle multiple edges.
5087 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5088 if (II->getNormalDest() == II->getUnwindDest())
5089 return;
5090 }
5091
5092 // Quick check whether the def has already been encountered in the same block.
5093 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5094 // uses are defined to happen on the incoming edge, not at the instruction.
5095 //
5096 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5097 // wrapping an SSA value, assert that we've already encountered it. See
5098 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5099 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5100 return;
5101
5102 const Use &U = I.getOperandUse(i);
5103 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5104}
5105
// Verify !dereferenceable / !dereferenceable_or_null metadata: only on
// pointer-producing load/inttoptr instructions, with a single i64 operand
// giving the number of dereferenceable bytes.
// NOTE(review): the instruction-kind Check's first line was lost in
// extraction and is reconstructed from upstream — confirm against the
// original file.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
  Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5125
// Verify !nofree metadata: only on pointer-producing inttoptr instructions,
// and the node itself carries no operands.
void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
  Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
  Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
        &I);
  Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
}
5132
/// Verify a !prof attachment. Handles the "unknown" marker form, the
/// branch_weights form, and the value-profiling ("VP") form.
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // Number of weights a branch_weights annotation must carry for this
  // instruction kind; 0 means the instruction cannot carry branch weights.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
  // NOTE(review): the Check condition line (requiring operand 0 to be an
  // MDString) appears to be missing from this excerpt.
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

  // NOTE(review): the 'if' line opening this block (matching the "unknown"
  // profile marker name) appears to be missing from this excerpt.
    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      // Invokes may carry one weight or two (normal/unwind destinations).
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Every weight operand must be present and be a constant integer.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
      // NOTE(review): the Check condition line appears to be missing from
      // this excerpt. Also "brunch_weights" in the diagnostic below looks
      // like a typo for "branch_weights" — confirm against upstream before
      // changing, since tools may match on the exact message text.
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    // An odd total operand count means an even number of operands follow the
    // leading "VP" string.
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
      // NOTE(review): the Check condition line (presumably requiring a
      // CallBase) appears to be missing from this excerpt.
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5216
/// Verify a !DIAssignID attachment and check that all of its debug-info users
/// (intrinsic or record based) are assign-style and live in the same function
/// as the annotated instruction.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  // NOTE(review): the initializer expression for ExpectedInstTy appears to be
  // missing from this excerpt — confirm against the full source.
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
      // NOTE(review): the CheckDI condition line appears to be missing from
      // this excerpt.
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Record-based debug users must likewise be assign records in I's function.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5248
/// Verify an !mmra (memory model relaxation annotation) attachment: either a
/// single tag or a tuple of tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
  // NOTE(review): the Check condition line (restricting which instruction
  // kinds may carry !mmra) appears to be missing from this excerpt.
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5266
/// Verify memprof call-stack metadata: a non-empty list of constant-integer
/// location hashes.
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt — confirm against the full source.
          "call stack metadata operand should be constant integer", Op);
}
5277
5278void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5279 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5280 Check(MD->getNumOperands() >= 1,
5281 "!memprof annotations should have at least 1 metadata operand "
5282 "(MemInfoBlock)",
5283 MD);
5284
5285 // Check each MIB
5286 for (auto &MIBOp : MD->operands()) {
5287 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5288 // The first operand of an MIB should be the call stack metadata.
5289 // There rest of the operands should be MDString tags, and there should be
5290 // at least one.
5291 Check(MIB->getNumOperands() >= 2,
5292 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5293
5294 // Check call stack metadata (first operand).
5295 Check(MIB->getOperand(0) != nullptr,
5296 "!memprof MemInfoBlock first operand should not be null", MIB);
5297 Check(isa<MDNode>(MIB->getOperand(0)),
5298 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5299 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5300 visitCallStackMetadata(StackMD);
5301
5302 // The next set of 1 or more operands should be MDString.
5303 unsigned I = 1;
5304 for (; I < MIB->getNumOperands(); ++I) {
5305 if (!isa<MDString>(MIB->getOperand(I))) {
5306 Check(I > 1,
5307 "!memprof MemInfoBlock second operand should be an MDString",
5308 MIB);
5309 break;
5310 }
5311 }
5312
5313 // Any remaining should be MDNode that are pairs of integers
5314 for (; I < MIB->getNumOperands(); ++I) {
5315 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5316 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5317 MIB);
5318 Check(OpNode->getNumOperands() == 2,
5319 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5320 "operands",
5321 MIB);
5322 // Check that all of Op's operands are ConstantInt.
5323 Check(llvm::all_of(OpNode->operands(),
5324 [](const MDOperand &Op) {
5325 return mdconst::hasa<ConstantInt>(Op);
5326 }),
5327 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5328 "ConstantInt operands",
5329 MIB);
5330 }
5331 }
5332}
5333
5334void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5335 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5336 // Verify the partial callstack annotated from memprof profiles. This callsite
5337 // is a part of a profiled allocation callstack.
5338 visitCallStackMetadata(MD);
5339}
5340
5341static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5342 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5343 return isa<ConstantInt>(VAL->getValue());
5344 return false;
5345}
5346
/// Verify a !callee_type attachment: a list of well-formed generalized type
/// metadata nodes describing the possible callee prototypes.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
    // NOTE(review): the Check condition line (requiring Op to be an MDNode)
    // appears to be missing from this excerpt.
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // Function type metadata uses a zero offset as its first operand.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5367
5368void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5369 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5370 Check(Annotation->getNumOperands() >= 1,
5371 "annotation must have at least one operand");
5372 for (const MDOperand &Op : Annotation->operands()) {
5373 bool TupleOfStrings =
5374 isa<MDTuple>(Op.get()) &&
5375 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5376 return isa<MDString>(Annotation.get());
5377 });
5378 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5379 "operands must be a string or a tuple of strings");
5380 }
5381}
5382
/// Verify a single alias scope node: !{self-or-string, domain[, name-string]}
/// where the domain is itself !{self-or-string[, name-string]}.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // Operand 0 identifies the scope: the node itself or a string.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt — confirm against the full source.
          "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // The same self-or-string identity rule applies to the domain node.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5406
5407void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5408 for (const MDOperand &Op : MD->operands()) {
5409 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5410 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5411 visitAliasScopeMetadata(OpMD);
5412 }
5413}
5414
5415void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5416 auto IsValidAccessScope = [](const MDNode *MD) {
5417 return MD->getNumOperands() == 0 && MD->isDistinct();
5418 };
5419
5420 // It must be either an access scope itself...
5421 if (IsValidAccessScope(MD))
5422 return;
5423
5424 // ...or a list of access scopes.
5425 for (const MDOperand &Op : MD->operands()) {
5426 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5427 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5428 Check(IsValidAccessScope(OpMD),
5429 "Access scope list contains invalid access scope", MD);
5430 }
5431}
5432
5433void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5434 static const char *ValidArgs[] = {"address_is_null", "address",
5435 "read_provenance", "provenance"};
5436
5437 auto *SI = dyn_cast<StoreInst>(&I);
5438 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5439 Check(SI->getValueOperand()->getType()->isPointerTy(),
5440 "!captures metadata can only be applied to store with value operand of "
5441 "pointer type",
5442 &I);
5443 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5444 &I);
5445
5446 for (Metadata *Op : Captures->operands()) {
5447 auto *Str = dyn_cast<MDString>(Op);
5448 Check(Str, "!captures metadata must be a list of strings", &I);
5449 Check(is_contained(ValidArgs, Str->getString()),
5450 "invalid entry in !captures metadata", &I, Str);
5451 }
5452}
5453
/// Verify an !alloc_token attachment: a (string, integer) pair on a call.
void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
  // NOTE(review): the Check condition line (requiring operand 1 to be a
  // constant integer) appears to be missing from this excerpt.
        "expected integer constant", MD);
}
5461
/// visitInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated only in unreachable code.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
        // NOTE(review): the remainder of this expression (the bundle tag and
        // index arguments) appears to be missing from this excerpt.
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only the allowlisted intrinsics below may be used by a non-CallInst
      // (e.g. invoked); everything else must be a plain call.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this call/invoke.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
    // NOTE(review): the extraction expression initializing CFP0 appears to be
    // missing from this excerpt.
      const APFloat &Accuracy = CFP0->getValueAPF();
      // Accuracy must be a positive, finite single-precision value.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt.
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
    // NOTE(review): the Check condition lines appear to be missing from this
    // excerpt.
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt.
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt.
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  // Dedicated helpers validate the remaining well-known attachments.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
    // NOTE(review): the Check condition line appears to be missing from this
    // excerpt.
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      // Atom groups are only meaningful under the Key Instructions scheme.
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // NOTE(review): the declaration of MDs (presumably a SmallVector of
  // kind/node pairs) appears to be missing from this excerpt.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    // Debug locations may only appear inside !dbg and !llvm.loop attachments.
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Record this instruction so later intra-block uses pass dominance checks.
  InstsInThisBlock.insert(&I);
}
5722
5723/// Allow intrinsics to be verified in different ways.
5724void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5726 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5727 IF);
5728
5729 // Verify that the intrinsic prototype lines up with what the .td files
5730 // describe.
5731 FunctionType *IFTy = IF->getFunctionType();
5732 bool IsVarArg = IFTy->isVarArg();
5733
5737
5738 // Walk the descriptors to extract overloaded types.
5743 "Intrinsic has incorrect return type!", IF);
5745 "Intrinsic has incorrect argument type!", IF);
5746
5747 // Verify if the intrinsic call matches the vararg property.
5748 if (IsVarArg)
5750 "Intrinsic was not defined with variable arguments!", IF);
5751 else
5753 "Callsite was not defined with variable arguments!", IF);
5754
5755 // All descriptors should be absorbed by now.
5756 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5757
5758 // Now that we have the intrinsic ID and the actual argument types (and we
5759 // know they are legal for the intrinsic!) get the intrinsic name through the
5760 // usual means. This allows us to verify the mangling of argument types into
5761 // the name.
5762 const std::string ExpectedName =
5763 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5764 Check(ExpectedName == IF->getName(),
5765 "Intrinsic name not mangled correctly for type arguments! "
5766 "Should be: " +
5767 ExpectedName,
5768 IF);
5769
5770 // If the intrinsic takes MDNode arguments, verify that they are either global
5771 // or are local to *this* function.
5772 for (Value *V : Call.args()) {
5773 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5774 visitMetadataAsValue(*MD, Call.getCaller());
5775 if (auto *Const = dyn_cast<Constant>(V))
5776 Check(!Const->getType()->isX86_AMXTy(),
5777 "const x86_amx is not allowed in argument!");
5778 }
5779
5780 switch (ID) {
5781 default:
5782 break;
5783 case Intrinsic::assume: {
5784 if (Call.hasOperandBundles()) {
5786 Check(Cond && Cond->isOne(),
5787 "assume with operand bundles must have i1 true condition", Call);
5788 }
5789 for (auto &Elem : Call.bundle_op_infos()) {
5790 unsigned ArgCount = Elem.End - Elem.Begin;
5791 // Separate storage assumptions are special insofar as they're the only
5792 // operand bundles allowed on assumes that aren't parameter attributes.
5793 if (Elem.Tag->getKey() == "separate_storage") {
5794 Check(ArgCount == 2,
5795 "separate_storage assumptions should have 2 arguments", Call);
5796 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5797 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5798 "arguments to separate_storage assumptions should be pointers",
5799 Call);
5800 continue;
5801 }
5802 Check(Elem.Tag->getKey() == "ignore" ||
5803 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5804 "tags must be valid attribute names", Call);
5805 Attribute::AttrKind Kind =
5806 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5807 if (Kind == Attribute::Alignment) {
5808 Check(ArgCount <= 3 && ArgCount >= 2,
5809 "alignment assumptions should have 2 or 3 arguments", Call);
5810 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5811 "first argument should be a pointer", Call);
5812 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5813 "second argument should be an integer", Call);
5814 if (ArgCount == 3)
5815 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5816 "third argument should be an integer if present", Call);
5817 continue;
5818 }
5819 if (Kind == Attribute::Dereferenceable) {
5820 Check(ArgCount == 2,
5821 "dereferenceable assumptions should have 2 arguments", Call);
5822 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5823 "first argument should be a pointer", Call);
5824 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5825 "second argument should be an integer", Call);
5826 continue;
5827 }
5828 Check(ArgCount <= 2, "too many arguments", Call);
5829 if (Kind == Attribute::None)
5830 break;
5831 if (Attribute::isIntAttrKind(Kind)) {
5832 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5833 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5834 "the second argument should be a constant integral value", Call);
5835 } else if (Attribute::canUseAsParamAttr(Kind)) {
5836 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5837 } else if (Attribute::canUseAsFnAttr(Kind)) {
5838 Check((ArgCount) == 0, "this attribute has no argument", Call);
5839 }
5840 }
5841 break;
5842 }
5843 case Intrinsic::ucmp:
5844 case Intrinsic::scmp: {
5845 Type *SrcTy = Call.getOperand(0)->getType();
5846 Type *DestTy = Call.getType();
5847
5848 Check(DestTy->getScalarSizeInBits() >= 2,
5849 "result type must be at least 2 bits wide", Call);
5850
5851 bool IsDestTypeVector = DestTy->isVectorTy();
5852 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5853 "ucmp/scmp argument and result types must both be either vector or "
5854 "scalar types",
5855 Call);
5856 if (IsDestTypeVector) {
5857 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5858 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5859 Check(SrcVecLen == DestVecLen,
5860 "return type and arguments must have the same number of "
5861 "elements",
5862 Call);
5863 }
5864 break;
5865 }
5866 case Intrinsic::coro_id: {
5867 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5868 if (isa<ConstantPointerNull>(InfoArg))
5869 break;
5870 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5871 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5872 "info argument of llvm.coro.id must refer to an initialized "
5873 "constant");
5874 Constant *Init = GV->getInitializer();
5876 "info argument of llvm.coro.id must refer to either a struct or "
5877 "an array");
5878 break;
5879 }
5880 case Intrinsic::is_fpclass: {
5881 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5882 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5883 "unsupported bits for llvm.is.fpclass test mask");
5884 break;
5885 }
5886 case Intrinsic::fptrunc_round: {
5887 // Check the rounding mode
5888 Metadata *MD = nullptr;
5890 if (MAV)
5891 MD = MAV->getMetadata();
5892
5893 Check(MD != nullptr, "missing rounding mode argument", Call);
5894
5895 Check(isa<MDString>(MD),
5896 ("invalid value for llvm.fptrunc.round metadata operand"
5897 " (the operand should be a string)"),
5898 MD);
5899
5900 std::optional<RoundingMode> RoundMode =
5901 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5902 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5903 "unsupported rounding mode argument", Call);
5904 break;
5905 }
5906#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5907#include "llvm/IR/VPIntrinsics.def"
5908#undef BEGIN_REGISTER_VP_INTRINSIC
5909 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5910 break;
5911#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5912 case Intrinsic::INTRINSIC:
5913#include "llvm/IR/ConstrainedOps.def"
5914#undef INSTRUCTION
5915 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5916 break;
5917 case Intrinsic::dbg_declare: // llvm.dbg.declare
5918 case Intrinsic::dbg_value: // llvm.dbg.value
5919 case Intrinsic::dbg_assign: // llvm.dbg.assign
5920 case Intrinsic::dbg_label: // llvm.dbg.label
5921 // We no longer interpret debug intrinsics (the old variable-location
5922 // design). They're meaningless as far as LLVM is concerned we could make
5923 // it an error for them to appear, but it's possible we'll have users
5924 // converting back to intrinsics for the forseeable future (such as DXIL),
5925 // so tolerate their existance.
5926 break;
5927 case Intrinsic::memcpy:
5928 case Intrinsic::memcpy_inline:
5929 case Intrinsic::memmove:
5930 case Intrinsic::memset:
5931 case Intrinsic::memset_inline:
5932 break;
5933 case Intrinsic::experimental_memset_pattern: {
5934 const auto Memset = cast<MemSetPatternInst>(&Call);
5935 Check(Memset->getValue()->getType()->isSized(),
5936 "unsized types cannot be used as memset patterns", Call);
5937 break;
5938 }
5939 case Intrinsic::memcpy_element_unordered_atomic:
5940 case Intrinsic::memmove_element_unordered_atomic:
5941 case Intrinsic::memset_element_unordered_atomic: {
5942 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5943
5944 ConstantInt *ElementSizeCI =
5945 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5946 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5947 Check(ElementSizeVal.isPowerOf2(),
5948 "element size of the element-wise atomic memory intrinsic "
5949 "must be a power of 2",
5950 Call);
5951
5952 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5953 return Alignment && ElementSizeVal.ule(Alignment->value());
5954 };
5955 Check(IsValidAlignment(AMI->getDestAlign()),
5956 "incorrect alignment of the destination argument", Call);
5957 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5958 Check(IsValidAlignment(AMT->getSourceAlign()),
5959 "incorrect alignment of the source argument", Call);
5960 }
5961 break;
5962 }
5963 case Intrinsic::call_preallocated_setup: {
5964 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5965 bool FoundCall = false;
5966 for (User *U : Call.users()) {
5967 auto *UseCall = dyn_cast<CallBase>(U);
5968 Check(UseCall != nullptr,
5969 "Uses of llvm.call.preallocated.setup must be calls");
5970 Intrinsic::ID IID = UseCall->getIntrinsicID();
5971 if (IID == Intrinsic::call_preallocated_arg) {
5972 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5973 Check(AllocArgIndex != nullptr,
5974 "llvm.call.preallocated.alloc arg index must be a constant");
5975 auto AllocArgIndexInt = AllocArgIndex->getValue();
5976 Check(AllocArgIndexInt.sge(0) &&
5977 AllocArgIndexInt.slt(NumArgs->getValue()),
5978 "llvm.call.preallocated.alloc arg index must be between 0 and "
5979 "corresponding "
5980 "llvm.call.preallocated.setup's argument count");
5981 } else if (IID == Intrinsic::call_preallocated_teardown) {
5982 // nothing to do
5983 } else {
5984 Check(!FoundCall, "Can have at most one call corresponding to a "
5985 "llvm.call.preallocated.setup");
5986 FoundCall = true;
5987 size_t NumPreallocatedArgs = 0;
5988 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5989 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5990 ++NumPreallocatedArgs;
5991 }
5992 }
5993 Check(NumPreallocatedArgs != 0,
5994 "cannot use preallocated intrinsics on a call without "
5995 "preallocated arguments");
5996 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5997 "llvm.call.preallocated.setup arg size must be equal to number "
5998 "of preallocated arguments "
5999 "at call site",
6000 Call, *UseCall);
6001        // getOperandBundle() cannot be called if more than one operand bundle
6002        // of the given type exists. There is already a check elsewhere for
6003        // this, so skip here if we see more than one.
6004 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6005 1) {
6006 return;
6007 }
6008 auto PreallocatedBundle =
6009 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6010 Check(PreallocatedBundle,
6011 "Use of llvm.call.preallocated.setup outside intrinsics "
6012 "must be in \"preallocated\" operand bundle");
6013 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6014 "preallocated bundle must have token from corresponding "
6015 "llvm.call.preallocated.setup");
6016 }
6017 }
6018 break;
6019 }
6020 case Intrinsic::call_preallocated_arg: {
6021 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6022 Check(Token &&
6023 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6024 "llvm.call.preallocated.arg token argument must be a "
6025 "llvm.call.preallocated.setup");
6026 Check(Call.hasFnAttr(Attribute::Preallocated),
6027 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6028 "call site attribute");
6029 break;
6030 }
6031 case Intrinsic::call_preallocated_teardown: {
6032 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6033 Check(Token &&
6034 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6035 "llvm.call.preallocated.teardown token argument must be a "
6036 "llvm.call.preallocated.setup");
6037 break;
6038 }
6039 case Intrinsic::gcroot:
6040 case Intrinsic::gcwrite:
6041 case Intrinsic::gcread:
6042 if (ID == Intrinsic::gcroot) {
6043 AllocaInst *AI =
6045 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6047 "llvm.gcroot parameter #2 must be a constant.", Call);
6048 if (!AI->getAllocatedType()->isPointerTy()) {
6050 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6051 "or argument #2 must be a non-null constant.",
6052 Call);
6053 }
6054 }
6055
6056 Check(Call.getParent()->getParent()->hasGC(),
6057 "Enclosing function does not use GC.", Call);
6058 break;
6059 case Intrinsic::init_trampoline:
6061 "llvm.init_trampoline parameter #2 must resolve to a function.",
6062 Call);
6063 break;
6064 case Intrinsic::prefetch:
6065 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6066 "rw argument to llvm.prefetch must be 0-1", Call);
6067 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6068 "locality argument to llvm.prefetch must be 0-3", Call);
6069 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6070 "cache type argument to llvm.prefetch must be 0-1", Call);
6071 break;
6072 case Intrinsic::reloc_none: {
6074 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6075 "llvm.reloc.none argument must be a metadata string", &Call);
6076 break;
6077 }
6078 case Intrinsic::stackprotector:
6080 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6081 break;
6082 case Intrinsic::localescape: {
6083 BasicBlock *BB = Call.getParent();
6084 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6085 Call);
6086 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6087 Call);
6088 for (Value *Arg : Call.args()) {
6089 if (isa<ConstantPointerNull>(Arg))
6090 continue; // Null values are allowed as placeholders.
6091 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6092 Check(AI && AI->isStaticAlloca(),
6093 "llvm.localescape only accepts static allocas", Call);
6094 }
6095 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6096 SawFrameEscape = true;
6097 break;
6098 }
6099 case Intrinsic::localrecover: {
6101 Function *Fn = dyn_cast<Function>(FnArg);
6102 Check(Fn && !Fn->isDeclaration(),
6103 "llvm.localrecover first "
6104 "argument must be function defined in this module",
6105 Call);
6106 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6107 auto &Entry = FrameEscapeInfo[Fn];
6108 Entry.second = unsigned(
6109 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6110 break;
6111 }
6112
6113 case Intrinsic::experimental_gc_statepoint:
6114 if (auto *CI = dyn_cast<CallInst>(&Call))
6115 Check(!CI->isInlineAsm(),
6116 "gc.statepoint support for inline assembly unimplemented", CI);
6117 Check(Call.getParent()->getParent()->hasGC(),
6118 "Enclosing function does not use GC.", Call);
6119
6120 verifyStatepoint(Call);
6121 break;
6122 case Intrinsic::experimental_gc_result: {
6123 Check(Call.getParent()->getParent()->hasGC(),
6124 "Enclosing function does not use GC.", Call);
6125
6126 auto *Statepoint = Call.getArgOperand(0);
6127 if (isa<UndefValue>(Statepoint))
6128 break;
6129
6130 // Are we tied to a statepoint properly?
6131 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6132 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6133 Intrinsic::experimental_gc_statepoint,
6134 "gc.result operand #1 must be from a statepoint", Call,
6135 Call.getArgOperand(0));
6136
6137 // Check that result type matches wrapped callee.
6138 auto *TargetFuncType =
6139 cast<FunctionType>(StatepointCall->getParamElementType(2));
6140 Check(Call.getType() == TargetFuncType->getReturnType(),
6141 "gc.result result type does not match wrapped callee", Call);
6142 break;
6143 }
6144 case Intrinsic::experimental_gc_relocate: {
6145 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6146
6148 "gc.relocate must return a pointer or a vector of pointers", Call);
6149
6150 // Check that this relocate is correctly tied to the statepoint
6151
6152 // This is case for relocate on the unwinding path of an invoke statepoint
6153 if (LandingPadInst *LandingPad =
6155
6156 const BasicBlock *InvokeBB =
6157 LandingPad->getParent()->getUniquePredecessor();
6158
6159 // Landingpad relocates should have only one predecessor with invoke
6160 // statepoint terminator
6161 Check(InvokeBB, "safepoints should have unique landingpads",
6162 LandingPad->getParent());
6163 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6164 InvokeBB);
6166 "gc relocate should be linked to a statepoint", InvokeBB);
6167 } else {
6168 // In all other cases relocate should be tied to the statepoint directly.
6169 // This covers relocates on a normal return path of invoke statepoint and
6170 // relocates of a call statepoint.
6171 auto *Token = Call.getArgOperand(0);
6173 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6174 }
6175
6176 // Verify rest of the relocate arguments.
6177 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6178
6179 // Both the base and derived must be piped through the safepoint.
6182 "gc.relocate operand #2 must be integer offset", Call);
6183
6184 Value *Derived = Call.getArgOperand(2);
6185 Check(isa<ConstantInt>(Derived),
6186 "gc.relocate operand #3 must be integer offset", Call);
6187
6188 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6189 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6190
6191 // Check the bounds
6192 if (isa<UndefValue>(StatepointCall))
6193 break;
6194 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6195 .getOperandBundle(LLVMContext::OB_gc_live)) {
6196 Check(BaseIndex < Opt->Inputs.size(),
6197 "gc.relocate: statepoint base index out of bounds", Call);
6198 Check(DerivedIndex < Opt->Inputs.size(),
6199 "gc.relocate: statepoint derived index out of bounds", Call);
6200 }
6201
6202 // Relocated value must be either a pointer type or vector-of-pointer type,
6203 // but gc_relocate does not need to return the same pointer type as the
6204 // relocated pointer. It can be casted to the correct type later if it's
6205 // desired. However, they must have the same address space and 'vectorness'
6206 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6207 auto *ResultType = Call.getType();
6208 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6209 auto *BaseType = Relocate.getBasePtr()->getType();
6210
6211 Check(BaseType->isPtrOrPtrVectorTy(),
6212 "gc.relocate: relocated value must be a pointer", Call);
6213 Check(DerivedType->isPtrOrPtrVectorTy(),
6214 "gc.relocate: relocated value must be a pointer", Call);
6215
6216 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6217 "gc.relocate: vector relocates to vector and pointer to pointer",
6218 Call);
6219 Check(
6220 ResultType->getPointerAddressSpace() ==
6221 DerivedType->getPointerAddressSpace(),
6222 "gc.relocate: relocating a pointer shouldn't change its address space",
6223 Call);
6224
6225 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6226 Check(GC, "gc.relocate: calling function must have GCStrategy",
6227 Call.getFunction());
6228 if (GC) {
6229 auto isGCPtr = [&GC](Type *PTy) {
6230 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6231 };
6232 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6233 Check(isGCPtr(BaseType),
6234 "gc.relocate: relocated value must be a gc pointer", Call);
6235 Check(isGCPtr(DerivedType),
6236 "gc.relocate: relocated value must be a gc pointer", Call);
6237 }
6238 break;
6239 }
6240 case Intrinsic::experimental_patchpoint: {
6241 if (Call.getCallingConv() == CallingConv::AnyReg) {
6243 "patchpoint: invalid return type used with anyregcc", Call);
6244 }
6245 break;
6246 }
6247 case Intrinsic::eh_exceptioncode:
6248 case Intrinsic::eh_exceptionpointer: {
6250 "eh.exceptionpointer argument must be a catchpad", Call);
6251 break;
6252 }
6253 case Intrinsic::get_active_lane_mask: {
6255 "get_active_lane_mask: must return a "
6256 "vector",
6257 Call);
6258 auto *ElemTy = Call.getType()->getScalarType();
6259 Check(ElemTy->isIntegerTy(1),
6260 "get_active_lane_mask: element type is not "
6261 "i1",
6262 Call);
6263 break;
6264 }
6265 case Intrinsic::experimental_get_vector_length: {
6266 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6267 Check(!VF->isNegative() && !VF->isZero(),
6268 "get_vector_length: VF must be positive", Call);
6269 break;
6270 }
6271 case Intrinsic::masked_load: {
6272 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6273 Call);
6274
6276 Value *PassThru = Call.getArgOperand(2);
6277 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6278 Call);
6279 Check(PassThru->getType() == Call.getType(),
6280 "masked_load: pass through and return type must match", Call);
6281 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6282 cast<VectorType>(Call.getType())->getElementCount(),
6283 "masked_load: vector mask must be same length as return", Call);
6284 break;
6285 }
6286 case Intrinsic::masked_store: {
6287 Value *Val = Call.getArgOperand(0);
6289 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6290 Call);
6291 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6292 cast<VectorType>(Val->getType())->getElementCount(),
6293 "masked_store: vector mask must be same length as value", Call);
6294 break;
6295 }
6296
6297 case Intrinsic::experimental_guard: {
6298 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6300 "experimental_guard must have exactly one "
6301 "\"deopt\" operand bundle");
6302 break;
6303 }
6304
6305 case Intrinsic::experimental_deoptimize: {
6306 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6307 Call);
6309 "experimental_deoptimize must have exactly one "
6310 "\"deopt\" operand bundle");
6312 "experimental_deoptimize return type must match caller return type");
6313
6314 if (isa<CallInst>(Call)) {
6316 Check(RI,
6317 "calls to experimental_deoptimize must be followed by a return");
6318
6319 if (!Call.getType()->isVoidTy() && RI)
6320 Check(RI->getReturnValue() == &Call,
6321 "calls to experimental_deoptimize must be followed by a return "
6322 "of the value computed by experimental_deoptimize");
6323 }
6324
6325 break;
6326 }
6327 case Intrinsic::vastart: {
6329 "va_start called in a non-varargs function");
6330 break;
6331 }
6332 case Intrinsic::get_dynamic_area_offset: {
6333 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6334 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6335 IntTy->getBitWidth(),
6336 "get_dynamic_area_offset result type must be scalar integer matching "
6337 "alloca address space width",
6338 Call);
6339 break;
6340 }
6341 case Intrinsic::vector_reduce_and:
6342 case Intrinsic::vector_reduce_or:
6343 case Intrinsic::vector_reduce_xor:
6344 case Intrinsic::vector_reduce_add:
6345 case Intrinsic::vector_reduce_mul:
6346 case Intrinsic::vector_reduce_smax:
6347 case Intrinsic::vector_reduce_smin:
6348 case Intrinsic::vector_reduce_umax:
6349 case Intrinsic::vector_reduce_umin: {
6350 Type *ArgTy = Call.getArgOperand(0)->getType();
6351 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6352 "Intrinsic has incorrect argument type!");
6353 break;
6354 }
6355 case Intrinsic::vector_reduce_fmax:
6356 case Intrinsic::vector_reduce_fmin: {
6357 Type *ArgTy = Call.getArgOperand(0)->getType();
6358 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6359 "Intrinsic has incorrect argument type!");
6360 break;
6361 }
6362 case Intrinsic::vector_reduce_fadd:
6363 case Intrinsic::vector_reduce_fmul: {
6364 // Unlike the other reductions, the first argument is a start value. The
6365 // second argument is the vector to be reduced.
6366 Type *ArgTy = Call.getArgOperand(1)->getType();
6367 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6368 "Intrinsic has incorrect argument type!");
6369 break;
6370 }
6371 case Intrinsic::smul_fix:
6372 case Intrinsic::smul_fix_sat:
6373 case Intrinsic::umul_fix:
6374 case Intrinsic::umul_fix_sat:
6375 case Intrinsic::sdiv_fix:
6376 case Intrinsic::sdiv_fix_sat:
6377 case Intrinsic::udiv_fix:
6378 case Intrinsic::udiv_fix_sat: {
6379 Value *Op1 = Call.getArgOperand(0);
6380 Value *Op2 = Call.getArgOperand(1);
6382 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6383 "vector of ints");
6385 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6386 "vector of ints");
6387
6388 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6389 Check(Op3->getType()->isIntegerTy(),
6390 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6391 Check(Op3->getBitWidth() <= 32,
6392 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6393
6394 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6395 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6396 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6397 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6398 "the operands");
6399 } else {
6400 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6401 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6402 "to the width of the operands");
6403 }
6404 break;
6405 }
6406 case Intrinsic::lrint:
6407 case Intrinsic::llrint:
6408 case Intrinsic::lround:
6409 case Intrinsic::llround: {
6410 Type *ValTy = Call.getArgOperand(0)->getType();
6411 Type *ResultTy = Call.getType();
6412 auto *VTy = dyn_cast<VectorType>(ValTy);
6413 auto *RTy = dyn_cast<VectorType>(ResultTy);
6414 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6415 ExpectedName + ": argument must be floating-point or vector "
6416 "of floating-points, and result must be integer or "
6417 "vector of integers",
6418 &Call);
6419 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6420 ExpectedName + ": argument and result disagree on vector use", &Call);
6421 if (VTy) {
6422 Check(VTy->getElementCount() == RTy->getElementCount(),
6423 ExpectedName + ": argument must be same length as result", &Call);
6424 }
6425 break;
6426 }
6427 case Intrinsic::bswap: {
6428 Type *Ty = Call.getType();
6429 unsigned Size = Ty->getScalarSizeInBits();
6430 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6431 break;
6432 }
6433 case Intrinsic::invariant_start: {
6434 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6435 Check(InvariantSize &&
6436 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6437 "invariant_start parameter must be -1, 0 or a positive number",
6438 &Call);
6439 break;
6440 }
6441 case Intrinsic::matrix_multiply:
6442 case Intrinsic::matrix_transpose:
6443 case Intrinsic::matrix_column_major_load:
6444 case Intrinsic::matrix_column_major_store: {
6446 ConstantInt *Stride = nullptr;
6447 ConstantInt *NumRows;
6448 ConstantInt *NumColumns;
6449 VectorType *ResultTy;
6450 Type *Op0ElemTy = nullptr;
6451 Type *Op1ElemTy = nullptr;
6452 switch (ID) {
6453 case Intrinsic::matrix_multiply: {
6454 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6455 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6456 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6458 ->getNumElements() ==
6459 NumRows->getZExtValue() * N->getZExtValue(),
6460 "First argument of a matrix operation does not match specified "
6461 "shape!");
6463 ->getNumElements() ==
6464 N->getZExtValue() * NumColumns->getZExtValue(),
6465 "Second argument of a matrix operation does not match specified "
6466 "shape!");
6467
6468 ResultTy = cast<VectorType>(Call.getType());
6469 Op0ElemTy =
6470 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6471 Op1ElemTy =
6472 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6473 break;
6474 }
6475 case Intrinsic::matrix_transpose:
6476 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6477 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6478 ResultTy = cast<VectorType>(Call.getType());
6479 Op0ElemTy =
6480 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6481 break;
6482 case Intrinsic::matrix_column_major_load: {
6484 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6485 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6486 ResultTy = cast<VectorType>(Call.getType());
6487 break;
6488 }
6489 case Intrinsic::matrix_column_major_store: {
6491 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6492 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6493 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6494 Op0ElemTy =
6495 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6496 break;
6497 }
6498 default:
6499 llvm_unreachable("unexpected intrinsic");
6500 }
6501
6502 Check(ResultTy->getElementType()->isIntegerTy() ||
6503 ResultTy->getElementType()->isFloatingPointTy(),
6504 "Result type must be an integer or floating-point type!", IF);
6505
6506 if (Op0ElemTy)
6507 Check(ResultTy->getElementType() == Op0ElemTy,
6508 "Vector element type mismatch of the result and first operand "
6509 "vector!",
6510 IF);
6511
6512 if (Op1ElemTy)
6513 Check(ResultTy->getElementType() == Op1ElemTy,
6514 "Vector element type mismatch of the result and second operand "
6515 "vector!",
6516 IF);
6517
6519 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6520 "Result of a matrix operation does not fit in the returned vector!");
6521
6522 if (Stride) {
6523 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6524 IF);
6525 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6526 "Stride must be greater or equal than the number of rows!", IF);
6527 }
6528
6529 break;
6530 }
6531 case Intrinsic::vector_splice: {
6533 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6534 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6535 if (Call.getParent() && Call.getParent()->getParent()) {
6536 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6537 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6538 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6539 }
6540 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6541 (Idx >= 0 && Idx < KnownMinNumElements),
6542 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6543 "known minimum number of elements in the vector. For scalable "
6544 "vectors the minimum number of elements is determined from "
6545 "vscale_range.",
6546 &Call);
6547 break;
6548 }
6549 case Intrinsic::stepvector: {
6551 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6552 VecTy->getScalarSizeInBits() >= 8,
6553 "stepvector only supported for vectors of integers "
6554 "with a bitwidth of at least 8.",
6555 &Call);
6556 break;
6557 }
6558 case Intrinsic::experimental_vector_match: {
6559 Value *Op1 = Call.getArgOperand(0);
6560 Value *Op2 = Call.getArgOperand(1);
6562
6563 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6564 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6565 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6566
6567 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6569 "Second operand must be a fixed length vector.", &Call);
6570 Check(Op1Ty->getElementType()->isIntegerTy(),
6571 "First operand must be a vector of integers.", &Call);
6572 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6573 "First two operands must have the same element type.", &Call);
6574 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6575 "First operand and mask must have the same number of elements.",
6576 &Call);
6577 Check(MaskTy->getElementType()->isIntegerTy(1),
6578 "Mask must be a vector of i1's.", &Call);
6579 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6580 &Call);
6581 break;
6582 }
6583 case Intrinsic::vector_insert: {
6584 Value *Vec = Call.getArgOperand(0);
6585 Value *SubVec = Call.getArgOperand(1);
6586 Value *Idx = Call.getArgOperand(2);
6587 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6588
6589 VectorType *VecTy = cast<VectorType>(Vec->getType());
6590 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6591
6592 ElementCount VecEC = VecTy->getElementCount();
6593 ElementCount SubVecEC = SubVecTy->getElementCount();
6594 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6595 "vector_insert parameters must have the same element "
6596 "type.",
6597 &Call);
6598 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6599 "vector_insert index must be a constant multiple of "
6600 "the subvector's known minimum vector length.");
6601
6602 // If this insertion is not the 'mixed' case where a fixed vector is
6603 // inserted into a scalable vector, ensure that the insertion of the
6604 // subvector does not overrun the parent vector.
6605 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6606 Check(IdxN < VecEC.getKnownMinValue() &&
6607 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6608 "subvector operand of vector_insert would overrun the "
6609 "vector being inserted into.");
6610 }
6611 break;
6612 }
6613 case Intrinsic::vector_extract: {
6614 Value *Vec = Call.getArgOperand(0);
6615 Value *Idx = Call.getArgOperand(1);
6616 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6617
6618 VectorType *ResultTy = cast<VectorType>(Call.getType());
6619 VectorType *VecTy = cast<VectorType>(Vec->getType());
6620
6621 ElementCount VecEC = VecTy->getElementCount();
6622 ElementCount ResultEC = ResultTy->getElementCount();
6623
6624 Check(ResultTy->getElementType() == VecTy->getElementType(),
6625 "vector_extract result must have the same element "
6626 "type as the input vector.",
6627 &Call);
6628 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6629 "vector_extract index must be a constant multiple of "
6630 "the result type's known minimum vector length.");
6631
6632 // If this extraction is not the 'mixed' case where a fixed vector is
6633 // extracted from a scalable vector, ensure that the extraction does not
6634 // overrun the parent vector.
6635 if (VecEC.isScalable() == ResultEC.isScalable()) {
6636 Check(IdxN < VecEC.getKnownMinValue() &&
6637 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6638 "vector_extract would overrun.");
6639 }
6640 break;
6641 }
6642 case Intrinsic::vector_partial_reduce_fadd:
6643 case Intrinsic::vector_partial_reduce_add: {
6646
6647 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6648 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6649
6650 Check((VecWidth % AccWidth) == 0,
6651 "Invalid vector widths for partial "
6652 "reduction. The width of the input vector "
6653 "must be a positive integer multiple of "
6654 "the width of the accumulator vector.");
6655 break;
6656 }
6657 case Intrinsic::experimental_noalias_scope_decl: {
6658 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6659 break;
6660 }
6661 case Intrinsic::preserve_array_access_index:
6662 case Intrinsic::preserve_struct_access_index:
6663 case Intrinsic::aarch64_ldaxr:
6664 case Intrinsic::aarch64_ldxr:
6665 case Intrinsic::arm_ldaex:
6666 case Intrinsic::arm_ldrex: {
6667 Type *ElemTy = Call.getParamElementType(0);
6668 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6669 &Call);
6670 break;
6671 }
6672 case Intrinsic::aarch64_stlxr:
6673 case Intrinsic::aarch64_stxr:
6674 case Intrinsic::arm_stlex:
6675 case Intrinsic::arm_strex: {
6676 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6677 Check(ElemTy,
6678 "Intrinsic requires elementtype attribute on second argument.",
6679 &Call);
6680 break;
6681 }
6682 case Intrinsic::aarch64_prefetch: {
6683 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6684 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6685 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6686 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6687 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6688 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6689 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6690 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6691 break;
6692 }
6693 case Intrinsic::callbr_landingpad: {
6694 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6695 Check(CBR, "intrinstic requires callbr operand", &Call);
6696 if (!CBR)
6697 break;
6698
6699 const BasicBlock *LandingPadBB = Call.getParent();
6700 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6701 if (!PredBB) {
6702 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6703 break;
6704 }
6705 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6706 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6707 &Call);
6708 break;
6709 }
6710 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6711 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6712 "block in indirect destination list",
6713 &Call);
6714 const Instruction &First = *LandingPadBB->begin();
6715 Check(&First == &Call, "No other instructions may proceed intrinsic",
6716 &Call);
6717 break;
6718 }
6719 case Intrinsic::amdgcn_cs_chain: {
6720 auto CallerCC = Call.getCaller()->getCallingConv();
6721 switch (CallerCC) {
6722 case CallingConv::AMDGPU_CS:
6723 case CallingConv::AMDGPU_CS_Chain:
6724 case CallingConv::AMDGPU_CS_ChainPreserve:
6725 break;
6726 default:
6727 CheckFailed("Intrinsic can only be used from functions with the "
6728 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6729 "calling conventions",
6730 &Call);
6731 break;
6732 }
6733
6734 Check(Call.paramHasAttr(2, Attribute::InReg),
6735 "SGPR arguments must have the `inreg` attribute", &Call);
6736 Check(!Call.paramHasAttr(3, Attribute::InReg),
6737 "VGPR arguments must not have the `inreg` attribute", &Call);
6738
6739 auto *Next = Call.getNextNode();
6740 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6741 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6742 Intrinsic::amdgcn_unreachable;
6743 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6744 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6745 break;
6746 }
6747 case Intrinsic::amdgcn_init_exec_from_input: {
6748 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6749 Check(Arg && Arg->hasInRegAttr(),
6750 "only inreg arguments to the parent function are valid as inputs to "
6751 "this intrinsic",
6752 &Call);
6753 break;
6754 }
6755 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6756 auto CallerCC = Call.getCaller()->getCallingConv();
6757 switch (CallerCC) {
6758 case CallingConv::AMDGPU_CS_Chain:
6759 case CallingConv::AMDGPU_CS_ChainPreserve:
6760 break;
6761 default:
6762 CheckFailed("Intrinsic can only be used from functions with the "
6763 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6764 "calling conventions",
6765 &Call);
6766 break;
6767 }
6768
6769 unsigned InactiveIdx = 1;
6770 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6771 "Value for inactive lanes must not have the `inreg` attribute",
6772 &Call);
6773 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6774 "Value for inactive lanes must be a function argument", &Call);
6775 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6776 "Value for inactive lanes must be a VGPR function argument", &Call);
6777 break;
6778 }
6779 case Intrinsic::amdgcn_call_whole_wave: {
6781 Check(F, "Indirect whole wave calls are not allowed", &Call);
6782
6783 CallingConv::ID CC = F->getCallingConv();
6784 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6785 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6786 &Call);
6787
6788 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6789
6790 Check(Call.arg_size() == F->arg_size(),
6791 "Call argument count must match callee argument count", &Call);
6792
6793 // The first argument of the call is the callee, and the first argument of
6794 // the callee is the active mask. The rest of the arguments must match.
6795 Check(F->arg_begin()->getType()->isIntegerTy(1),
6796 "Callee must have i1 as its first argument", &Call);
6797 for (auto [CallArg, FuncArg] :
6798 drop_begin(zip_equal(Call.args(), F->args()))) {
6799 Check(CallArg->getType() == FuncArg.getType(),
6800 "Argument types must match", &Call);
6801
6802 // Check that inreg attributes match between call site and function
6803 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6804 FuncArg.hasInRegAttr(),
6805 "Argument inreg attributes must match", &Call);
6806 }
6807 break;
6808 }
6809 case Intrinsic::amdgcn_s_prefetch_data: {
6810 Check(
6813 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6814 break;
6815 }
6816 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6817 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6818 Value *Src0 = Call.getArgOperand(0);
6819 Value *Src1 = Call.getArgOperand(1);
6820
6821 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6822 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6823 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6824 Call.getArgOperand(3));
6825 Check(BLGP <= 4, "invalid value for blgp format", Call,
6826 Call.getArgOperand(4));
6827
6828 // AMDGPU::MFMAScaleFormats values
6829 auto getFormatNumRegs = [](unsigned FormatVal) {
6830 switch (FormatVal) {
6831 case 0:
6832 case 1:
6833 return 8u;
6834 case 2:
6835 case 3:
6836 return 6u;
6837 case 4:
6838 return 4u;
6839 default:
6840 llvm_unreachable("invalid format value");
6841 }
6842 };
6843
6844 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6845 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6846 return false;
6847 unsigned NumElts = Ty->getNumElements();
6848 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6849 };
6850
6851 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6852 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6853 Check(isValidSrcASrcBVector(Src0Ty),
6854 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6855 Check(isValidSrcASrcBVector(Src1Ty),
6856 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6857
6858 // Permit excess registers for the format.
6859 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6860 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6861 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6862 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6863 break;
6864 }
6865 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6866 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6867 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6868 Value *Src0 = Call.getArgOperand(1);
6869 Value *Src1 = Call.getArgOperand(3);
6870
6871 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6872 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6873 Check(FmtA <= 4, "invalid value for matrix format", Call,
6874 Call.getArgOperand(0));
6875 Check(FmtB <= 4, "invalid value for matrix format", Call,
6876 Call.getArgOperand(2));
6877
6878 // AMDGPU::MatrixFMT values
6879 auto getFormatNumRegs = [](unsigned FormatVal) {
6880 switch (FormatVal) {
6881 case 0:
6882 case 1:
6883 return 16u;
6884 case 2:
6885 case 3:
6886 return 12u;
6887 case 4:
6888 return 8u;
6889 default:
6890 llvm_unreachable("invalid format value");
6891 }
6892 };
6893
6894 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6895 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6896 return false;
6897 unsigned NumElts = Ty->getNumElements();
6898 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6899 };
6900
6901 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6902 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6903 Check(isValidSrcASrcBVector(Src0Ty),
6904 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6905 Check(isValidSrcASrcBVector(Src1Ty),
6906 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6907
6908 // Permit excess registers for the format.
6909 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6910 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6911 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6912 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6913 break;
6914 }
6915 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6916 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6917 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6918 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6919 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6920 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6921 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6922 Value *PtrArg = Call.getArgOperand(0);
6923 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6925 "cooperative atomic intrinsics require a generic or global pointer",
6926 &Call, PtrArg);
6927
6928 // Last argument must be a MD string
6930 MDNode *MD = cast<MDNode>(Op->getMetadata());
6931 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6932 "cooperative atomic intrinsics require that the last argument is a "
6933 "metadata string",
6934 &Call, Op);
6935 break;
6936 }
6937 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6938 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6939 Value *V = Call.getArgOperand(0);
6940 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6941 Check(RegCount % 8 == 0,
6942 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6943 break;
6944 }
6945 case Intrinsic::experimental_convergence_entry:
6946 case Intrinsic::experimental_convergence_anchor:
6947 break;
6948 case Intrinsic::experimental_convergence_loop:
6949 break;
6950 case Intrinsic::ptrmask: {
6951 Type *Ty0 = Call.getArgOperand(0)->getType();
6952 Type *Ty1 = Call.getArgOperand(1)->getType();
6954 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6955 "of pointers",
6956 &Call);
6957 Check(
6958 Ty0->isVectorTy() == Ty1->isVectorTy(),
6959 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6960 &Call);
6961 if (Ty0->isVectorTy())
6962 Check(cast<VectorType>(Ty0)->getElementCount() ==
6963 cast<VectorType>(Ty1)->getElementCount(),
6964 "llvm.ptrmask intrinsic arguments must have the same number of "
6965 "elements",
6966 &Call);
6967 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6968 "llvm.ptrmask intrinsic second argument bitwidth must match "
6969 "pointer index type size of first argument",
6970 &Call);
6971 break;
6972 }
6973 case Intrinsic::thread_pointer: {
6975 DL.getDefaultGlobalsAddressSpace(),
6976 "llvm.thread.pointer intrinsic return type must be for the globals "
6977 "address space",
6978 &Call);
6979 break;
6980 }
6981 case Intrinsic::threadlocal_address: {
6982 const Value &Arg0 = *Call.getArgOperand(0);
6983 Check(isa<GlobalValue>(Arg0),
6984 "llvm.threadlocal.address first argument must be a GlobalValue");
6985 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6986 "llvm.threadlocal.address operand isThreadLocal() must be true");
6987 break;
6988 }
6989 case Intrinsic::lifetime_start:
6990 case Intrinsic::lifetime_end: {
6993 "llvm.lifetime.start/end can only be used on alloca or poison",
6994 &Call);
6995 break;
6996 }
6997 };
6998
6999 // Verify that there aren't any unmediated control transfers between funclets.
7001 Function *F = Call.getParent()->getParent();
7002 if (F->hasPersonalityFn() &&
7003 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7004 // Run EH funclet coloring on-demand and cache results for other intrinsic
7005 // calls in this function
7006 if (BlockEHFuncletColors.empty())
7007 BlockEHFuncletColors = colorEHFunclets(*F);
7008
7009 // Check for catch-/cleanup-pad in first funclet block
7010 bool InEHFunclet = false;
7011 BasicBlock *CallBB = Call.getParent();
7012 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7013 assert(CV.size() > 0 && "Uncolored block");
7014 for (BasicBlock *ColorFirstBB : CV)
7015 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7016 It != ColorFirstBB->end())
7018 InEHFunclet = true;
7019
7020 // Check for funclet operand bundle
7021 bool HasToken = false;
7022 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7024 HasToken = true;
7025
7026 // This would cause silent code truncation in WinEHPrepare
7027 if (InEHFunclet)
7028 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7029 }
7030 }
7031}
7032
7033/// Carefully grab the subprogram from a local scope.
7034///
7035/// This carefully grabs the subprogram from a local scope, avoiding the
7036/// built-in assertions that would typically fire.
7038 if (!LocalScope)
7039 return nullptr;
 // A DISubprogram is itself the answer.
7041 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7042 return SP;
 // Walk up through lexical blocks until the enclosing subprogram is found.
7044 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7045 return getSubprogram(LB->getRawScope());
7047 // Just return null; broken scope chains are checked elsewhere.
7048 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7049 return nullptr;
7050}
7051
7052void Verifier::visit(DbgLabelRecord &DLR) {
  // Verify a single #dbg_label record: the label operand must be a DILabel,
  // the record needs a valid !dbg attachment, and the label's scope must
  // resolve to the same subprogram as the attachment's scope.
7054        "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7056  // Ignore broken !dbg attachments; they're checked elsewhere.
7057  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7058    if (!isa<DILocation>(N))
7059      return;
7061  BasicBlock *BB = DLR.getParent();
7062  Function *F = BB ? BB->getParent() : nullptr;
7064  // The scopes for variables and !dbg attachments must agree.
7065  DILabel *Label = DLR.getLabel();
7066  DILocation *Loc = DLR.getDebugLoc();
7067  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
  // Resolve both scope chains to their enclosing subprogram. Broken chains
  // resolve to null and are diagnosed elsewhere, so quietly bail out here.
7069  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7070  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7071  if (!LabelSP || !LocSP)
7072    return;
7074  CheckDI(LabelSP == LocSP,
7075          "mismatched subprogram between #dbg_label label and !dbg attachment",
7076          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7077          Loc->getScope()->getSubprogram());
7078}
7079
7080void Verifier::visit(DbgVariableRecord &DVR) {
  // Verify a single #dbg_value/#dbg_declare/#dbg_assign record: the record
  // kind, its location/variable/expression metadata, the extra #dbg_assign
  // operands, and the agreement between the variable's scope and the record's
  // !dbg attachment.
7081  BasicBlock *BB = DVR.getParent();
7082  Function *F = BB->getParent();
7084  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7085              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7086              DVR.getType() == DbgVariableRecord::LocationType::Assign,
7087          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7089  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7090  // DIArgList, or an empty MDNode (which is a legacy representation for an
7091  // "undef" location).
7092  auto *MD = DVR.getRawLocation();
7093  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7094                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7095          "invalid #dbg record address/value", &DVR, MD, BB, F);
7096  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7097    visitValueAsMetadata(*VAM, F);
7098    if (DVR.isDbgDeclare()) {
7099      // Allow integers here to support inttoptr salvage.
7100      Type *Ty = VAM->getValue()->getType();
7101      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7102              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7103              F);
7104    }
7105  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7106    visitDIArgList(*AL, F);
7107  }
  // Variable and expression operands must be present and well-formed metadata.
7110          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7111  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7114          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7115          F);
7116  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
  // #dbg_assign records additionally carry a DIAssignID, an address, and an
  // address expression; verify each of those here.
7118  if (DVR.isDbgAssign()) {
7120            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7121            F);
7122    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7123                AreDebugLocsAllowed::No);
7125    const auto *RawAddr = DVR.getRawAddress();
7126    // Similarly to the location above, the address for an assign
7127    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7128    // represents an undef address.
7129    CheckDI(
7130        isa<ValueAsMetadata>(RawAddr) ||
7131            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7132        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7133    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7134      visitValueAsMetadata(*VAM, F);
7137            "invalid #dbg_assign address expression", &DVR,
7138            DVR.getRawAddressExpression(), BB, F);
7139    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7141    // All of the linked instructions should be in the same function as DVR.
7142    for (Instruction *I : at::getAssignmentInsts(&DVR))
7143      CheckDI(DVR.getFunction() == I->getFunction(),
7144              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7145  }
7147  // This check is redundant with one in visitLocalVariable().
7148  DILocalVariable *Var = DVR.getVariable();
7149  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7150          BB, F);
7152  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7153  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7154          &DVR, DLNode, BB, F);
7155  DILocation *Loc = DVR.getDebugLoc();
7157  // The scopes for variables and !dbg attachments must agree.
7158  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7159  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7160  if (!VarSP || !LocSP)
7161    return; // Broken scope chains are checked elsewhere.
7163  CheckDI(VarSP == LocSP,
7164          "mismatched subprogram between #dbg record variable and DILocation",
7165          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7166          Loc->getScope()->getSubprogram(), BB, F);
  // Detect duplicate/conflicting records describing the same function arg.
7168  verifyFnArgs(DVR);
7169}
7170
7171void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7172 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7173 auto *RetTy = cast<VectorType>(VPCast->getType());
7174 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7175 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7176 "VP cast intrinsic first argument and result vector lengths must be "
7177 "equal",
7178 *VPCast);
7179
7180 switch (VPCast->getIntrinsicID()) {
7181 default:
7182 llvm_unreachable("Unknown VP cast intrinsic");
7183 case Intrinsic::vp_trunc:
7184 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7185 "llvm.vp.trunc intrinsic first argument and result element type "
7186 "must be integer",
7187 *VPCast);
7188 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7189 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7190 "larger than the bit size of the return type",
7191 *VPCast);
7192 break;
7193 case Intrinsic::vp_zext:
7194 case Intrinsic::vp_sext:
7195 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7196 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7197 "element type must be integer",
7198 *VPCast);
7199 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7200 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7201 "argument must be smaller than the bit size of the return type",
7202 *VPCast);
7203 break;
7204 case Intrinsic::vp_fptoui:
7205 case Intrinsic::vp_fptosi:
7206 case Intrinsic::vp_lrint:
7207 case Intrinsic::vp_llrint:
7208 Check(
7209 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7210 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7211 "type must be floating-point and result element type must be integer",
7212 *VPCast);
7213 break;
7214 case Intrinsic::vp_uitofp:
7215 case Intrinsic::vp_sitofp:
7216 Check(
7217 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7218 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7219 "type must be integer and result element type must be floating-point",
7220 *VPCast);
7221 break;
7222 case Intrinsic::vp_fptrunc:
7223 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7224 "llvm.vp.fptrunc intrinsic first argument and result element type "
7225 "must be floating-point",
7226 *VPCast);
7227 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7228 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7229 "larger than the bit size of the return type",
7230 *VPCast);
7231 break;
7232 case Intrinsic::vp_fpext:
7233 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7234 "llvm.vp.fpext intrinsic first argument and result element type "
7235 "must be floating-point",
7236 *VPCast);
7237 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7238 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7239 "smaller than the bit size of the return type",
7240 *VPCast);
7241 break;
7242 case Intrinsic::vp_ptrtoint:
7243 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7244 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7245 "pointer and result element type must be integer",
7246 *VPCast);
7247 break;
7248 case Intrinsic::vp_inttoptr:
7249 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7250 "llvm.vp.inttoptr intrinsic first argument element type must be "
7251 "integer and result element type must be pointer",
7252 *VPCast);
7253 break;
7254 }
7255 }
7256
7257 switch (VPI.getIntrinsicID()) {
7258 case Intrinsic::vp_fcmp: {
7259 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7261 "invalid predicate for VP FP comparison intrinsic", &VPI);
7262 break;
7263 }
7264 case Intrinsic::vp_icmp: {
7265 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7267 "invalid predicate for VP integer comparison intrinsic", &VPI);
7268 break;
7269 }
7270 case Intrinsic::vp_is_fpclass: {
7271 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7272 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7273 "unsupported bits for llvm.vp.is.fpclass test mask");
7274 break;
7275 }
7276 case Intrinsic::experimental_vp_splice: {
7277 VectorType *VecTy = cast<VectorType>(VPI.getType());
7278 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7279 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7280 if (VPI.getParent() && VPI.getParent()->getParent()) {
7281 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7282 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7283 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7284 }
7285 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7286 (Idx >= 0 && Idx < KnownMinNumElements),
7287 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7288 "known minimum number of elements in the vector. For scalable "
7289 "vectors the minimum number of elements is determined from "
7290 "vscale_range.",
7291 &VPI);
7292 break;
7293 }
7294 }
7295}
7296
7297void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  // Verify a constrained FP intrinsic: operand count including the trailing
  // metadata operands, per-intrinsic type rules, and valid exception-behavior
  // and rounding-mode metadata.
7298  unsigned NumOperands = FPI.getNonMetadataArgCount();
7299  bool HasRoundingMD =
7301
7302  // Add the expected number of metadata operands.
7303  NumOperands += (1 + HasRoundingMD);
7305  // Compare intrinsics carry an extra predicate metadata operand.
7307    NumOperands += 1;
7308  Check((FPI.arg_size() == NumOperands),
7309        "invalid arguments for constrained FP intrinsic", &FPI);
7311  switch (FPI.getIntrinsicID()) {
7312  case Intrinsic::experimental_constrained_lrint:
7313  case Intrinsic::experimental_constrained_llrint: {
7314    Type *ValTy = FPI.getArgOperand(0)->getType();
7315    Type *ResultTy = FPI.getType();
7316    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7317          "Intrinsic does not support vectors", &FPI);
7318    break;
7319  }
7321  case Intrinsic::experimental_constrained_lround:
7322  case Intrinsic::experimental_constrained_llround: {
7323    Type *ValTy = FPI.getArgOperand(0)->getType();
7324    Type *ResultTy = FPI.getType();
7325    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7326          "Intrinsic does not support vectors", &FPI);
7327    break;
7328  }
7330  case Intrinsic::experimental_constrained_fcmp:
7331  case Intrinsic::experimental_constrained_fcmps: {
7332    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7334          "invalid predicate for constrained FP comparison intrinsic", &FPI);
7335    break;
7336  }
  // fptosi/fptoui: FP (or FP vector) argument, integer (or integer vector)
  // result, and both sides must agree on vector-ness and element count.
7338  case Intrinsic::experimental_constrained_fptosi:
7339  case Intrinsic::experimental_constrained_fptoui: {
7340    Value *Operand = FPI.getArgOperand(0);
7341    ElementCount SrcEC;
7342    Check(Operand->getType()->isFPOrFPVectorTy(),
7343          "Intrinsic first argument must be floating point", &FPI);
7344    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7345      SrcEC = cast<VectorType>(OperandT)->getElementCount();
7346    }
7348    Operand = &FPI;
7349    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7350          "Intrinsic first argument and result disagree on vector use", &FPI);
7351    Check(Operand->getType()->isIntOrIntVectorTy(),
7352          "Intrinsic result must be an integer", &FPI);
7353    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7354      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7355            "Intrinsic first argument and result vector lengths must be equal",
7356            &FPI);
7357    }
7358    break;
7359  }
  // sitofp/uitofp: mirror image of the fptosi/fptoui case above.
7361  case Intrinsic::experimental_constrained_sitofp:
7362  case Intrinsic::experimental_constrained_uitofp: {
7363    Value *Operand = FPI.getArgOperand(0);
7364    ElementCount SrcEC;
7365    Check(Operand->getType()->isIntOrIntVectorTy(),
7366          "Intrinsic first argument must be integer", &FPI);
7367    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7368      SrcEC = cast<VectorType>(OperandT)->getElementCount();
7369    }
7371    Operand = &FPI;
7372    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7373          "Intrinsic first argument and result disagree on vector use", &FPI);
7374    Check(Operand->getType()->isFPOrFPVectorTy(),
7375          "Intrinsic result must be a floating point", &FPI);
7376    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7377      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7378            "Intrinsic first argument and result vector lengths must be equal",
7379            &FPI);
7380    }
7381    break;
7382  }
  // fptrunc/fpext: FP->FP with matching shape; fptrunc must narrow and fpext
  // must widen the scalar size.
7384  case Intrinsic::experimental_constrained_fptrunc:
7385  case Intrinsic::experimental_constrained_fpext: {
7386    Value *Operand = FPI.getArgOperand(0);
7387    Type *OperandTy = Operand->getType();
7388    Value *Result = &FPI;
7389    Type *ResultTy = Result->getType();
7390    Check(OperandTy->isFPOrFPVectorTy(),
7391          "Intrinsic first argument must be FP or FP vector", &FPI);
7392    Check(ResultTy->isFPOrFPVectorTy(),
7393          "Intrinsic result must be FP or FP vector", &FPI);
7394    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7395          "Intrinsic first argument and result disagree on vector use", &FPI);
7396    if (OperandTy->isVectorTy()) {
7397      Check(cast<VectorType>(OperandTy)->getElementCount() ==
7398                cast<VectorType>(ResultTy)->getElementCount(),
7399            "Intrinsic first argument and result vector lengths must be equal",
7400            &FPI);
7401    }
7402    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7403      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7404            "Intrinsic first argument's type must be larger than result type",
7405            &FPI);
7406    } else {
7407      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7408            "Intrinsic first argument's type must be smaller than result type",
7409            &FPI);
7410    }
7411    break;
7412  }
7414  default:
7415    break;
7416  }
7418  // If a non-metadata argument is passed in a metadata slot then the
7419  // error will be caught earlier when the incorrect argument doesn't
7420  // match the specification in the intrinsic call table. Thus, no
7421  // argument type check is needed here.
7423  Check(FPI.getExceptionBehavior().has_value(),
7424        "invalid exception behavior argument", &FPI);
7425  if (HasRoundingMD) {
7426    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7427          &FPI);
7428  }
7429}
7430
7431void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7432 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7433 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7434
7435 // We don't know whether this intrinsic verified correctly.
7436 if (!V || !E || !E->isValid())
7437 return;
7438
7439 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7440 auto Fragment = E->getFragmentInfo();
7441 if (!Fragment)
7442 return;
7443
7444 // The frontend helps out GDB by emitting the members of local anonymous
7445 // unions as artificial local variables with shared storage. When SROA splits
7446 // the storage for artificial local variables that are smaller than the entire
7447 // union, the overhang piece will be outside of the allotted space for the
7448 // variable and this check fails.
7449 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7450 if (V->isArtificial())
7451 return;
7452
7453 verifyFragmentExpression(*V, *Fragment, &DVR);
7454}
7455
7456template <typename ValueOrMetadata>
7457void Verifier::verifyFragmentExpression(const DIVariable &V,
7459                                        ValueOrMetadata *Desc) {
  // Shared fragment validation: the fragment must lie within the variable's
  // bit size and must not cover the entire variable.
7460  // If there's no size, the type is broken, but that should be checked
7461  // elsewhere.
7462  auto VarSize = V.getSizeInBits();
7463  if (!VarSize)
7464    return;
7466  unsigned FragSize = Fragment.SizeInBits;
7467  unsigned FragOffset = Fragment.OffsetInBits;
7468  CheckDI(FragSize + FragOffset <= *VarSize,
7469          "fragment is larger than or outside of variable", Desc, &V);
7470  CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7471}
7472
7473void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7474 // This function does not take the scope of noninlined function arguments into
7475 // account. Don't run it if current function is nodebug, because it may
7476 // contain inlined debug intrinsics.
7477 if (!HasDebugInfo)
7478 return;
7479
7480 // For performance reasons only check non-inlined ones.
7481 if (DVR.getDebugLoc()->getInlinedAt())
7482 return;
7483
7484 DILocalVariable *Var = DVR.getVariable();
7485 CheckDI(Var, "#dbg record without variable");
7486
7487 unsigned ArgNo = Var->getArg();
7488 if (!ArgNo)
7489 return;
7490
7491 // Verify there are no duplicate function argument debug info entries.
7492 // These will cause hard-to-debug assertions in the DWARF backend.
7493 if (DebugFnArgs.size() < ArgNo)
7494 DebugFnArgs.resize(ArgNo, nullptr);
7495
7496 auto *Prev = DebugFnArgs[ArgNo - 1];
7497 DebugFnArgs[ArgNo - 1] = Var;
7498 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7499 Prev, Var);
7500}
7501
7502void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  // DW_OP_LLVM_entry_value expressions are only legal in MIR, with one
  // IR-level exception handled below.
7503  DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7505  // We don't know whether this intrinsic verified correctly.
7506  if (!E || !E->isValid())
7507    return;
7510    Value *VarValue = DVR.getVariableLocationOp(0);
    // Undef/poison locations are harmless; nothing meaningful to reject.
7511    if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7512      return;
7513    // We allow EntryValues for swift async arguments, as they have an
7514    // ABI-guarantee to be turned into a specific register.
7515    if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7516        ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7517      return;
7518  }
7520  CheckDI(!E->isEntryValue(),
7521          "Entry values are only allowed in MIR unless they target a "
7522          "swiftasync Argument",
7523          &DVR);
7524}
7525
7526void Verifier::verifyCompileUnits() {
7527 // When more than one Module is imported into the same context, such as during
7528 // an LTO build before linking the modules, ODR type uniquing may cause types
7529 // to point to a different CU. This check does not make sense in this case.
7530 if (M.getContext().isODRUniquingDebugTypes())
7531 return;
7532 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7533 SmallPtrSet<const Metadata *, 2> Listed;
7534 if (CUs)
7535 Listed.insert_range(CUs->operands());
7536 for (const auto *CU : CUVisited)
7537 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7538 CUVisited.clear();
7539}
7540
7541void Verifier::verifyDeoptimizeCallingConvs() {
7542 if (DeoptimizeDeclarations.empty())
7543 return;
7544
7545 const Function *First = DeoptimizeDeclarations[0];
7546 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7547 Check(First->getCallingConv() == F->getCallingConv(),
7548 "All llvm.experimental.deoptimize declarations must have the same "
7549 "calling convention",
7550 First, F);
7551 }
7552}
7553
7554void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7555 const OperandBundleUse &BU) {
7556 FunctionType *FTy = Call.getFunctionType();
7557
7558 Check((FTy->getReturnType()->isPointerTy() ||
7559 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7560 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7561 "function returning a pointer or a non-returning function that has a "
7562 "void return type",
7563 Call);
7564
7565 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7566 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7567 "an argument",
7568 Call);
7569
7570 auto *Fn = cast<Function>(BU.Inputs.front());
7571 Intrinsic::ID IID = Fn->getIntrinsicID();
7572
7573 if (IID) {
7574 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7575 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7576 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7577 "invalid function argument", Call);
7578 } else {
7579 StringRef FnName = Fn->getName();
7580 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7581 FnName == "objc_claimAutoreleasedReturnValue" ||
7582 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7583 "invalid function argument", Call);
7584 }
7585}
7586
7587void Verifier::verifyNoAliasScopeDecl() {
  // Verify llvm.experimental.noalias.scope.decl intrinsics: each declares
  // exactly one scope, and (optionally) no two declarations of the same scope
  // dominate one another.
7588  if (NoAliasScopeDecls.empty())
7589    return;
7591  // only a single scope must be declared at a time.
7592  for (auto *II : NoAliasScopeDecls) {
7593    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7594           "Not a llvm.experimental.noalias.scope.decl ?");
7595    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7597    Check(ScopeListMV != nullptr,
7598          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7599          "argument",
7600          II);
7602    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7603    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7604    Check(ScopeListMD->getNumOperands() == 1,
7605          "!id.scope.list must point to a list with a single scope", II);
7606    visitAliasScopeListMetadata(ScopeListMD);
7607  }
7609  // Only check the domination rule when requested. Once all passes have been
7610  // adapted this option can go away.
7612    return;
7614  // Now sort the intrinsics based on the scope MDNode so that declarations of
7615  // the same scopes are next to each other.
7616  auto GetScope = [](IntrinsicInst *II) {
7617    const auto *ScopeListMV = cast<MetadataAsValue>(
7619    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7620  };
7622  // We are sorting on MDNode pointers here. For valid input IR this is ok.
7623  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7624  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7625    return GetScope(Lhs) < GetScope(Rhs);
7626  };
7628  llvm::sort(NoAliasScopeDecls, Compare);
7630  // Go over the intrinsics and check that for the same scope, they are not
7631  // dominating each other.
7632  auto ItCurrent = NoAliasScopeDecls.begin();
7633  while (ItCurrent != NoAliasScopeDecls.end()) {
7634    auto CurScope = GetScope(*ItCurrent);
    // Advance ItNext past the run of declarations sharing CurScope.
7635    auto ItNext = ItCurrent;
7636    do {
7637      ++ItNext;
7638    } while (ItNext != NoAliasScopeDecls.end() &&
7639             GetScope(*ItNext) == CurScope);
7641    // [ItCurrent, ItNext) represents the declarations for the same scope.
7642    // Ensure they are not dominating each other.. but only if it is not too
7643    // expensive.
7644    if (ItNext - ItCurrent < 32)
7645      for (auto *I : llvm::make_range(ItCurrent, ItNext))
7646        for (auto *J : llvm::make_range(ItCurrent, ItNext))
7647          if (I != J)
7648            Check(!DT.dominates(I, J),
7649                  "llvm.experimental.noalias.scope.decl dominates another one "
7650                  "with the same scope",
7651                  I);
7652    ItCurrent = ItNext;
7653  }
7654}
7655
7656//===----------------------------------------------------------------------===//
7657// Implement the public interfaces to this file...
7658//===----------------------------------------------------------------------===//
7659
7661 Function &F = const_cast<Function &>(f);
7662
7663 // Don't use a raw_null_ostream. Printing IR is expensive.
7664 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7665
7666 // Note that this function's return value is inverted from what you would
7667 // expect of a function called "verify".
7668 return !V.verify(F);
7669}
7670
7672 bool *BrokenDebugInfo) {
7673 // Don't use a raw_null_ostream. Printing IR is expensive.
7674 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7675
7676 bool Broken = false;
7677 for (const Function &F : M)
7678 Broken |= !V.verify(F);
7679
7680 Broken |= !V.verify();
7681 if (BrokenDebugInfo)
7682 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7683 // Note that this function's return value is inverted from what you would
7684 // expect of a function called "verify".
7685 return Broken;
7686}
7687
7688namespace {
7689
7690struct VerifierLegacyPass : public FunctionPass {
7691 static char ID;
7692
7693 std::unique_ptr<Verifier> V;
7694 bool FatalErrors = true;
7695
7696 VerifierLegacyPass() : FunctionPass(ID) {
7698 }
7699 explicit VerifierLegacyPass(bool FatalErrors)
7700 : FunctionPass(ID),
7701 FatalErrors(FatalErrors) {
7703 }
7704
7705 bool doInitialization(Module &M) override {
7706 V = std::make_unique<Verifier>(
7707 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7708 return false;
7709 }
7710
7711 bool runOnFunction(Function &F) override {
7712 if (!V->verify(F) && FatalErrors) {
7713 errs() << "in function " << F.getName() << '\n';
7714 report_fatal_error("Broken function found, compilation aborted!");
7715 }
7716 return false;
7717 }
7718
7719 bool doFinalization(Module &M) override {
7720 bool HasErrors = false;
7721 for (Function &F : M)
7722 if (F.isDeclaration())
7723 HasErrors |= !V->verify(F);
7724
7725 HasErrors |= !V->verify();
7726 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7727 report_fatal_error("Broken module found, compilation aborted!");
7728 return false;
7729 }
7730
7731 void getAnalysisUsage(AnalysisUsage &AU) const override {
7732 AU.setPreservesAll();
7733 }
7734};
7735
7736} // end anonymous namespace
7737
7738/// Helper to issue failure from the TBAA verification
7739template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7740 if (Diagnostic)
7741 return Diagnostic->CheckFailed(Args...);
7742}
7743
/// Check a TBAA-verifier condition \p C; on failure, report the remaining
/// arguments through TBAAVerifier::CheckFailed and make the enclosing
/// function return false (i.e. "this TBAA metadata is invalid").
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7751
7752/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7753/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7754/// struct-type node describing an aggregate data structure (like a struct).
7755TBAAVerifier::TBAABaseNodeSummary
7756TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7757 bool IsNewFormat) {
7758 if (BaseNode->getNumOperands() < 2) {
7759 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7760 return {true, ~0u};
7761 }
7762
7763 auto Itr = TBAABaseNodes.find(BaseNode);
7764 if (Itr != TBAABaseNodes.end())
7765 return Itr->second;
7766
7767 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7768 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7769 (void)InsertResult;
7770 assert(InsertResult.second && "We just checked!");
7771 return Result;
7772}
7773
7774TBAAVerifier::TBAABaseNodeSummary
7775TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7776 const MDNode *BaseNode, bool IsNewFormat) {
7777 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7778
7779 if (BaseNode->getNumOperands() == 2) {
7780 // Scalar nodes can only be accessed at offset 0.
7781 return isValidScalarTBAANode(BaseNode)
7782 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7783 : InvalidNode;
7784 }
7785
7786 if (IsNewFormat) {
7787 if (BaseNode->getNumOperands() % 3 != 0) {
7788 CheckFailed("Access tag nodes must have the number of operands that is a "
7789 "multiple of 3!", BaseNode);
7790 return InvalidNode;
7791 }
7792 } else {
7793 if (BaseNode->getNumOperands() % 2 != 1) {
7794 CheckFailed("Struct tag nodes must have an odd number of operands!",
7795 BaseNode);
7796 return InvalidNode;
7797 }
7798 }
7799
7800 // Check the type size field.
7801 if (IsNewFormat) {
7802 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7803 BaseNode->getOperand(1));
7804 if (!TypeSizeNode) {
7805 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7806 return InvalidNode;
7807 }
7808 }
7809
7810 // Check the type name field. In the new format it can be anything.
7811 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7812 CheckFailed("Struct tag nodes have a string as their first operand",
7813 BaseNode);
7814 return InvalidNode;
7815 }
7816
7817 bool Failed = false;
7818
7819 std::optional<APInt> PrevOffset;
7820 unsigned BitWidth = ~0u;
7821
7822 // We've already checked that BaseNode is not a degenerate root node with one
7823 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7824 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7825 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7826 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7827 Idx += NumOpsPerField) {
7828 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7829 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7830 if (!isa<MDNode>(FieldTy)) {
7831 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7832 Failed = true;
7833 continue;
7834 }
7835
7836 auto *OffsetEntryCI =
7838 if (!OffsetEntryCI) {
7839 CheckFailed("Offset entries must be constants!", I, BaseNode);
7840 Failed = true;
7841 continue;
7842 }
7843
7844 if (BitWidth == ~0u)
7845 BitWidth = OffsetEntryCI->getBitWidth();
7846
7847 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7848 CheckFailed(
7849 "Bitwidth between the offsets and struct type entries must match", I,
7850 BaseNode);
7851 Failed = true;
7852 continue;
7853 }
7854
7855 // NB! As far as I can tell, we generate a non-strictly increasing offset
7856 // sequence only from structs that have zero size bit fields. When
7857 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7858 // pick the field lexically the latest in struct type metadata node. This
7859 // mirrors the actual behavior of the alias analysis implementation.
7860 bool IsAscending =
7861 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7862
7863 if (!IsAscending) {
7864 CheckFailed("Offsets must be increasing!", I, BaseNode);
7865 Failed = true;
7866 }
7867
7868 PrevOffset = OffsetEntryCI->getValue();
7869
7870 if (IsNewFormat) {
7871 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7872 BaseNode->getOperand(Idx + 2));
7873 if (!MemberSizeNode) {
7874 CheckFailed("Member size entries must be constants!", I, BaseNode);
7875 Failed = true;
7876 continue;
7877 }
7878 }
7879 }
7880
7881 return Failed ? InvalidNode
7882 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7883}
7884
7885static bool IsRootTBAANode(const MDNode *MD) {
7886 return MD->getNumOperands() < 2;
7887}
7888
7889static bool IsScalarTBAANodeImpl(const MDNode *MD,
7891 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7892 return false;
7893
7894 if (!isa<MDString>(MD->getOperand(0)))
7895 return false;
7896
7897 if (MD->getNumOperands() == 3) {
7899 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7900 return false;
7901 }
7902
7903 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7904 return Parent && Visited.insert(Parent).second &&
7905 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7906}
7907
7908bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7909 auto ResultIt = TBAAScalarNodes.find(MD);
7910 if (ResultIt != TBAAScalarNodes.end())
7911 return ResultIt->second;
7912
7913 SmallPtrSet<const MDNode *, 4> Visited;
7914 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7915 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7916 (void)InsertResult;
7917 assert(InsertResult.second && "Just checked!");
7918
7919 return Result;
7920}
7921
7922/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7923/// Offset in place to be the offset within the field node returned.
7924///
7925/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7926MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7927 const MDNode *BaseNode,
7928 APInt &Offset,
7929 bool IsNewFormat) {
7930 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7931
7932 // Scalar nodes have only one possible "field" -- their parent in the access
7933 // hierarchy. Offset must be zero at this point, but our caller is supposed
7934 // to check that.
7935 if (BaseNode->getNumOperands() == 2)
7936 return cast<MDNode>(BaseNode->getOperand(1));
7937
7938 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7939 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7940 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7941 Idx += NumOpsPerField) {
7942 auto *OffsetEntryCI =
7943 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7944 if (OffsetEntryCI->getValue().ugt(Offset)) {
7945 if (Idx == FirstFieldOpNo) {
7946 CheckFailed("Could not find TBAA parent in struct type node", I,
7947 BaseNode, &Offset);
7948 return nullptr;
7949 }
7950
7951 unsigned PrevIdx = Idx - NumOpsPerField;
7952 auto *PrevOffsetEntryCI =
7953 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7954 Offset -= PrevOffsetEntryCI->getValue();
7955 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7956 }
7957 }
7958
7959 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7960 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7961 BaseNode->getOperand(LastIdx + 1));
7962 Offset -= LastOffsetEntryCI->getValue();
7963 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7964}
7965
7967 if (!Type || Type->getNumOperands() < 3)
7968 return false;
7969
7970 // In the new format type nodes shall have a reference to the parent type as
7971 // its first operand.
7972 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7973}
7974
7976 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7977 MD);
7978
7979 if (I)
7983 "This instruction shall not have a TBAA access tag!", I);
7984
7985 bool IsStructPathTBAA =
7986 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7987
7988 CheckTBAA(IsStructPathTBAA,
7989 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7990 I);
7991
7992 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7993 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7994
7995 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7996
7997 if (IsNewFormat) {
7998 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7999 "Access tag metadata must have either 4 or 5 operands", I, MD);
8000 } else {
8001 CheckTBAA(MD->getNumOperands() < 5,
8002 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8003 }
8004
8005 // Check the access size field.
8006 if (IsNewFormat) {
8007 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8008 MD->getOperand(3));
8009 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8010 }
8011
8012 // Check the immutability flag.
8013 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8014 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8015 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8016 MD->getOperand(ImmutabilityFlagOpNo));
8017 CheckTBAA(IsImmutableCI,
8018 "Immutability tag on struct tag metadata must be a constant", I,
8019 MD);
8020 CheckTBAA(
8021 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8022 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8023 MD);
8024 }
8025
8026 CheckTBAA(BaseNode && AccessType,
8027 "Malformed struct tag metadata: base and access-type "
8028 "should be non-null and point to Metadata nodes",
8029 I, MD, BaseNode, AccessType);
8030
8031 if (!IsNewFormat) {
8032 CheckTBAA(isValidScalarTBAANode(AccessType),
8033 "Access type node must be a valid scalar type", I, MD,
8034 AccessType);
8035 }
8036
8038 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8039
8040 APInt Offset = OffsetCI->getValue();
8041 bool SeenAccessTypeInPath = false;
8042
8043 SmallPtrSet<MDNode *, 4> StructPath;
8044
8045 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8046 BaseNode =
8047 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8048 if (!StructPath.insert(BaseNode).second) {
8049 CheckFailed("Cycle detected in struct path", I, MD);
8050 return false;
8051 }
8052
8053 bool Invalid;
8054 unsigned BaseNodeBitWidth;
8055 std::tie(Invalid, BaseNodeBitWidth) =
8056 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8057
8058 // If the base node is invalid in itself, then we've already printed all the
8059 // errors we wanted to print.
8060 if (Invalid)
8061 return false;
8062
8063 SeenAccessTypeInPath |= BaseNode == AccessType;
8064
8065 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8066 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8067 MD, &Offset);
8068
8069 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8070 (BaseNodeBitWidth == 0 && Offset == 0) ||
8071 (IsNewFormat && BaseNodeBitWidth == ~0u),
8072 "Access bit-width not the same as description bit-width", I, MD,
8073 BaseNodeBitWidth, Offset.getBitWidth());
8074
8075 if (IsNewFormat && SeenAccessTypeInPath)
8076 break;
8077 }
8078
8079 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8080 MD);
8081 return true;
8082}
8083
8084char VerifierLegacyPass::ID = 0;
8085INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8086
8088 return new VerifierLegacyPass(FatalErrors);
8089}
8090
8091AnalysisKey VerifierAnalysis::Key;
8098
8103
8105 auto Res = AM.getResult<VerifierAnalysis>(M);
8106 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8107 report_fatal_error("Broken module found, compilation aborted!");
8108
8109 return PreservedAnalyses::all();
8110}
8111
8113 auto res = AM.getResult<VerifierAnalysis>(F);
8114 if (res.IRBroken && FatalErrors)
8115 report_fatal_error("Broken function found, compilation aborted!");
8116
8117 return PreservedAnalyses::all();
8118}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:719
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:233
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:245
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:246
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:302
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:295
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:284
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:311
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142