Verifier.cpp (LLVM 19.0.0git)
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types, e.g. that shifts and bitwise logical operations only happen on integrals.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
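//
// A minimal usage sketch: clients normally reach this code through the public
// entry points declared in llvm/IR/Verifier.h rather than by instantiating the
// classes below, e.g.
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   // Both helpers return true when the IR is broken; diagnostics are
//   // printed to the stream if one is provided.
//   bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//   bool FunctionBroken = llvm::verifyFunction(F, &llvm::errs());
//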
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
76#include "llvm/IR/Constants.h"
78#include "llvm/IR/DataLayout.h"
79#include "llvm/IR/DebugInfo.h"
81#include "llvm/IR/DebugLoc.h"
83#include "llvm/IR/Dominators.h"
85#include "llvm/IR/Function.h"
86#include "llvm/IR/GCStrategy.h"
87#include "llvm/IR/GlobalAlias.h"
88#include "llvm/IR/GlobalValue.h"
90#include "llvm/IR/InlineAsm.h"
91#include "llvm/IR/InstVisitor.h"
92#include "llvm/IR/InstrTypes.h"
93#include "llvm/IR/Instruction.h"
96#include "llvm/IR/Intrinsics.h"
97#include "llvm/IR/IntrinsicsAArch64.h"
98#include "llvm/IR/IntrinsicsAMDGPU.h"
99#include "llvm/IR/IntrinsicsARM.h"
100#include "llvm/IR/IntrinsicsNVPTX.h"
101#include "llvm/IR/IntrinsicsWebAssembly.h"
102#include "llvm/IR/LLVMContext.h"
104#include "llvm/IR/Metadata.h"
105#include "llvm/IR/Module.h"
107#include "llvm/IR/PassManager.h"
109#include "llvm/IR/Statepoint.h"
110#include "llvm/IR/Type.h"
111#include "llvm/IR/Use.h"
112#include "llvm/IR/User.h"
114#include "llvm/IR/Value.h"
116#include "llvm/Pass.h"
118#include "llvm/Support/Casting.h"
122#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
134static cl::opt<bool> VerifyNoAliasScopeDomination(
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
141struct VerifierSupport {
142 raw_ostream *OS;
143 const Module &M;
144 ModuleSlotTracker MST;
145 Triple TT;
146 const DataLayout &DL;
147 LLVMContext &Context;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
154 bool TreatBrokenDebugInfoAsError = true;
155
156 explicit VerifierSupport(raw_ostream *OS, const Module &M)
157 : OS(OS), M(M), MST(&M), TT(Triple::normalize(M.getTargetTriple())),
158 DL(M.getDataLayout()), Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
187 void Write(DbgVariableRecord::LocationType Type) {
188 switch (Type) {
189 case DbgVariableRecord::LocationType::Value:
190 *OS << "value";
191 break;
192 case DbgVariableRecord::LocationType::Declare:
193 *OS << "declare";
194 break;
195 case DbgVariableRecord::LocationType::Assign:
196 *OS << "assign";
197 break;
198 case DbgVariableRecord::LocationType::End:
199 *OS << "end";
200 break;
201 case DbgVariableRecord::LocationType::Any:
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
273 template <typename T1, typename... Ts>
274 void WriteTs(const T1 &V1, const Ts &... Vs) {
275 Write(V1);
276 WriteTs(Vs...);
277 }
278
279 template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so print out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
307 Broken |= TreatBrokenDebugInfoAsError;
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327
328 // ISD::ArgFlagsTy::MemAlign only has 4 bits for the alignment, so
329 // an alignment of at most 2^15 could be encoded. Since encode(Align)
330 // adds 1 to the shift value, the alignment must actually not exceed
331 // 2^14, or it cannot be properly lowered
332 // in the backend.
333 static constexpr unsigned ParamMaxAlignment = 1 << 14;
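 // For illustration (following the comment above): Align(1 << 14) has a
 // shift value of 14, which encode() stores as 14 + 1 == 15, the largest
 // value that fits in the 4-bit MemAlign field; an alignment of 1 << 15
 // would need the encoded value 16 and no longer fits.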
334 DominatorTree DT;
335
336 /// When verifying a basic block, keep track of all of the
337 /// instructions we have seen so far.
338 ///
339 /// This allows us to do efficient dominance checks for the case when an
340 /// instruction has an operand that is an instruction in the same block.
341 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
342
343 /// Keep track of the metadata nodes that have been checked already.
344 SmallPtrSet<const Metadata *, 32> MDNodes;
345
346 /// Keep track which DISubprogram is attached to which function.
347 DenseMap<const Function *, const DISubprogram *> DISubprogramAttachments;
348
349 /// Track all DICompileUnits visited.
351 SmallPtrSet<const Metadata *, 2> CUVisited;
352 /// The result type for a landingpad.
353 Type *LandingPadResultTy;
354
355 /// Whether we've seen a call to @llvm.localescape in this function
356 /// already.
357 bool SawFrameEscape;
358
359 /// Whether the current function has a DISubprogram attached to it.
360 bool HasDebugInfo = false;
361
362 /// The current source language.
363 dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
364
365 /// Stores the count of how many objects were passed to llvm.localescape for a
366 /// given function and the largest index passed to llvm.localrecover.
367 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
368
369 // Maps catchswitches and cleanuppads that unwind to siblings to the
370 // terminators that indicate the unwind, used to detect cycles therein.
371 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
372
373 /// Cache which blocks are in which funclet, if an EH funclet personality is
374 /// in use. Otherwise empty.
375 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
376
377 /// Cache of constants visited in search of ConstantExprs.
378 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
379
380 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
381 SmallVector<const Function *, 4> DeoptimizeDeclarations;
382
383 /// Cache of attribute lists verified.
384 SmallPtrSet<const void *, 32> AttributeListsVisited;
385
386 // Verify that this GlobalValue is only used in this module.
387 // This map is used to avoid visiting uses twice. We can arrive at a user
388 // twice, if they have multiple operands. In particular for very large
389 // constant expressions, we can arrive at a particular user many times.
390 SmallPtrSet<const Value *, 32> GlobalValueVisited;
391
392 // Keeps track of duplicate function argument debug info.
393 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
394
395 TBAAVerifier TBAAVerifyHelper;
396 ConvergenceVerifier ConvergenceVerifyHelper;
397
398 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
399
400 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
401
402public:
403 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
404 const Module &M)
405 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
406 SawFrameEscape(false), TBAAVerifyHelper(this) {
407 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
408 }
409
410 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
411
412 bool verify(const Function &F) {
413 assert(F.getParent() == &M &&
414 "An instance of this class only works with a specific module!");
415
416 // First ensure the function is well-enough formed to compute dominance
417 // information, and directly compute a dominance tree. We don't rely on the
418 // pass manager to provide this as it isolates us from a potentially
419 // out-of-date dominator tree and makes it significantly more complex to run
420 // this code outside of a pass manager.
421 // FIXME: It's really gross that we have to cast away constness here.
422 if (!F.empty())
423 DT.recalculate(const_cast<Function &>(F));
424
425 for (const BasicBlock &BB : F) {
426 if (!BB.empty() && BB.back().isTerminator())
427 continue;
428
429 if (OS) {
430 *OS << "Basic Block in function '" << F.getName()
431 << "' does not have terminator!\n";
432 BB.printAsOperand(*OS, true, MST);
433 *OS << "\n";
434 }
435 return false;
436 }
437
438 auto FailureCB = [this](const Twine &Message) {
439 this->CheckFailed(Message);
440 };
441 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
442
443 Broken = false;
444 // FIXME: We strip const here because the inst visitor strips const.
445 visit(const_cast<Function &>(F));
446 verifySiblingFuncletUnwinds();
447
448 if (ConvergenceVerifyHelper.sawTokens())
449 ConvergenceVerifyHelper.verify(DT);
450
451 InstsInThisBlock.clear();
452 DebugFnArgs.clear();
453 LandingPadResultTy = nullptr;
454 SawFrameEscape = false;
455 SiblingFuncletInfo.clear();
456 verifyNoAliasScopeDecl();
457 NoAliasScopeDecls.clear();
458
459 return !Broken;
460 }
461
462 /// Verify the module that this instance of \c Verifier was initialized with.
463 bool verify() {
464 Broken = false;
465
466 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
467 for (const Function &F : M)
468 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
469 DeoptimizeDeclarations.push_back(&F);
470
471 // Now that we've visited every function, verify that we never asked to
472 // recover a frame index that wasn't escaped.
473 verifyFrameRecoverIndices();
474 for (const GlobalVariable &GV : M.globals())
475 visitGlobalVariable(GV);
476
477 for (const GlobalAlias &GA : M.aliases())
478 visitGlobalAlias(GA);
479
480 for (const GlobalIFunc &GI : M.ifuncs())
481 visitGlobalIFunc(GI);
482
483 for (const NamedMDNode &NMD : M.named_metadata())
484 visitNamedMDNode(NMD);
485
486 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
487 visitComdat(SMEC.getValue());
488
489 visitModuleFlags();
490 visitModuleIdents();
491 visitModuleCommandLines();
492
493 verifyCompileUnits();
494
495 verifyDeoptimizeCallingConvs();
496 DISubprogramAttachments.clear();
497 return !Broken;
498 }
499
500private:
501 /// Whether a metadata node is allowed to be, or contain, a DILocation.
502 enum class AreDebugLocsAllowed { No, Yes };
503
504 // Verification methods...
505 void visitGlobalValue(const GlobalValue &GV);
506 void visitGlobalVariable(const GlobalVariable &GV);
507 void visitGlobalAlias(const GlobalAlias &GA);
508 void visitGlobalIFunc(const GlobalIFunc &GI);
509 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
510 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
511 const GlobalAlias &A, const Constant &C);
512 void visitNamedMDNode(const NamedMDNode &NMD);
513 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
514 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
515 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
516 void visitDIArgList(const DIArgList &AL, Function *F);
517 void visitComdat(const Comdat &C);
518 void visitModuleIdents();
519 void visitModuleCommandLines();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 bool IsAbsoluteSymbol);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
531 void visitProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallStackMetadata(MDNode *MD);
533 void visitMemProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
535 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
536 void visitMMRAMetadata(Instruction &I, MDNode *MD);
537 void visitAnnotationMetadata(MDNode *Annotation);
538 void visitAliasScopeMetadata(const MDNode *MD);
539 void visitAliasScopeListMetadata(const MDNode *MD);
540 void visitAccessGroupMetadata(const MDNode *MD);
541
542 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
543#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
544#include "llvm/IR/Metadata.def"
545 void visitDIScope(const DIScope &N);
546 void visitDIVariable(const DIVariable &N);
547 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
548 void visitDITemplateParameter(const DITemplateParameter &N);
549
550 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
551
552 void visit(DbgLabelRecord &DLR);
553 void visit(DbgVariableRecord &DVR);
554 // InstVisitor overrides...
555 using InstVisitor<Verifier>::visit;
556 void visitDbgRecords(Instruction &I);
557 void visit(Instruction &I);
558
559 void visitTruncInst(TruncInst &I);
560 void visitZExtInst(ZExtInst &I);
561 void visitSExtInst(SExtInst &I);
562 void visitFPTruncInst(FPTruncInst &I);
563 void visitFPExtInst(FPExtInst &I);
564 void visitFPToUIInst(FPToUIInst &I);
565 void visitFPToSIInst(FPToSIInst &I);
566 void visitUIToFPInst(UIToFPInst &I);
567 void visitSIToFPInst(SIToFPInst &I);
568 void visitIntToPtrInst(IntToPtrInst &I);
569 void visitPtrToIntInst(PtrToIntInst &I);
570 void visitBitCastInst(BitCastInst &I);
571 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
572 void visitPHINode(PHINode &PN);
573 void visitCallBase(CallBase &Call);
574 void visitUnaryOperator(UnaryOperator &U);
575 void visitBinaryOperator(BinaryOperator &B);
576 void visitICmpInst(ICmpInst &IC);
577 void visitFCmpInst(FCmpInst &FC);
578 void visitExtractElementInst(ExtractElementInst &EI);
579 void visitInsertElementInst(InsertElementInst &EI);
580 void visitShuffleVectorInst(ShuffleVectorInst &EI);
581 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
582 void visitCallInst(CallInst &CI);
583 void visitInvokeInst(InvokeInst &II);
584 void visitGetElementPtrInst(GetElementPtrInst &GEP);
585 void visitLoadInst(LoadInst &LI);
586 void visitStoreInst(StoreInst &SI);
587 void verifyDominatesUse(Instruction &I, unsigned i);
588 void visitInstruction(Instruction &I);
589 void visitTerminator(Instruction &I);
590 void visitBranchInst(BranchInst &BI);
591 void visitReturnInst(ReturnInst &RI);
592 void visitSwitchInst(SwitchInst &SI);
593 void visitIndirectBrInst(IndirectBrInst &BI);
594 void visitCallBrInst(CallBrInst &CBI);
595 void visitSelectInst(SelectInst &SI);
596 void visitUserOp1(Instruction &I);
597 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
598 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
599 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
600 void visitVPIntrinsic(VPIntrinsic &VPI);
601 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
602 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
603 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
604 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
605 void visitFenceInst(FenceInst &FI);
606 void visitAllocaInst(AllocaInst &AI);
607 void visitExtractValueInst(ExtractValueInst &EVI);
608 void visitInsertValueInst(InsertValueInst &IVI);
609 void visitEHPadPredecessors(Instruction &I);
610 void visitLandingPadInst(LandingPadInst &LPI);
611 void visitResumeInst(ResumeInst &RI);
612 void visitCatchPadInst(CatchPadInst &CPI);
613 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
614 void visitCleanupPadInst(CleanupPadInst &CPI);
615 void visitFuncletPadInst(FuncletPadInst &FPI);
616 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
617 void visitCleanupReturnInst(CleanupReturnInst &CRI);
618
619 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
620 void verifySwiftErrorValue(const Value *SwiftErrorVal);
621 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
622 void verifyMustTailCall(CallInst &CI);
623 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
624 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
625 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
626 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
627 const Value *V);
628 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
629 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
630 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
631
632 void visitConstantExprsRecursively(const Constant *EntryC);
633 void visitConstantExpr(const ConstantExpr *CE);
634 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
635 void verifyInlineAsmCall(const CallBase &Call);
636 void verifyStatepoint(const CallBase &Call);
637 void verifyFrameRecoverIndices();
638 void verifySiblingFuncletUnwinds();
639
640 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
641 void verifyFragmentExpression(const DbgVariableRecord &I);
642 template <typename ValueOrMetadata>
643 void verifyFragmentExpression(const DIVariable &V,
644 DIExpression::FragmentInfo Fragment,
645 ValueOrMetadata *Desc);
646 void verifyFnArgs(const DbgVariableIntrinsic &I);
647 void verifyFnArgs(const DbgVariableRecord &DVR);
648 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
649 void verifyNotEntryValue(const DbgVariableRecord &I);
650
651 /// Module-level debug info verification...
652 void verifyCompileUnits();
653
654 /// Module-level verification that all @llvm.experimental.deoptimize
655 /// declarations share the same calling convention.
656 void verifyDeoptimizeCallingConvs();
657
658 void verifyAttachedCallBundle(const CallBase &Call,
659 const OperandBundleUse &BU);
660
661 /// Verify the llvm.experimental.noalias.scope.decl declarations
662 void verifyNoAliasScopeDecl();
663};
664
665} // end anonymous namespace
666
667/// We know that cond should be true; if not, print an error message.
668#define Check(C, ...) \
669 do { \
670 if (!(C)) { \
671 CheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
675
676/// We know that a debug info condition should be true; if not, print
677/// an error message.
678#define CheckDI(C, ...) \
679 do { \
680 if (!(C)) { \
681 DebugInfoCheckFailed(__VA_ARGS__); \
682 return; \
683 } \
684 } while (false)
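// For example, a use such as
//   Check(I.getOperand(i) != nullptr, "Operand is null", &I);
// expands to a guarded CheckFailed("Operand is null", &I) followed by an
// early return, so the enclosing visitor method stops at the first failed
// check while the rest of the module is still verified.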
685
686void Verifier::visitDbgRecords(Instruction &I) {
687 if (!I.DebugMarker)
688 return;
689 CheckDI(I.DebugMarker->MarkedInstr == &I,
690 "Instruction has invalid DebugMarker", &I);
691 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
692 "PHI Node must not have any attached DbgRecords", &I);
693 for (DbgRecord &DR : I.getDbgRecordRange()) {
694 CheckDI(DR.getMarker() == I.DebugMarker,
695 "DbgRecord had invalid DebugMarker", &I, &DR);
696 if (auto *Loc =
697 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
698 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
699 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
700 visit(*DVR);
701 // These have to appear after `visit` for consistency with existing
702 // intrinsic behaviour.
703 verifyFragmentExpression(*DVR);
704 verifyNotEntryValue(*DVR);
705 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
706 visit(*DLR);
707 }
708 }
709}
710
711void Verifier::visit(Instruction &I) {
712 visitDbgRecords(I);
713 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
714 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
715 InstVisitor<Verifier>::visit(I);
716}
717
718// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
719static void forEachUser(const Value *User,
720 SmallPtrSetImpl<const Value *> &Visited,
721 llvm::function_ref<bool(const Value *)> Callback) {
722 if (!Visited.insert(User).second)
723 return;
724
725 SmallVector<const Value *> WorkList;
726 append_range(WorkList, User->materialized_users());
727 while (!WorkList.empty()) {
728 const Value *Cur = WorkList.pop_back_val();
729 if (!Visited.insert(Cur).second)
730 continue;
731 if (Callback(Cur))
732 append_range(WorkList, Cur->materialized_users());
733 }
734}
735
736void Verifier::visitGlobalValue(const GlobalValue &GV) {
737 Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
738 "Global is external, but doesn't have external or weak linkage!", &GV);
739
740 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
741
742 if (MaybeAlign A = GO->getAlign()) {
743 Check(A->value() <= Value::MaximumAlignment,
744 "huge alignment values are unsupported", GO);
745 }
746
747 if (const MDNode *Associated =
748 GO->getMetadata(LLVMContext::MD_associated)) {
749 Check(Associated->getNumOperands() == 1,
750 "associated metadata must have one operand", &GV, Associated);
751 const Metadata *Op = Associated->getOperand(0).get();
752 Check(Op, "associated metadata must have a global value", GO, Associated);
753
754 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
755 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
756 if (VM) {
757 Check(isa<PointerType>(VM->getValue()->getType()),
758 "associated value must be pointer typed", GV, Associated);
759
760 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
761 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
762 "associated metadata must point to a GlobalObject", GO, Stripped);
763 Check(Stripped != GO,
764 "global values should not associate to themselves", GO,
765 Associated);
766 }
767 }
768
769 // FIXME: Why is getMetadata on GlobalValue protected?
770 if (const MDNode *AbsoluteSymbol =
771 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
772 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
773 true);
774 }
775 }
776
777 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
778 "Only global variables can have appending linkage!", &GV);
779
780 if (GV.hasAppendingLinkage()) {
781 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
782 Check(GVar && GVar->getValueType()->isArrayTy(),
783 "Only global arrays can have appending linkage!", GVar);
784 }
785
786 if (GV.isDeclarationForLinker())
787 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
788
789 if (GV.hasDLLExportStorageClass()) {
790 Check(!GV.hasHiddenVisibility(),
791 "dllexport GlobalValue must have default or protected visibility",
792 &GV);
793 }
794 if (GV.hasDLLImportStorageClass()) {
795 Check(GV.hasDefaultVisibility(),
796 "dllimport GlobalValue must have default visibility", &GV);
797 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
798 &GV);
799
800 Check((GV.isDeclaration() &&
801 (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
802 GV.hasAvailableExternallyLinkage(),
803 "Global is marked as dllimport, but not external", &GV);
804 }
805
806 if (GV.isImplicitDSOLocal())
807 Check(GV.isDSOLocal(),
808 "GlobalValue with local linkage or non-default "
809 "visibility must be dso_local!",
810 &GV);
811
812 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
813 if (const Instruction *I = dyn_cast<Instruction>(V)) {
814 if (!I->getParent() || !I->getParent()->getParent())
815 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
816 I);
817 else if (I->getParent()->getParent()->getParent() != &M)
818 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
819 I->getParent()->getParent(),
820 I->getParent()->getParent()->getParent());
821 return false;
822 } else if (const Function *F = dyn_cast<Function>(V)) {
823 if (F->getParent() != &M)
824 CheckFailed("Global is used by function in a different module", &GV, &M,
825 F, F->getParent());
826 return false;
827 }
828 return true;
829 });
830}
831
832void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
833 if (GV.hasInitializer()) {
834 Check(GV.getInitializer()->getType() == GV.getValueType(),
835 "Global variable initializer type does not match global "
836 "variable type!",
837 &GV);
838 // If the global has common linkage, it must have a zero initializer and
839 // cannot be constant.
840 if (GV.hasCommonLinkage()) {
841 Check(GV.getInitializer()->isNullValue(),
842 "'common' global must have a zero initializer!", &GV);
843 Check(!GV.isConstant(), "'common' global may not be marked constant!",
844 &GV);
845 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
846 }
847 }
848
849 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
850 GV.getName() == "llvm.global_dtors")) {
851 Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
852 "invalid linkage for intrinsic global variable", &GV);
853 Check(GV.materialized_use_empty(),
854 "invalid uses of intrinsic global variable", &GV);
855
856 // Don't worry about emitting an error for it not being an array;
857 // visitGlobalValue will complain on appending non-array.
858 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
859 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
860 PointerType *FuncPtrTy =
861 PointerType::get(Context, DL.getProgramAddressSpace());
862 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
863 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
864 STy->getTypeAtIndex(1) == FuncPtrTy,
865 "wrong type for intrinsic global variable", &GV);
866 Check(STy->getNumElements() == 3,
867 "the third field of the element type is mandatory, "
868 "specify ptr null to migrate from the obsoleted 2-field form");
869 Type *ETy = STy->getTypeAtIndex(2);
870 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
871 &GV);
872 }
873 }
874
875 if (GV.hasName() && (GV.getName() == "llvm.used" ||
876 GV.getName() == "llvm.compiler.used")) {
877 Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
878 "invalid linkage for intrinsic global variable", &GV);
879 Check(GV.materialized_use_empty(),
880 "invalid uses of intrinsic global variable", &GV);
881
882 Type *GVType = GV.getValueType();
883 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
884 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
885 Check(PTy, "wrong type for intrinsic global variable", &GV);
886 if (GV.hasInitializer()) {
887 const Constant *Init = GV.getInitializer();
888 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
889 Check(InitArray, "wrong initializer for intrinsic global variable",
890 Init);
891 for (Value *Op : InitArray->operands()) {
892 Value *V = Op->stripPointerCasts();
893 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
894 isa<GlobalAlias>(V),
895 Twine("invalid ") + GV.getName() + " member", V);
896 Check(V->hasName(),
897 Twine("members of ") + GV.getName() + " must be named", V);
898 }
899 }
900 }
901 }
902
903 // Visit any debug info attachments.
904 SmallVector<MDNode *, 1> MDs;
905 GV.getMetadata(LLVMContext::MD_dbg, MDs);
906 for (auto *MD : MDs) {
907 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
908 visitDIGlobalVariableExpression(*GVE);
909 else
910 CheckDI(false, "!dbg attachment of global variable must be a "
911 "DIGlobalVariableExpression");
912 }
913
914 // Scalable vectors cannot be global variables, since we don't know
915 // the runtime size.
916 Check(!GV.getValueType()->isScalableTy(),
917 "Globals cannot contain scalable types", &GV);
918
919 // Check if it's a target extension type that disallows being used as a
920 // global.
921 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
922 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
923 "Global @" + GV.getName() + " has illegal target extension type",
924 TTy);
925
926 if (!GV.hasInitializer()) {
927 visitGlobalValue(GV);
928 return;
929 }
930
931 // Walk any aggregate initializers looking for bitcasts between address spaces
932 visitConstantExprsRecursively(GV.getInitializer());
933
934 visitGlobalValue(GV);
935}
936
937void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
938 SmallPtrSet<const GlobalAlias *, 4> Visited;
939 Visited.insert(&GA);
940 visitAliaseeSubExpr(Visited, GA, C);
941}
942
943void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
944 const GlobalAlias &GA, const Constant &C) {
945 if (GA.hasAvailableExternallyLinkage()) {
946 Check(isa<GlobalValue>(C) &&
947 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
948 "available_externally alias must point to available_externally "
949 "global value",
950 &GA);
951 }
952 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
953 if (!GA.hasAvailableExternallyLinkage()) {
954 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
955 &GA);
956 }
957
958 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
959 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
960
961 Check(!GA2->isInterposable(),
962 "Alias cannot point to an interposable alias", &GA);
963 } else {
964 // Only continue verifying subexpressions of GlobalAliases.
965 // Do not recurse into global initializers.
966 return;
967 }
968 }
969
970 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
971 visitConstantExprsRecursively(CE);
972
973 for (const Use &U : C.operands()) {
974 Value *V = &*U;
975 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
976 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
977 else if (const auto *C2 = dyn_cast<Constant>(V))
978 visitAliaseeSubExpr(Visited, GA, *C2);
979 }
980}
981
982void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
983 Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
984 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
985 "weak_odr, external, or available_externally linkage!",
986 &GA);
987 const Constant *Aliasee = GA.getAliasee();
988 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
989 Check(GA.getType() == Aliasee->getType(),
990 "Alias and aliasee types should match!", &GA);
991
992 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
993 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
994
995 visitAliaseeSubExpr(GA, *Aliasee);
996
997 visitGlobalValue(GA);
998}
999
1000void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1001 Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
1002 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1003 "weak_odr, or external linkage!",
1004 &GI);
1005 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1006 // is a Function definition.
1007 const Function *Resolver = GI.getResolverFunction();
1008 Check(Resolver, "IFunc must have a Function resolver", &GI);
1009 Check(!Resolver->isDeclarationForLinker(),
1010 "IFunc resolver must be a definition", &GI);
1011
1012 // Check that the immediate resolver operand (prior to any bitcasts) has the
1013 // correct type.
1014 const Type *ResolverTy = GI.getResolver()->getType();
1015
1016 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1017 "IFunc resolver must return a pointer", &GI);
1018
1019 const Type *ResolverFuncTy =
1020 GlobalIFunc::getResolverFunctionType(GI.getValueType());
1021 Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
1022 "IFunc resolver has incorrect type", &GI);
1023}
1024
1025void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1026 // There used to be various other llvm.dbg.* nodes, but we don't support
1027 // upgrading them and we want to reserve the namespace for future uses.
1028 if (NMD.getName().starts_with("llvm.dbg."))
1029 CheckDI(NMD.getName() == "llvm.dbg.cu",
1030 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1031 for (const MDNode *MD : NMD.operands()) {
1032 if (NMD.getName() == "llvm.dbg.cu")
1033 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1034
1035 if (!MD)
1036 continue;
1037
1038 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1039 }
1040}
1041
1042void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1043 // Only visit each node once. Metadata can be mutually recursive, so this
1044 // avoids infinite recursion here, as well as being an optimization.
1045 if (!MDNodes.insert(&MD).second)
1046 return;
1047
1048 Check(&MD.getContext() == &Context,
1049 "MDNode context does not match Module context!", &MD);
1050
1051 switch (MD.getMetadataID()) {
1052 default:
1053 llvm_unreachable("Invalid MDNode subclass");
1054 case Metadata::MDTupleKind:
1055 break;
1056#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1057 case Metadata::CLASS##Kind: \
1058 visit##CLASS(cast<CLASS>(MD)); \
1059 break;
1060#include "llvm/IR/Metadata.def"
1061 }
1062
1063 for (const Metadata *Op : MD.operands()) {
1064 if (!Op)
1065 continue;
1066 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1067 &MD, Op);
1068 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1069 "DILocation not allowed within this metadata node", &MD, Op);
1070 if (auto *N = dyn_cast<MDNode>(Op)) {
1071 visitMDNode(*N, AllowLocs);
1072 continue;
1073 }
1074 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1075 visitValueAsMetadata(*V, nullptr);
1076 continue;
1077 }
1078 }
1079
1080 // Check these last, so we diagnose problems in operands first.
1081 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1082 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1083}
1084
1085void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1086 Check(MD.getValue(), "Expected valid value", &MD);
1087 Check(!MD.getValue()->getType()->isMetadataTy(),
1088 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1089
1090 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1091 if (!L)
1092 return;
1093
1094 Check(F, "function-local metadata used outside a function", L);
1095
1096 // If this was an instruction, bb, or argument, verify that it is in the
1097 // function that we expect.
1098 Function *ActualF = nullptr;
1099 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1100 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1101 ActualF = I->getParent()->getParent();
1102 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1103 ActualF = BB->getParent();
1104 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1105 ActualF = A->getParent();
1106 assert(ActualF && "Unimplemented function local metadata case!");
1107
1108 Check(ActualF == F, "function-local metadata used in wrong function", L);
1109}
1110
1111void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1112 for (const ValueAsMetadata *VAM : AL.getArgs())
1113 visitValueAsMetadata(*VAM, F);
1114}
1115
1116void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1117 Metadata *MD = MDV.getMetadata();
1118 if (auto *N = dyn_cast<MDNode>(MD)) {
1119 visitMDNode(*N, AreDebugLocsAllowed::No);
1120 return;
1121 }
1122
1123 // Only visit each node once. Metadata can be mutually recursive, so this
1124 // avoids infinite recursion here, as well as being an optimization.
1125 if (!MDNodes.insert(MD).second)
1126 return;
1127
1128 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1129 visitValueAsMetadata(*V, F);
1130
1131 if (auto *AL = dyn_cast<DIArgList>(MD))
1132 visitDIArgList(*AL, F);
1133}
1134
1135static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1136static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1137static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1138
1139void Verifier::visitDILocation(const DILocation &N) {
1140 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1141 "location requires a valid scope", &N, N.getRawScope());
1142 if (auto *IA = N.getRawInlinedAt())
1143 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1144 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1145 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1146}
1147
1148void Verifier::visitGenericDINode(const GenericDINode &N) {
1149 CheckDI(N.getTag(), "invalid tag", &N);
1150}
1151
1152void Verifier::visitDIScope(const DIScope &N) {
1153 if (auto *F = N.getRawFile())
1154 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1155}
1156
1157void Verifier::visitDISubrange(const DISubrange &N) {
1158 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1159 bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
1160 CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
1161 N.getRawUpperBound(),
1162 "Subrange must contain count or upperBound", &N);
1163 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1164 "Subrange can have any one of count or upperBound", &N);
1165 auto *CBound = N.getRawCountNode();
1166 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1167 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1168 "Count must be signed constant or DIVariable or DIExpression", &N);
1169 auto Count = N.getCount();
1170 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1171 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1172 "invalid subrange count", &N);
1173 auto *LBound = N.getRawLowerBound();
1174 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1175 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1176 "LowerBound must be signed constant or DIVariable or DIExpression",
1177 &N);
1178 auto *UBound = N.getRawUpperBound();
1179 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1180 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1181 "UpperBound must be signed constant or DIVariable or DIExpression",
1182 &N);
1183 auto *Stride = N.getRawStride();
1184 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1185 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1186 "Stride must be signed constant or DIVariable or DIExpression", &N);
1187}
1188
1189void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1190 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1191 CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
1192 "GenericSubrange must contain count or upperBound", &N);
1193 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1194 "GenericSubrange can have any one of count or upperBound", &N);
1195 auto *CBound = N.getRawCountNode();
1196 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1197 "Count must be signed constant or DIVariable or DIExpression", &N);
1198 auto *LBound = N.getRawLowerBound();
1199 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1200 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1201 "LowerBound must be signed constant or DIVariable or DIExpression",
1202 &N);
1203 auto *UBound = N.getRawUpperBound();
1204 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1205 "UpperBound must be signed constant or DIVariable or DIExpression",
1206 &N);
1207 auto *Stride = N.getRawStride();
1208 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1209 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1210 "Stride must be signed constant or DIVariable or DIExpression", &N);
1211}
1212
1213void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1214 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1215}
1216
1217void Verifier::visitDIBasicType(const DIBasicType &N) {
1218 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1219 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1220 N.getTag() == dwarf::DW_TAG_string_type,
1221 "invalid tag", &N);
1222}
1223
1224void Verifier::visitDIStringType(const DIStringType &N) {
1225 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1226 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1227 &N);
1228}
1229
1230void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1231 // Common scope checks.
1232 visitDIScope(N);
1233
1234 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1235 N.getTag() == dwarf::DW_TAG_pointer_type ||
1236 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1237 N.getTag() == dwarf::DW_TAG_reference_type ||
1238 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1239 N.getTag() == dwarf::DW_TAG_const_type ||
1240 N.getTag() == dwarf::DW_TAG_immutable_type ||
1241 N.getTag() == dwarf::DW_TAG_volatile_type ||
1242 N.getTag() == dwarf::DW_TAG_restrict_type ||
1243 N.getTag() == dwarf::DW_TAG_atomic_type ||
1244 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1245 N.getTag() == dwarf::DW_TAG_member ||
1246 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1247 N.getTag() == dwarf::DW_TAG_inheritance ||
1248 N.getTag() == dwarf::DW_TAG_friend ||
1249 N.getTag() == dwarf::DW_TAG_set_type ||
1250 N.getTag() == dwarf::DW_TAG_template_alias,
1251 "invalid tag", &N);
1252 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1253 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1254 N.getRawExtraData());
1255 }
1256
1257 if (N.getTag() == dwarf::DW_TAG_set_type) {
1258 if (auto *T = N.getRawBaseType()) {
1259 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1260 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1261 CheckDI(
1262 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1263 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1264 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1265 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1266 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1267 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1268 "invalid set base type", &N, T);
1269 }
1270 }
1271
1272 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1273 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1274 N.getRawBaseType());
1275
1276 if (N.getDWARFAddressSpace()) {
1277 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1278 N.getTag() == dwarf::DW_TAG_reference_type ||
1279 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1280 "DWARF address space only applies to pointer or reference types",
1281 &N);
1282 }
1283}
1284
1285/// Detect mutually exclusive flags.
1286static bool hasConflictingReferenceFlags(unsigned Flags) {
1287 return ((Flags & DINode::FlagLValueReference) &&
1288 (Flags & DINode::FlagRValueReference)) ||
1289 ((Flags & DINode::FlagTypePassByValue) &&
1290 (Flags & DINode::FlagTypePassByReference));
1291}
1292
1293void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1294 auto *Params = dyn_cast<MDTuple>(&RawParams);
1295 CheckDI(Params, "invalid template params", &N, &RawParams);
1296 for (Metadata *Op : Params->operands()) {
1297 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1298 &N, Params, Op);
1299 }
1300}
1301
1302void Verifier::visitDICompositeType(const DICompositeType &N) {
1303 // Common scope checks.
1304 visitDIScope(N);
1305
1306 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1307 N.getTag() == dwarf::DW_TAG_structure_type ||
1308 N.getTag() == dwarf::DW_TAG_union_type ||
1309 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1310 N.getTag() == dwarf::DW_TAG_class_type ||
1311 N.getTag() == dwarf::DW_TAG_variant_part ||
1312 N.getTag() == dwarf::DW_TAG_namelist,
1313 "invalid tag", &N);
1314
1315 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1316 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1317 N.getRawBaseType());
1318
1319 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1320 "invalid composite elements", &N, N.getRawElements());
1321 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1322 N.getRawVTableHolder());
1323 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1324 "invalid reference flags", &N);
1325 unsigned DIBlockByRefStruct = 1 << 4;
1326 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1327 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1328
1329 if (N.isVector()) {
1330 const DINodeArray Elements = N.getElements();
1331 CheckDI(Elements.size() == 1 &&
1332 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1333 "invalid vector, expected one element of type subrange", &N);
1334 }
1335
1336 if (auto *Params = N.getRawTemplateParams())
1337 visitTemplateParams(N, *Params);
1338
1339 if (auto *D = N.getRawDiscriminator()) {
1340 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1341 "discriminator can only appear on variant part");
1342 }
1343
1344 if (N.getRawDataLocation()) {
1345 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1346 "dataLocation can only appear in array type");
1347 }
1348
1349 if (N.getRawAssociated()) {
1350 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1351 "associated can only appear in array type");
1352 }
1353
1354 if (N.getRawAllocated()) {
1355 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1356 "allocated can only appear in array type");
1357 }
1358
1359 if (N.getRawRank()) {
1360 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1361 "rank can only appear in array type");
1362 }
1363
1364 if (N.getTag() == dwarf::DW_TAG_array_type) {
1365 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1366 }
1367}
1368
1369void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1370 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1371 if (auto *Types = N.getRawTypeArray()) {
1372 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1373 for (Metadata *Ty : N.getTypeArray()->operands()) {
1374 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1375 }
1376 }
1377 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1378 "invalid reference flags", &N);
1379}
1380
1381void Verifier::visitDIFile(const DIFile &N) {
1382 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1383 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1384 if (Checksum) {
1385 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1386 "invalid checksum kind", &N);
1387 size_t Size;
1388 switch (Checksum->Kind) {
1389 case DIFile::CSK_MD5:
1390 Size = 32;
1391 break;
1392 case DIFile::CSK_SHA1:
1393 Size = 40;
1394 break;
1395 case DIFile::CSK_SHA256:
1396 Size = 64;
1397 break;
1398 }
1399 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1400 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1401 "invalid checksum", &N);
1402 }
1403}
1404
1405void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1406 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1407 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1408
1409 // Don't bother verifying the compilation directory or producer string
1410 // as those could be empty.
1411 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1412 N.getRawFile());
1413 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1414 N.getFile());
1415
1416 CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
1417
1418 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1419 "invalid emission kind", &N);
1420
1421 if (auto *Array = N.getRawEnumTypes()) {
1422 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1423 for (Metadata *Op : N.getEnumTypes()->operands()) {
1424 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1425 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1426 "invalid enum type", &N, N.getEnumTypes(), Op);
1427 }
1428 }
1429 if (auto *Array = N.getRawRetainedTypes()) {
1430 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1431 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1432 CheckDI(
1433 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1434 !cast<DISubprogram>(Op)->isDefinition())),
1435 "invalid retained type", &N, Op);
1436 }
1437 }
1438 if (auto *Array = N.getRawGlobalVariables()) {
1439 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1440 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1441 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1442 "invalid global variable ref", &N, Op);
1443 }
1444 }
1445 if (auto *Array = N.getRawImportedEntities()) {
1446 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1447 for (Metadata *Op : N.getImportedEntities()->operands()) {
1448 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1449 &N, Op);
1450 }
1451 }
1452 if (auto *Array = N.getRawMacros()) {
1453 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1454 for (Metadata *Op : N.getMacros()->operands()) {
1455 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1456 }
1457 }
1458 CUVisited.insert(&N);
1459}
1460
1461void Verifier::visitDISubprogram(const DISubprogram &N) {
1462 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1463 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1464 if (auto *F = N.getRawFile())
1465 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1466 else
1467 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1468 if (auto *T = N.getRawType())
1469 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1470 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1471 N.getRawContainingType());
1472 if (auto *Params = N.getRawTemplateParams())
1473 visitTemplateParams(N, *Params);
1474 if (auto *S = N.getRawDeclaration())
1475 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1476 "invalid subprogram declaration", &N, S);
1477 if (auto *RawNode = N.getRawRetainedNodes()) {
1478 auto *Node = dyn_cast<MDTuple>(RawNode);
1479 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1480 for (Metadata *Op : Node->operands()) {
1481 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1482 isa<DIImportedEntity>(Op)),
1483 "invalid retained nodes, expected DILocalVariable, DILabel or "
1484 "DIImportedEntity",
1485 &N, Node, Op);
1486 }
1487 }
1488 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1489 "invalid reference flags", &N);
1490
1491 auto *Unit = N.getRawUnit();
1492 if (N.isDefinition()) {
1493 // Subprogram definitions (not part of the type hierarchy).
1494 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1495 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1496 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1497 // There's no good way to cross the CU boundary to insert a nested
1498 // DISubprogram definition in one CU into a type defined in another CU.
1499 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1500 if (CT && CT->getRawIdentifier() &&
1501 M.getContext().isODRUniquingDebugTypes())
1502 CheckDI(N.getDeclaration(),
1503 "definition subprograms cannot be nested within DICompositeType "
1504 "when enabling ODR",
1505 &N);
1506 } else {
1507 // Subprogram declarations (part of the type hierarchy).
1508 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1509 CheckDI(!N.getRawDeclaration(),
1510 "subprogram declaration must not have a declaration field");
1511 }
1512
1513 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1514 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1515 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1516 for (Metadata *Op : ThrownTypes->operands())
1517 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1518 Op);
1519 }
1520
1521 if (N.areAllCallsDescribed())
1522 CheckDI(N.isDefinition(),
1523 "DIFlagAllCallsDescribed must be attached to a definition");
1524}
1525
1526void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1527 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1528 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1529 "invalid local scope", &N, N.getRawScope());
1530 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1531 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1532}
1533
1534void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1535 visitDILexicalBlockBase(N);
1536
1537 CheckDI(N.getLine() || !N.getColumn(),
1538 "cannot have column info without line info", &N);
1539}
1540
1541void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1542 visitDILexicalBlockBase(N);
1543}
1544
1545void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1546 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1547 if (auto *S = N.getRawScope())
1548 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1549 if (auto *S = N.getRawDecl())
1550 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1551}
1552
1553void Verifier::visitDINamespace(const DINamespace &N) {
1554 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1555 if (auto *S = N.getRawScope())
1556 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1557}
1558
1559void Verifier::visitDIMacro(const DIMacro &N) {
1560 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1561 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1562 "invalid macinfo type", &N);
1563 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1564 if (!N.getValue().empty()) {
1565 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1566 }
1567}
1568
1569void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1570 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1571 "invalid macinfo type", &N);
1572 if (auto *F = N.getRawFile())
1573 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1574
1575 if (auto *Array = N.getRawElements()) {
1576 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1577 for (Metadata *Op : N.getElements()->operands()) {
1578 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1579 }
1580 }
1581}
1582
1583void Verifier::visitDIModule(const DIModule &N) {
1584 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1585 CheckDI(!N.getName().empty(), "anonymous module", &N);
1586}
1587
1588void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1589 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1590}
1591
1592void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1593 visitDITemplateParameter(N);
1594
1595 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1596 &N);
1597}
1598
1599void Verifier::visitDITemplateValueParameter(
1600 const DITemplateValueParameter &N) {
1601 visitDITemplateParameter(N);
1602
1603 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1604 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1605 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1606 "invalid tag", &N);
1607}
1608
1609void Verifier::visitDIVariable(const DIVariable &N) {
1610 if (auto *S = N.getRawScope())
1611 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1612 if (auto *F = N.getRawFile())
1613 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1614}
1615
1616void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1617 // Checks common to all variables.
1618 visitDIVariable(N);
1619
1620 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1621 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1622 // Check only if the global variable is not an extern
1623 if (N.isDefinition())
1624 CheckDI(N.getType(), "missing global variable type", &N);
1625 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1626 CheckDI(isa<DIDerivedType>(Member),
1627 "invalid static data member declaration", &N, Member);
1628 }
1629}
1630
1631void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1632 // Checks common to all variables.
1633 visitDIVariable(N);
1634
1635 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1636 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1637 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1638 "local variable requires a valid scope", &N, N.getRawScope());
1639 if (auto Ty = N.getType())
1640 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1641}
1642
1643void Verifier::visitDIAssignID(const DIAssignID &N) {
1644 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1645 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1646}
1647
1648void Verifier::visitDILabel(const DILabel &N) {
1649 if (auto *S = N.getRawScope())
1650 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1651 if (auto *F = N.getRawFile())
1652 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1653
1654 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1655 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1656 "label requires a valid scope", &N, N.getRawScope());
1657}
1658
1659void Verifier::visitDIExpression(const DIExpression &N) {
1660 CheckDI(N.isValid(), "invalid expression", &N);
1661}
1662
1663void Verifier::visitDIGlobalVariableExpression(
1664 const DIGlobalVariableExpression &GVE) {
1665 CheckDI(GVE.getVariable(), "missing variable");
1666 if (auto *Var = GVE.getVariable())
1667 visitDIGlobalVariable(*Var);
1668 if (auto *Expr = GVE.getExpression()) {
1669 visitDIExpression(*Expr);
1670 if (auto Fragment = Expr->getFragmentInfo())
1671 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1672 }
1673}
1674
1675void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1676 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1677 if (auto *T = N.getRawType())
1678 CheckDI(isType(T), "invalid type ref", &N, T);
1679 if (auto *F = N.getRawFile())
1680 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1681}
1682
1683void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1684 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1685 N.getTag() == dwarf::DW_TAG_imported_declaration,
1686 "invalid tag", &N);
1687 if (auto *S = N.getRawScope())
1688 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1689 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1690 N.getRawEntity());
1691}
1692
1693void Verifier::visitComdat(const Comdat &C) {
1694 // In COFF the Module is invalid if the GlobalValue has private linkage.
1695 // Entities with private linkage don't have entries in the symbol table.
1696 if (TT.isOSBinFormatCOFF())
1697 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1698 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1699 GV);
1700}
1701
1702void Verifier::visitModuleIdents() {
1703 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1704 if (!Idents)
1705 return;
1706
1707 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1708 // Scan each llvm.ident entry and make sure that this requirement is met.
1709 for (const MDNode *N : Idents->operands()) {
1710 Check(N->getNumOperands() == 1,
1711 "incorrect number of operands in llvm.ident metadata", N);
1712 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1713 ("invalid value for llvm.ident metadata entry operand"
1714 "(the operand should be a string)"),
1715 N->getOperand(0));
1716 }
1717}
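// For illustration (example metadata only): a well-formed llvm.ident list,
// where every entry wraps exactly one string:
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}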
1718
1719void Verifier::visitModuleCommandLines() {
1720 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1721 if (!CommandLines)
1722 return;
1723
1724 // llvm.commandline takes a list of metadata entries. Each entry has only one
1725 // string. Scan each llvm.commandline entry and make sure that this
1726 // requirement is met.
1727 for (const MDNode *N : CommandLines->operands()) {
1728 Check(N->getNumOperands() == 1,
1729 "incorrect number of operands in llvm.commandline metadata", N);
1730 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1731 ("invalid value for llvm.commandline metadata entry operand"
1732 "(the operand should be a string)"),
1733 N->getOperand(0));
1734 }
1735}
1736
1737void Verifier::visitModuleFlags() {
1738 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1739 if (!Flags) return;
1740
1741 // Scan each flag, and track the flags and requirements.
1742 DenseMap<const MDString *, const MDNode *> SeenIDs;
1743 SmallVector<const MDNode*, 16> Requirements;
1744 uint64_t PAuthABIPlatform = -1;
1745 uint64_t PAuthABIVersion = -1;
1746 for (const MDNode *MDN : Flags->operands()) {
1747 visitModuleFlag(MDN, SeenIDs, Requirements);
1748 if (MDN->getNumOperands() != 3)
1749 continue;
1750 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1751 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1752 if (const auto *PAP =
1753 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1754 PAuthABIPlatform = PAP->getZExtValue();
1755 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1756 if (const auto *PAV =
1757 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1758 PAuthABIVersion = PAV->getZExtValue();
1759 }
1760 }
1761 }
1762
1763 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1764 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1765 "'aarch64-elf-pauthabi-version' module flags must be present");
1766
1767 // Validate that the requirements in the module are valid.
1768 for (const MDNode *Requirement : Requirements) {
1769 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1770 const Metadata *ReqValue = Requirement->getOperand(1);
1771
1772 const MDNode *Op = SeenIDs.lookup(Flag);
1773 if (!Op) {
1774 CheckFailed("invalid requirement on flag, flag is not present in module",
1775 Flag);
1776 continue;
1777 }
1778
1779 if (Op->getOperand(2) != ReqValue) {
1780 CheckFailed(("invalid requirement on flag, "
1781 "flag does not have the required value"),
1782 Flag);
1783 continue;
1784 }
1785 }
1786}
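// For illustration (example metadata only; the integer values are arbitrary):
// the two PAuth ABI flags must be present together or not at all:
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 2}
//   !1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 5}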
1787
1788void
1789Verifier::visitModuleFlag(const MDNode *Op,
1790 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1791 SmallVectorImpl<const MDNode *> &Requirements) {
1792 // Each module flag should have three arguments, the merge behavior (a
1793 // constant int), the flag ID (an MDString), and the value.
1794 Check(Op->getNumOperands() == 3,
1795 "incorrect number of operands in module flag", Op);
1796 Module::ModFlagBehavior MFB;
1797 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1798 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1799 "invalid behavior operand in module flag (expected constant integer)",
1800 Op->getOperand(0));
1801 Check(false,
1802 "invalid behavior operand in module flag (unexpected constant)",
1803 Op->getOperand(0));
1804 }
1805 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1806 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1807 Op->getOperand(1));
1808
1809 // Check the values for behaviors with additional requirements.
1810 switch (MFB) {
1811 case Module::Error:
1812 case Module::Warning:
1813 case Module::Override:
1814 // These behavior types accept any value.
1815 break;
1816
1817 case Module::Min: {
1818 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1819 Check(V && V->getValue().isNonNegative(),
1820 "invalid value for 'min' module flag (expected constant non-negative "
1821 "integer)",
1822 Op->getOperand(2));
1823 break;
1824 }
1825
1826 case Module::Max: {
1827 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1828 "invalid value for 'max' module flag (expected constant integer)",
1829 Op->getOperand(2));
1830 break;
1831 }
1832
1833 case Module::Require: {
1834 // The value should itself be an MDNode with two operands, a flag ID (an
1835 // MDString), and a value.
1836 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1837 Check(Value && Value->getNumOperands() == 2,
1838 "invalid value for 'require' module flag (expected metadata pair)",
1839 Op->getOperand(2));
1840 Check(isa<MDString>(Value->getOperand(0)),
1841 ("invalid value for 'require' module flag "
1842 "(first value operand should be a string)"),
1843 Value->getOperand(0));
1844
1845 // Append it to the list of requirements, to check once all module flags are
1846 // scanned.
1847 Requirements.push_back(Value);
1848 break;
1849 }
1850
1851 case Module::Append:
1852 case Module::AppendUnique: {
1853 // These behavior types require the operand be an MDNode.
1854 Check(isa<MDNode>(Op->getOperand(2)),
1855 "invalid value for 'append'-type module flag "
1856 "(expected a metadata node)",
1857 Op->getOperand(2));
1858 break;
1859 }
1860 }
1861
1862 // Unless this is a "requires" flag, check the ID is unique.
1863 if (MFB != Module::Require) {
1864 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1865 Check(Inserted,
1866 "module flag identifiers must be unique (or of 'require' type)", ID);
1867 }
1868
1869 if (ID->getString() == "wchar_size") {
1870 ConstantInt *Value
1871 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1872 Check(Value, "wchar_size metadata requires constant integer argument");
1873 }
1874
1875 if (ID->getString() == "Linker Options") {
1876 // If the llvm.linker.options named metadata exists, we assume that the
1877 // bitcode reader has upgraded the module flag. Otherwise the flag might
1878 // have been created by a client directly.
1879 Check(M.getNamedMetadata("llvm.linker.options"),
1880 "'Linker Options' named metadata no longer supported");
1881 }
1882
1883 if (ID->getString() == "SemanticInterposition") {
1884 ConstantInt *Value =
1885 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1886 Check(Value,
1887 "SemanticInterposition metadata requires constant integer argument");
1888 }
1889
1890 if (ID->getString() == "CG Profile") {
1891 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1892 visitModuleFlagCGProfileEntry(MDO);
1893 }
1894}
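// For illustration (example metadata only; the flag name "my-requirement" is
// made up): each module flag is a (behavior, ID, value) triple, and a
// 'require' flag carries an (ID, value) pair that must match another flag:
//   !0 = !{i32 1, !"wchar_size", i32 4}        ; behavior 1 = Error
//   !1 = !{i32 3, !"my-requirement", !2}       ; behavior 3 = Require
//   !2 = !{!"wchar_size", i32 4}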
1895
1896void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1897 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1898 if (!FuncMDO)
1899 return;
1900 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1901 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1902 "expected a Function or null", FuncMDO);
1903 };
1904 auto Node = dyn_cast_or_null<MDNode>(MDO);
1905 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1906 CheckFunction(Node->getOperand(0));
1907 CheckFunction(Node->getOperand(1));
1908 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1909 Check(Count && Count->getType()->isIntegerTy(),
1910 "expected an integer constant", Node->getOperand(2));
1911}
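// For illustration (example metadata only): each CG Profile entry is a
// (caller, callee, count) triple:
//   !0 = !{ptr @caller, ptr @callee, i64 2000}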
1912
1913void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1914 for (Attribute A : Attrs) {
1915
1916 if (A.isStringAttribute()) {
1917#define GET_ATTR_NAMES
1918#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1919#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1920 if (A.getKindAsString() == #DISPLAY_NAME) { \
1921 auto V = A.getValueAsString(); \
1922 if (!(V.empty() || V == "true" || V == "false")) \
1923 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1924 ""); \
1925 }
1926
1927#include "llvm/IR/Attributes.inc"
1928 continue;
1929 }
1930
1931 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1932 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1933 V);
1934 return;
1935 }
1936 }
1937}
1938
1939// VerifyParameterAttrs - Check the given attributes for an argument or return
1940// value of the specified type. The value V is printed in error messages.
1941void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1942 const Value *V) {
1943 if (!Attrs.hasAttributes())
1944 return;
1945
1946 verifyAttributeTypes(Attrs, V);
1947
1948 for (Attribute Attr : Attrs)
1949 Check(Attr.isStringAttribute() ||
1950 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1951 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1952 V);
1953
1954 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1955 Check(Attrs.getNumAttributes() == 1,
1956 "Attribute 'immarg' is incompatible with other attributes", V);
1957 }
1958
1959 // Check for mutually incompatible attributes. Only inreg is compatible with
1960 // sret.
1961 unsigned AttrCount = 0;
1962 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1963 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1964 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1965 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1966 Attrs.hasAttribute(Attribute::InReg);
1967 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1968 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1969 Check(AttrCount <= 1,
1970 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1971 "'byref', and 'sret' are incompatible!",
1972 V);
1973
1974 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1975 Attrs.hasAttribute(Attribute::ReadOnly)),
1976 "Attributes "
1977 "'inalloca and readonly' are incompatible!",
1978 V);
1979
1980 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1981 Attrs.hasAttribute(Attribute::Returned)),
1982 "Attributes "
1983 "'sret and returned' are incompatible!",
1984 V);
1985
1986 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1987 Attrs.hasAttribute(Attribute::SExt)),
1988 "Attributes "
1989 "'zeroext and signext' are incompatible!",
1990 V);
1991
1992 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1993 Attrs.hasAttribute(Attribute::ReadOnly)),
1994 "Attributes "
1995 "'readnone and readonly' are incompatible!",
1996 V);
1997
1998 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1999 Attrs.hasAttribute(Attribute::WriteOnly)),
2000 "Attributes "
2001 "'readnone and writeonly' are incompatible!",
2002 V);
2003
2004 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2005 Attrs.hasAttribute(Attribute::WriteOnly)),
2006 "Attributes "
2007 "'readonly and writeonly' are incompatible!",
2008 V);
2009
2010 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2011 Attrs.hasAttribute(Attribute::AlwaysInline)),
2012 "Attributes "
2013 "'noinline and alwaysinline' are incompatible!",
2014 V);
2015
2016 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2017 Attrs.hasAttribute(Attribute::ReadNone)),
2018 "Attributes writable and readnone are incompatible!", V);
2019
2020 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2021 Attrs.hasAttribute(Attribute::ReadOnly)),
2022 "Attributes writable and readonly are incompatible!", V);
2023
2024 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2025 for (Attribute Attr : Attrs) {
2026 if (!Attr.isStringAttribute() &&
2027 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2028 CheckFailed("Attribute '" + Attr.getAsString() +
2029 "' applied to incompatible type!", V);
2030 return;
2031 }
2032 }
2033
2034 if (isa<PointerType>(Ty)) {
2035 if (Attrs.hasAttribute(Attribute::ByVal)) {
2036 if (Attrs.hasAttribute(Attribute::Alignment)) {
2037 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2038 Align MaxAlign(ParamMaxAlignment);
2039 Check(AttrAlign <= MaxAlign,
2040 "Attribute 'align' exceed the max size 2^14", V);
2041 }
2042 SmallPtrSet<Type *, 4> Visited;
2043 Check(Attrs.getByValType()->isSized(&Visited),
2044 "Attribute 'byval' does not support unsized types!", V);
2045 }
2046 if (Attrs.hasAttribute(Attribute::ByRef)) {
2047 SmallPtrSet<Type *, 4> Visited;
2048 Check(Attrs.getByRefType()->isSized(&Visited),
2049 "Attribute 'byref' does not support unsized types!", V);
2050 }
2051 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2052 SmallPtrSet<Type *, 4> Visited;
2053 Check(Attrs.getInAllocaType()->isSized(&Visited),
2054 "Attribute 'inalloca' does not support unsized types!", V);
2055 }
2056 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2057 SmallPtrSet<Type *, 4> Visited;
2058 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2059 "Attribute 'preallocated' does not support unsized types!", V);
2060 }
2061 }
2062
2063 if (Attrs.hasAttribute(Attribute::Initializes)) {
2064 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2065 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2066 V);
2068 "Attribute 'initializes' does not support unordered ranges", V);
2069 }
2070
2071 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2072 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2073 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2074 V);
2075 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2076 "Invalid value for 'nofpclass' test mask", V);
2077 }
2078 if (Attrs.hasAttribute(Attribute::Range)) {
2079 const ConstantRange &CR =
2080 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2082 "Range bit width must match type bit width!", V);
2083 }
2084}
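// For illustration (example IR only): 'byval' must name a sized type and may
// carry an alignment of at most 2^14, while combining it with another
// ABI-changing attribute such as 'inalloca' is rejected:
//   declare void @ok(ptr byval(i32) align 8 %p)
//   declare void @bad(ptr byval(i32) inalloca(i32) %p)   ; verifier error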
2085
2086void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2087 const Value *V) {
2088 if (Attrs.hasFnAttr(Attr)) {
2089 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2090 unsigned N;
2091 if (S.getAsInteger(10, N))
2092 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2093 }
2094}
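// For illustration (example IR only): these string attributes must parse as
// unsigned base-10 integers, e.g.
//   attributes #0 = { "patchable-function-entry"="2" "warn-stack-size"="80" }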
2095
2096// Check parameter attributes against a function type.
2097// The value V is printed in error messages.
2098void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2099 const Value *V, bool IsIntrinsic,
2100 bool IsInlineAsm) {
2101 if (Attrs.isEmpty())
2102 return;
2103
2104 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2105 Check(Attrs.hasParentContext(Context),
2106 "Attribute list does not match Module context!", &Attrs, V);
2107 for (const auto &AttrSet : Attrs) {
2108 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2109 "Attribute set does not match Module context!", &AttrSet, V);
2110 for (const auto &A : AttrSet) {
2111 Check(A.hasParentContext(Context),
2112 "Attribute does not match Module context!", &A, V);
2113 }
2114 }
2115 }
2116
2117 bool SawNest = false;
2118 bool SawReturned = false;
2119 bool SawSRet = false;
2120 bool SawSwiftSelf = false;
2121 bool SawSwiftAsync = false;
2122 bool SawSwiftError = false;
2123
2124 // Verify return value attributes.
2125 AttributeSet RetAttrs = Attrs.getRetAttrs();
2126 for (Attribute RetAttr : RetAttrs)
2127 Check(RetAttr.isStringAttribute() ||
2128 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2129 "Attribute '" + RetAttr.getAsString() +
2130 "' does not apply to function return values",
2131 V);
2132
2133 unsigned MaxParameterWidth = 0;
2134 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2135 if (Ty->isVectorTy()) {
2136 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2137 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2138 if (Size > MaxParameterWidth)
2139 MaxParameterWidth = Size;
2140 }
2141 }
2142 };
2143 GetMaxParameterWidth(FT->getReturnType());
2144 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2145
2146 // Verify parameter attributes.
2147 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2148 Type *Ty = FT->getParamType(i);
2149 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2150
2151 if (!IsIntrinsic) {
2152 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2153 "immarg attribute only applies to intrinsics", V);
2154 if (!IsInlineAsm)
2155 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2156 "Attribute 'elementtype' can only be applied to intrinsics"
2157 " and inline asm.",
2158 V);
2159 }
2160
2161 verifyParameterAttrs(ArgAttrs, Ty, V);
2162 GetMaxParameterWidth(Ty);
2163
2164 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2165 Check(!SawNest, "More than one parameter has attribute nest!", V);
2166 SawNest = true;
2167 }
2168
2169 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2170 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2171 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2172 "Incompatible argument and return types for 'returned' attribute",
2173 V);
2174 SawReturned = true;
2175 }
2176
2177 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2178 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2179 Check(i == 0 || i == 1,
2180 "Attribute 'sret' is not on first or second parameter!", V);
2181 SawSRet = true;
2182 }
2183
2184 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2185 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2186 SawSwiftSelf = true;
2187 }
2188
2189 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2190 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2191 SawSwiftAsync = true;
2192 }
2193
2194 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2195 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2196 SawSwiftError = true;
2197 }
2198
2199 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2200 Check(i == FT->getNumParams() - 1,
2201 "inalloca isn't on the last parameter!", V);
2202 }
2203 }
2204
2205 if (!Attrs.hasFnAttrs())
2206 return;
2207
2208 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2209 for (Attribute FnAttr : Attrs.getFnAttrs())
2210 Check(FnAttr.isStringAttribute() ||
2211 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2212 "Attribute '" + FnAttr.getAsString() +
2213 "' does not apply to functions!",
2214 V);
2215
2216 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2217 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2218 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2219
2220 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2221 Check(Attrs.hasFnAttr(Attribute::NoInline),
2222 "Attribute 'optnone' requires 'noinline'!", V);
2223
2224 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2225 "Attributes 'optsize and optnone' are incompatible!", V);
2226
2227 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2228 "Attributes 'minsize and optnone' are incompatible!", V);
2229
2230 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2231 "Attributes 'optdebug and optnone' are incompatible!", V);
2232 }
2233
2234 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2235 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2236 "Attributes 'optsize and optdebug' are incompatible!", V);
2237
2238 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2239 "Attributes 'minsize and optdebug' are incompatible!", V);
2240 }
2241
2242 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2243 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2244 "Attribute writable and memory without argmem: write are incompatible!",
2245 V);
2246
2247 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2248 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2249 "Attributes 'aarch64_pstate_sm_enabled and "
2250 "aarch64_pstate_sm_compatible' are incompatible!",
2251 V);
2252 }
2253
2254 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2255 Attrs.hasFnAttr("aarch64_inout_za") +
2256 Attrs.hasFnAttr("aarch64_out_za") +
2257 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2258 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2259 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2260 V);
2261
2262 Check(
2263 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2264 Attrs.hasFnAttr("aarch64_inout_zt0") +
2265 Attrs.hasFnAttr("aarch64_out_zt0") +
2266 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2267 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2268 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2269 V);
2270
2271 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2272 const GlobalValue *GV = cast<GlobalValue>(V);
2274 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2275 }
2276
2277 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2278 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2279 if (ParamNo >= FT->getNumParams()) {
2280 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2281 return false;
2282 }
2283
2284 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2285 CheckFailed("'allocsize' " + Name +
2286 " argument must refer to an integer parameter",
2287 V);
2288 return false;
2289 }
2290
2291 return true;
2292 };
2293
2294 if (!CheckParam("element size", Args->first))
2295 return;
2296
2297 if (Args->second && !CheckParam("number of elements", *Args->second))
2298 return;
2299 }
2300
2301 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2302 AllocFnKind K = Attrs.getAllocKind();
2303 AllocFnKind Type =
2304 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2305 if (!is_contained(
2306 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2307 Type))
2308 CheckFailed(
2309 "'allockind()' requires exactly one of alloc, realloc, and free");
2310 if ((Type == AllocFnKind::Free) &&
2311 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2312 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2313 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2314 "or aligned modifiers.");
2315 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2316 if ((K & ZeroedUninit) == ZeroedUninit)
2317 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2318 }
2319
2320 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2321 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2322 if (VScaleMin == 0)
2323 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2324 else if (!isPowerOf2_32(VScaleMin))
2325 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2326 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2327 if (VScaleMax && VScaleMin > VScaleMax)
2328 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2329 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2330 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2331 }
2332
2333 if (Attrs.hasFnAttr("frame-pointer")) {
2334 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2335 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2336 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2337 }
2338
2339 // Check EVEX512 feature.
2340 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2341 TT.isX86()) {
2342 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2343 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2344 "512-bit vector arguments require 'evex512' for AVX512", V);
2345 }
2346
2347 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2348 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2349 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2350
2351 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2352 StringRef S = A.getValueAsString();
2353 if (S != "none" && S != "all" && S != "non-leaf")
2354 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2355 }
2356
2357 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2358 StringRef S = A.getValueAsString();
2359 if (S != "a_key" && S != "b_key")
2360 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2361 V);
2362 }
2363
2364 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2365 StringRef S = A.getValueAsString();
2366 if (S != "true" && S != "false")
2367 CheckFailed(
2368 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2369 }
2370
2371 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2372 StringRef S = A.getValueAsString();
2373 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2374 if (!Info)
2375 CheckFailed("invalid name for a VFABI variant: " + S, V);
2376 }
2377}
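// For illustration (example IR only; the function name is made up): the
// 'allocsize' indices must name integer parameters, and both 'vscale_range'
// bounds must be powers of two with a non-zero minimum:
//   declare ptr @my_calloc(i64, i64) allocsize(0, 1)
//   attributes #0 = { vscale_range(1,16) }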
2378
2379void Verifier::verifyFunctionMetadata(
2380 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2381 for (const auto &Pair : MDs) {
2382 if (Pair.first == LLVMContext::MD_prof) {
2383 MDNode *MD = Pair.second;
2384 Check(MD->getNumOperands() >= 2,
2385 "!prof annotations should have no less than 2 operands", MD);
2386
2387 // Check first operand.
2388 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2389 MD);
2390 Check(isa<MDString>(MD->getOperand(0)),
2391 "expected string with name of the !prof annotation", MD);
2392 MDString *MDS = cast<MDString>(MD->getOperand(0));
2393 StringRef ProfName = MDS->getString();
2394 Check(ProfName == "function_entry_count" ||
2395 ProfName == "synthetic_function_entry_count",
2396 "first operand should be 'function_entry_count'"
2397 " or 'synthetic_function_entry_count'",
2398 MD);
2399
2400 // Check second operand.
2401 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2402 MD);
2403 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2404 "expected integer argument to function_entry_count", MD);
2405 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2406 MDNode *MD = Pair.second;
2407 Check(MD->getNumOperands() == 1,
2408 "!kcfi_type must have exactly one operand", MD);
2409 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2410 MD);
2411 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2412 "expected a constant operand for !kcfi_type", MD);
2413 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2414 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2415 "expected a constant integer operand for !kcfi_type", MD);
2416 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2417 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2418 }
2419 }
2420}
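// For illustration (example IR only; the constants are arbitrary): well-formed
// function-level !prof and !kcfi_type attachments look like this:
//   define void @f() !prof !0 !kcfi_type !1 {
//     ret void
//   }
//   !0 = !{!"function_entry_count", i64 100}
//   !1 = !{i32 12345678}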
2421
2422void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2423 if (!ConstantExprVisited.insert(EntryC).second)
2424 return;
2425
2426 SmallVector<const Constant *, 16> Stack;
2427 Stack.push_back(EntryC);
2428
2429 while (!Stack.empty()) {
2430 const Constant *C = Stack.pop_back_val();
2431
2432 // Check this constant expression.
2433 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2434 visitConstantExpr(CE);
2435
2436 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2437 visitConstantPtrAuth(CPA);
2438
2439 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2440 // Global Values get visited separately, but we do need to make sure
2441 // that the global value is in the correct module
2442 Check(GV->getParent() == &M, "Referencing global in another module!",
2443 EntryC, &M, GV, GV->getParent());
2444 continue;
2445 }
2446
2447 // Visit all sub-expressions.
2448 for (const Use &U : C->operands()) {
2449 const auto *OpC = dyn_cast<Constant>(U);
2450 if (!OpC)
2451 continue;
2452 if (!ConstantExprVisited.insert(OpC).second)
2453 continue;
2454 Stack.push_back(OpC);
2455 }
2456 }
2457}
2458
2459void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2460 if (CE->getOpcode() == Instruction::BitCast)
2461 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2462 CE->getType()),
2463 "Invalid bitcast", CE);
2464}
2465
2466void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2467 Check(CPA->getPointer()->getType()->isPointerTy(),
2468 "signed ptrauth constant base pointer must have pointer type");
2469
2470 Check(CPA->getType() == CPA->getPointer()->getType(),
2471 "signed ptrauth constant must have same type as its base pointer");
2472
2473 Check(CPA->getKey()->getBitWidth() == 32,
2474 "signed ptrauth constant key must be i32 constant integer");
2475
2477 "signed ptrauth constant address discriminator must be a pointer");
2478
2479 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2480 "signed ptrauth constant discriminator must be i64 constant integer");
2481}
2482
2483bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2484 // There shouldn't be more attribute sets than there are parameters plus the
2485 // function and return value.
2486 return Attrs.getNumAttrSets() <= Params + 2;
2487}
2488
2489void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2490 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2491 unsigned ArgNo = 0;
2492 unsigned LabelNo = 0;
2493 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2494 if (CI.Type == InlineAsm::isLabel) {
2495 ++LabelNo;
2496 continue;
2497 }
2498
2499 // Only deal with constraints that correspond to call arguments.
2500 if (!CI.hasArg())
2501 continue;
2502
2503 if (CI.isIndirect) {
2504 const Value *Arg = Call.getArgOperand(ArgNo);
2505 Check(Arg->getType()->isPointerTy(),
2506 "Operand for indirect constraint must have pointer type", &Call);
2507
2508 Check(Call.getParamElementType(ArgNo),
2509 "Operand for indirect constraint must have elementtype attribute",
2510 &Call);
2511 } else {
2512 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2513 "Elementtype attribute can only be applied for indirect "
2514 "constraints",
2515 &Call);
2516 }
2517
2518 ArgNo++;
2519 }
2520
2521 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2522 Check(LabelNo == CallBr->getNumIndirectDests(),
2523 "Number of label constraints does not match number of callbr dests",
2524 &Call);
2525 } else {
2526 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2527 &Call);
2528 }
2529}
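// For illustration (example IR only): an indirect ("*m") constraint takes a
// pointer call argument that must carry an elementtype attribute:
//   call void asm "movl $1, $0", "=*m,r"(ptr elementtype(i32) %slot, i32 %v)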
2530
2531/// Verify that statepoint intrinsic is well formed.
2532void Verifier::verifyStatepoint(const CallBase &Call) {
2533 assert(Call.getCalledFunction() &&
2534 Call.getCalledFunction()->getIntrinsicID() ==
2535 Intrinsic::experimental_gc_statepoint);
2536
2537 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2538 !Call.onlyAccessesArgMemory(),
2539 "gc.statepoint must read and write all memory to preserve "
2540 "reordering restrictions required by safepoint semantics",
2541 Call);
2542
2543 const int64_t NumPatchBytes =
2544 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2545 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2546 Check(NumPatchBytes >= 0,
2547 "gc.statepoint number of patchable bytes must be "
2548 "positive",
2549 Call);
2550
2551 Type *TargetElemType = Call.getParamElementType(2);
2552 Check(TargetElemType,
2553 "gc.statepoint callee argument must have elementtype attribute", Call);
2554 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2555 Check(TargetFuncType,
2556 "gc.statepoint callee elementtype must be function type", Call);
2557
2558 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2559 Check(NumCallArgs >= 0,
2560 "gc.statepoint number of arguments to underlying call "
2561 "must be positive",
2562 Call);
2563 const int NumParams = (int)TargetFuncType->getNumParams();
2564 if (TargetFuncType->isVarArg()) {
2565 Check(NumCallArgs >= NumParams,
2566 "gc.statepoint mismatch in number of vararg call args", Call);
2567
2568 // TODO: Remove this limitation
2569 Check(TargetFuncType->getReturnType()->isVoidTy(),
2570 "gc.statepoint doesn't support wrapping non-void "
2571 "vararg functions yet",
2572 Call);
2573 } else
2574 Check(NumCallArgs == NumParams,
2575 "gc.statepoint mismatch in number of call args", Call);
2576
2577 const uint64_t Flags
2578 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2579 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2580 "unknown flag used in gc.statepoint flags argument", Call);
2581
2582 // Verify that the types of the call parameter arguments match
2583 // the type of the wrapped callee.
2584 AttributeList Attrs = Call.getAttributes();
2585 for (int i = 0; i < NumParams; i++) {
2586 Type *ParamType = TargetFuncType->getParamType(i);
2587 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2588 Check(ArgType == ParamType,
2589 "gc.statepoint call argument does not match wrapped "
2590 "function type",
2591 Call);
2592
2593 if (TargetFuncType->isVarArg()) {
2594 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2595 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2596 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2597 }
2598 }
2599
2600 const int EndCallArgsInx = 4 + NumCallArgs;
2601
2602 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2603 Check(isa<ConstantInt>(NumTransitionArgsV),
2604 "gc.statepoint number of transition arguments "
2605 "must be constant integer",
2606 Call);
2607 const int NumTransitionArgs =
2608 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2609 Check(NumTransitionArgs == 0,
2610 "gc.statepoint w/inline transition bundle is deprecated", Call);
2611 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2612
2613 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2614 Check(isa<ConstantInt>(NumDeoptArgsV),
2615 "gc.statepoint number of deoptimization arguments "
2616 "must be constant integer",
2617 Call);
2618 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2619 Check(NumDeoptArgs == 0,
2620 "gc.statepoint w/inline deopt operands is deprecated", Call);
2621
2622 const int ExpectedNumArgs = 7 + NumCallArgs;
2623 Check(ExpectedNumArgs == (int)Call.arg_size(),
2624 "gc.statepoint too many arguments", Call);
2625
2626 // Check that the only uses of this gc.statepoint are gc.result or
2627 // gc.relocate calls which are tied to this statepoint and thus part
2628 // of the same statepoint sequence
2629 for (const User *U : Call.users()) {
2630 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2631 Check(UserCall, "illegal use of statepoint token", Call, U);
2632 if (!UserCall)
2633 continue;
2634 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2635 "gc.result or gc.relocate are the only value uses "
2636 "of a gc.statepoint",
2637 Call, U);
2638 if (isa<GCResultInst>(UserCall)) {
2639 Check(UserCall->getArgOperand(0) == &Call,
2640 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2641 } else if (isa<GCRelocateInst>(UserCall)) {
2642 Check(UserCall->getArgOperand(0) == &Call,
2643 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2644 }
2645 }
2646
2647 // Note: It is legal for a single derived pointer to be listed multiple
2648 // times. It's non-optimal, but it is legal. It can also happen after
2649 // insertion if we strip a bitcast away.
2650 // Note: It is really tempting to check that each base is relocated and
2651 // that a derived pointer is never reused as a base pointer. This turns
2652 // out to be problematic since optimizations run after safepoint insertion
2653 // can recognize equality properties that the insertion logic doesn't know
2654 // about. See example statepoint.ll in the verifier subdirectory
2655}
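// For illustration (example IR only; the statepoint ID is arbitrary): a
// minimal statepoint wrapping a void() callee with no call, transition, or
// deoptimization arguments has 7 operands and an elementtype callee attribute:
//   %tok = call token (i64, i32, ptr, i32, i32, ...)
//       @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0,
//           ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0)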
2656
2657void Verifier::verifyFrameRecoverIndices() {
2658 for (auto &Counts : FrameEscapeInfo) {
2659 Function *F = Counts.first;
2660 unsigned EscapedObjectCount = Counts.second.first;
2661 unsigned MaxRecoveredIndex = Counts.second.second;
2662 Check(MaxRecoveredIndex <= EscapedObjectCount,
2663 "all indices passed to llvm.localrecover must be less than the "
2664 "number of arguments passed to llvm.localescape in the parent "
2665 "function",
2666 F);
2667 }
2668}
2669
2670static Instruction *getSuccPad(Instruction *Terminator) {
2671 BasicBlock *UnwindDest;
2672 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2673 UnwindDest = II->getUnwindDest();
2674 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2675 UnwindDest = CSI->getUnwindDest();
2676 else
2677 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2678 return UnwindDest->getFirstNonPHI();
2679}
2680
2681void Verifier::verifySiblingFuncletUnwinds() {
2682 SmallPtrSet<Instruction *, 8> Visited;
2683 SmallPtrSet<Instruction *, 8> Active;
2684 for (const auto &Pair : SiblingFuncletInfo) {
2685 Instruction *PredPad = Pair.first;
2686 if (Visited.count(PredPad))
2687 continue;
2688 Active.insert(PredPad);
2689 Instruction *Terminator = Pair.second;
2690 do {
2691 Instruction *SuccPad = getSuccPad(Terminator);
2692 if (Active.count(SuccPad)) {
2693 // Found a cycle; report error
2694 Instruction *CyclePad = SuccPad;
2695 SmallVector<Instruction *, 8> CycleNodes;
2696 do {
2697 CycleNodes.push_back(CyclePad);
2698 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2699 if (CycleTerminator != CyclePad)
2700 CycleNodes.push_back(CycleTerminator);
2701 CyclePad = getSuccPad(CycleTerminator);
2702 } while (CyclePad != SuccPad);
2703 Check(false, "EH pads can't handle each other's exceptions",
2704 ArrayRef<Instruction *>(CycleNodes));
2705 }
2706 // Don't re-walk a node we've already checked
2707 if (!Visited.insert(SuccPad).second)
2708 break;
2709 // Walk to this successor if it has a map entry.
2710 PredPad = SuccPad;
2711 auto TermI = SiblingFuncletInfo.find(PredPad);
2712 if (TermI == SiblingFuncletInfo.end())
2713 break;
2714 Terminator = TermI->second;
2715 Active.insert(PredPad);
2716 } while (true);
2717 // Each node only has one successor, so we've walked all the active
2718 // nodes' successors.
2719 Active.clear();
2720 }
2721}
2722
2723// visitFunction - Verify that a function is ok.
2724//
2725void Verifier::visitFunction(const Function &F) {
2726 visitGlobalValue(F);
2727
2728 // Check function arguments.
2729 FunctionType *FT = F.getFunctionType();
2730 unsigned NumArgs = F.arg_size();
2731
2732 Check(&Context == &F.getContext(),
2733 "Function context does not match Module context!", &F);
2734
2735 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2736 Check(FT->getNumParams() == NumArgs,
2737 "# formal arguments must match # of arguments for function type!", &F,
2738 FT);
2739 Check(F.getReturnType()->isFirstClassType() ||
2740 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2741 "Functions cannot return aggregate values!", &F);
2742
2743 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2744 "Invalid struct return type!", &F);
2745
2746 AttributeList Attrs = F.getAttributes();
2747
2748 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2749 "Attribute after last parameter!", &F);
2750
2751 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2752 "Function debug format should match parent module", &F,
2753 F.IsNewDbgInfoFormat, F.getParent(),
2754 F.getParent()->IsNewDbgInfoFormat);
2755
2756 bool IsIntrinsic = F.isIntrinsic();
2757
2758 // Check function attributes.
2759 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2760
2761 // On function declarations/definitions, we do not support the builtin
2762 // attribute. We do not check this in VerifyFunctionAttrs since that is
2763 // checking for Attributes that can/can not ever be on functions.
2764 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2765 "Attribute 'builtin' can only be applied to a callsite.", &F);
2766
2767 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2768 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2769
2770 // Check that this function meets the restrictions on this calling convention.
2771 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2772 // restrictions can be lifted.
2773 switch (F.getCallingConv()) {
2774 default:
2775 case CallingConv::C:
2776 break;
2777 case CallingConv::X86_INTR: {
2778 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2779 "Calling convention parameter requires byval", &F);
2780 break;
2781 }
2782 case CallingConv::AMDGPU_KERNEL:
2783 case CallingConv::SPIR_KERNEL:
2784 case CallingConv::AMDGPU_CS_Chain:
2785 case CallingConv::AMDGPU_CS_ChainPreserve:
2786 Check(F.getReturnType()->isVoidTy(),
2787 "Calling convention requires void return type", &F);
2788 [[fallthrough]];
2789 case CallingConv::AMDGPU_VS:
2790 case CallingConv::AMDGPU_HS:
2791 case CallingConv::AMDGPU_GS:
2792 case CallingConv::AMDGPU_PS:
2793 case CallingConv::AMDGPU_CS:
2794 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2795 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2796 const unsigned StackAS = DL.getAllocaAddrSpace();
2797 unsigned i = 0;
2798 for (const Argument &Arg : F.args()) {
2799 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2800 "Calling convention disallows byval", &F);
2801 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2802 "Calling convention disallows preallocated", &F);
2803 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2804 "Calling convention disallows inalloca", &F);
2805
2806 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2807 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2808 // value here.
2809 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2810 "Calling convention disallows stack byref", &F);
2811 }
2812
2813 ++i;
2814 }
2815 }
2816
2817 [[fallthrough]];
2818 case CallingConv::Fast:
2819 case CallingConv::Cold:
2820 case CallingConv::Intel_OCL_BI:
2821 case CallingConv::PTX_Kernel:
2822 case CallingConv::PTX_Device:
2823 Check(!F.isVarArg(),
2824 "Calling convention does not support varargs or "
2825 "perfect forwarding!",
2826 &F);
2827 break;
2828 }
2829
2830 // Check that the argument values match the function type for this function...
2831 unsigned i = 0;
2832 for (const Argument &Arg : F.args()) {
2833 Check(Arg.getType() == FT->getParamType(i),
2834 "Argument value does not match function argument type!", &Arg,
2835 FT->getParamType(i));
2836 Check(Arg.getType()->isFirstClassType(),
2837 "Function arguments must have first-class types!", &Arg);
2838 if (!IsIntrinsic) {
2839 Check(!Arg.getType()->isMetadataTy(),
2840 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2841 Check(!Arg.getType()->isTokenTy(),
2842 "Function takes token but isn't an intrinsic", &Arg, &F);
2843 Check(!Arg.getType()->isX86_AMXTy(),
2844 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2845 }
2846
2847 // Check that swifterror argument is only used by loads and stores.
2848 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2849 verifySwiftErrorValue(&Arg);
2850 }
2851 ++i;
2852 }
2853
2854 if (!IsIntrinsic) {
2855 Check(!F.getReturnType()->isTokenTy(),
2856 "Function returns a token but isn't an intrinsic", &F);
2857 Check(!F.getReturnType()->isX86_AMXTy(),
2858 "Function returns a x86_amx but isn't an intrinsic", &F);
2859 }
2860
2861 // Get the function metadata attachments.
2862 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2863 F.getAllMetadata(MDs);
2864 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2865 verifyFunctionMetadata(MDs);
2866
2867 // Check validity of the personality function
2868 if (F.hasPersonalityFn()) {
2869 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2870 if (Per)
2871 Check(Per->getParent() == F.getParent(),
2872 "Referencing personality function in another module!", &F,
2873 F.getParent(), Per, Per->getParent());
2874 }
2875
2876 // EH funclet coloring can be expensive, recompute on-demand
2877 BlockEHFuncletColors.clear();
2878
2879 if (F.isMaterializable()) {
2880 // Function has a body somewhere we can't see.
2881 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2882 MDs.empty() ? nullptr : MDs.front().second);
2883 } else if (F.isDeclaration()) {
2884 for (const auto &I : MDs) {
2885 // This is used for call site debug information.
2886 CheckDI(I.first != LLVMContext::MD_dbg ||
2887 !cast<DISubprogram>(I.second)->isDistinct(),
2888 "function declaration may only have a unique !dbg attachment",
2889 &F);
2890 Check(I.first != LLVMContext::MD_prof,
2891 "function declaration may not have a !prof attachment", &F);
2892
2893 // Verify the metadata itself.
2894 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2895 }
2896 Check(!F.hasPersonalityFn(),
2897 "Function declaration shouldn't have a personality routine", &F);
2898 } else {
2899 // Verify that this function (which has a body) is not named "llvm.*". It
2900 // is not legal to define intrinsics.
2901 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2902
2903 // Check the entry node
2904 const BasicBlock *Entry = &F.getEntryBlock();
2905 Check(pred_empty(Entry),
2906 "Entry block to function must not have predecessors!", Entry);
2907
2908 // The address of the entry block cannot be taken, unless it is dead.
2909 if (Entry->hasAddressTaken()) {
2910 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2911 "blockaddress may not be used with the entry block!", Entry);
2912 }
2913
2914 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2915 NumKCFIAttachments = 0;
2916 // Visit metadata attachments.
2917 for (const auto &I : MDs) {
2918 // Verify that the attachment is legal.
2919 auto AllowLocs = AreDebugLocsAllowed::No;
2920 switch (I.first) {
2921 default:
2922 break;
2923 case LLVMContext::MD_dbg: {
2924 ++NumDebugAttachments;
2925 CheckDI(NumDebugAttachments == 1,
2926 "function must have a single !dbg attachment", &F, I.second);
2927 CheckDI(isa<DISubprogram>(I.second),
2928 "function !dbg attachment must be a subprogram", &F, I.second);
2929 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2930 "function definition may only have a distinct !dbg attachment",
2931 &F);
2932
2933 auto *SP = cast<DISubprogram>(I.second);
2934 const Function *&AttachedTo = DISubprogramAttachments[SP];
2935 CheckDI(!AttachedTo || AttachedTo == &F,
2936 "DISubprogram attached to more than one function", SP, &F);
2937 AttachedTo = &F;
2938 AllowLocs = AreDebugLocsAllowed::Yes;
2939 break;
2940 }
2941 case LLVMContext::MD_prof:
2942 ++NumProfAttachments;
2943 Check(NumProfAttachments == 1,
2944 "function must have a single !prof attachment", &F, I.second);
2945 break;
2946 case LLVMContext::MD_kcfi_type:
2947 ++NumKCFIAttachments;
2948 Check(NumKCFIAttachments == 1,
2949 "function must have a single !kcfi_type attachment", &F,
2950 I.second);
2951 break;
2952 }
2953
2954 // Verify the metadata itself.
2955 visitMDNode(*I.second, AllowLocs);
2956 }
2957 }
2958
2959 // If this function is actually an intrinsic, verify that it is only used in
2960 // direct call/invokes, never having its "address taken".
2961 // Only do this if the module is materialized, otherwise we don't have all the
2962 // uses.
2963 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2964 const User *U;
2965 if (F.hasAddressTaken(&U, false, true, false,
2966 /*IgnoreARCAttachedCall=*/true))
2967 Check(false, "Invalid user of intrinsic instruction!", U);
2968 }
2969
2970 // Check intrinsics' signatures.
2971 switch (F.getIntrinsicID()) {
2972 case Intrinsic::experimental_gc_get_pointer_base: {
2973 FunctionType *FT = F.getFunctionType();
2974 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2975 Check(isa<PointerType>(F.getReturnType()),
2976 "gc.get.pointer.base must return a pointer", F);
2977 Check(FT->getParamType(0) == F.getReturnType(),
2978 "gc.get.pointer.base operand and result must be of the same type", F);
2979 break;
2980 }
2981 case Intrinsic::experimental_gc_get_pointer_offset: {
2982 FunctionType *FT = F.getFunctionType();
2983 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2984 Check(isa<PointerType>(FT->getParamType(0)),
2985 "gc.get.pointer.offset operand must be a pointer", F);
2986 Check(F.getReturnType()->isIntegerTy(),
2987 "gc.get.pointer.offset must return integer", F);
2988 break;
2989 }
2990 }
2991
2992 auto *N = F.getSubprogram();
2993 HasDebugInfo = (N != nullptr);
2994 if (!HasDebugInfo)
2995 return;
2996
2997 // Check that all !dbg attachments lead back to N.
2998 //
2999 // FIXME: Check this incrementally while visiting !dbg attachments.
3000 // FIXME: Only check when N is the canonical subprogram for F.
3001 SmallPtrSet<const MDNode *, 32> Seen;
3002 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3003 // Be careful about using DILocation here since we might be dealing with
3004 // broken code (this is the Verifier after all).
3005 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3006 if (!DL)
3007 return;
3008 if (!Seen.insert(DL).second)
3009 return;
3010
3011 Metadata *Parent = DL->getRawScope();
3012 CheckDI(Parent && isa<DILocalScope>(Parent),
3013 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3014
3015 DILocalScope *Scope = DL->getInlinedAtScope();
3016 Check(Scope, "Failed to find DILocalScope", DL);
3017
3018 if (!Seen.insert(Scope).second)
3019 return;
3020
3021 DISubprogram *SP = Scope->getSubprogram();
3022
3023 // Scope and SP could be the same MDNode and we don't want to skip
3024 // validation in that case
3025 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3026 return;
3027
3028 CheckDI(SP->describes(&F),
3029 "!dbg attachment points at wrong subprogram for function", N, &F,
3030 &I, DL, Scope, SP);
3031 };
3032 for (auto &BB : F)
3033 for (auto &I : BB) {
3034 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3035 // The llvm.loop annotations also contain two DILocations.
3036 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3037 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3038 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3039 if (BrokenDebugInfo)
3040 return;
3041 }
3042}
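// For illustration, a minimal sketch of driving these checks through the
// public API declared in llvm/IR/Verifier.h (not code from this file):
//   if (verifyFunction(F, &errs()))
//     report_fatal_error("broken function found, compilation aborted!");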
3043
3044 // visitBasicBlock - Verify that a basic block is well formed...
3045//
3046void Verifier::visitBasicBlock(BasicBlock &BB) {
3047 InstsInThisBlock.clear();
3048 ConvergenceVerifyHelper.visit(BB);
3049
3050 // Ensure that basic blocks have terminators!
3051 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3052
3053 // Check constraints that this basic block imposes on all of the PHI nodes in
3054 // it.
3055 if (isa<PHINode>(BB.front())) {
3056 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3057 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3058 llvm::sort(Preds);
3059 for (const PHINode &PN : BB.phis()) {
3060 Check(PN.getNumIncomingValues() == Preds.size(),
3061 "PHINode should have one entry for each predecessor of its "
3062 "parent basic block!",
3063 &PN);
3064
3065 // Get and sort all incoming values in the PHI node...
3066 Values.clear();
3067 Values.reserve(PN.getNumIncomingValues());
3068 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3069 Values.push_back(
3070 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3071 llvm::sort(Values);
3072
3073 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3074 // Check to make sure that if there is more than one entry for a
3075 // particular basic block in this PHI node, that the incoming values are
3076 // all identical.
3077 //
3078 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3079 Values[i].second == Values[i - 1].second,
3080 "PHI node has multiple entries for the same basic block with "
3081 "different incoming values!",
3082 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3083
3084 // Check to make sure that the predecessors and PHI node entries are
3085 // matched up.
3086 Check(Values[i].first == Preds[i],
3087 "PHI node entries do not match predecessors!", &PN,
3088 Values[i].first, Preds[i]);
3089 }
3090 }
3091 }
3092
3093 // Check that all instructions have their parent pointers set up correctly.
3094 for (auto &I : BB)
3095 {
3096 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3097 }
3098
3099 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3100 "BB debug format should match parent function", &BB,
3101 BB.IsNewDbgInfoFormat, BB.getParent(),
3102 BB.getParent()->IsNewDbgInfoFormat);
3103
3104 // Confirm that no issues arise from the debug program.
3105 if (BB.IsNewDbgInfoFormat)
3106 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3107 &BB);
3108}
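// For illustration (example IR only): a PHI node must carry exactly one entry
// for each predecessor of its block:
//   merge:                                ; preds = %then, %else
//     %v = phi i32 [ 1, %then ], [ %x, %else ]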
3109
3110void Verifier::visitTerminator(Instruction &I) {
3111 // Ensure that terminators only exist at the end of the basic block.
3112 Check(&I == I.getParent()->getTerminator(),
3113 "Terminator found in the middle of a basic block!", I.getParent());
3114 visitInstruction(I);
3115}
3116
3117void Verifier::visitBranchInst(BranchInst &BI) {
3118 if (BI.isConditional()) {
3120 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3121 }
3122 visitTerminator(BI);
3123}
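// Illustration (hand-written sketch): the conditional-branch check above
// accepts 'br i1 %cond, label %a, label %b' and rejects a condition of any
// other type, e.g. 'br i32 %x, label %a, label %b'.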
3124
3125void Verifier::visitReturnInst(ReturnInst &RI) {
3126 Function *F = RI.getParent()->getParent();
3127 unsigned N = RI.getNumOperands();
3128 if (F->getReturnType()->isVoidTy())
3129 Check(N == 0,
3130 "Found return instr that returns non-void in Function of void "
3131 "return type!",
3132 &RI, F->getReturnType());
3133 else
3134 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3135 "Function return type does not match operand "
3136 "type of return inst!",
3137 &RI, F->getReturnType());
3138
3139 // Check to make sure that the return value has necessary properties for
3140 // terminators...
3141 visitTerminator(RI);
3142}
3143
3144void Verifier::visitSwitchInst(SwitchInst &SI) {
3145 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3146 // Check to make sure that all of the constants in the switch instruction
3147 // have the same type as the switched-on value.
3148 Type *SwitchTy = SI.getCondition()->getType();
3149 SmallPtrSet<ConstantInt *, 32> Constants;
3150 for (auto &Case : SI.cases()) {
3151 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3152 "Case value is not a constant integer.", &SI);
3153 Check(Case.getCaseValue()->getType() == SwitchTy,
3154 "Switch constants must all be same type as switch value!", &SI);
3155 Check(Constants.insert(Case.getCaseValue()).second,
3156 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3157 }
3158
3159 visitTerminator(SI);
3160}
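// Illustration (hand-written sketch): the duplicate-case check above rejects
//
//   switch i32 %x, label %default [ i32 1, label %a
//                                   i32 1, label %b ]   ; duplicate case value
//
// and the type check requires every case constant to be i32 here.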
3161
3162void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3164 "Indirectbr operand must have pointer type!", &BI);
3165 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3167 "Indirectbr destinations must all have pointer type!", &BI);
3168
3169 visitTerminator(BI);
3170}
3171
3172void Verifier::visitCallBrInst(CallBrInst &CBI) {
3173 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3174 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3175 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3176
3177 verifyInlineAsmCall(CBI);
3178 visitTerminator(CBI);
3179}
3180
3181void Verifier::visitSelectInst(SelectInst &SI) {
3182 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3183 SI.getOperand(2)),
3184 "Invalid operands for select instruction!", &SI);
3185
3186 Check(SI.getTrueValue()->getType() == SI.getType(),
3187 "Select values must have same type as select instruction!", &SI);
3188 visitInstruction(SI);
3189}
3190
3191 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3192 /// a pass; if any exist, it's an error.
3193///
3194void Verifier::visitUserOp1(Instruction &I) {
3195 Check(false, "User-defined operators should not live outside of a pass!", &I);
3196}
3197
3198void Verifier::visitTruncInst(TruncInst &I) {
3199 // Get the source and destination types
3200 Type *SrcTy = I.getOperand(0)->getType();
3201 Type *DestTy = I.getType();
3202
3203 // Get the size of the types in bits, we'll need this later
3204 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3205 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3206
3207 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3208 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3209 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3210 "trunc source and destination must both be a vector or neither", &I);
3211 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3212
3213 visitInstruction(I);
3214}
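// Illustration (hand-written sketch): the bit-size check above means
//   %t = trunc i32 %x to i8     ; valid, destination is strictly narrower
//   %u = trunc i32 %x to i64    ; invalid, rejected by "DestTy too big"
// The zext/sext/fptrunc/fpext visitors below apply the analogous rule in the
// corresponding direction.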
3215
3216void Verifier::visitZExtInst(ZExtInst &I) {
3217 // Get the source and destination types
3218 Type *SrcTy = I.getOperand(0)->getType();
3219 Type *DestTy = I.getType();
3220
3221 // Get the size of the types in bits, we'll need this later
3222 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3223 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3224 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3225 "zext source and destination must both be a vector or neither", &I);
3226 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3227 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3228
3229 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3230
3231 visitInstruction(I);
3232}
3233
3234void Verifier::visitSExtInst(SExtInst &I) {
3235 // Get the source and destination types
3236 Type *SrcTy = I.getOperand(0)->getType();
3237 Type *DestTy = I.getType();
3238
3239 // Get the size of the types in bits, we'll need this later
3240 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3241 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3242
3243 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3244 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3245 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3246 "sext source and destination must both be a vector or neither", &I);
3247 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3248
3249 visitInstruction(I);
3250}
3251
3252void Verifier::visitFPTruncInst(FPTruncInst &I) {
3253 // Get the source and destination types
3254 Type *SrcTy = I.getOperand(0)->getType();
3255 Type *DestTy = I.getType();
3256 // Get the size of the types in bits, we'll need this later
3257 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3258 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3259
3260 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3261 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3262 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3263 "fptrunc source and destination must both be a vector or neither", &I);
3264 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3265
3266 visitInstruction(I);
3267}
3268
3269void Verifier::visitFPExtInst(FPExtInst &I) {
3270 // Get the source and destination types
3271 Type *SrcTy = I.getOperand(0)->getType();
3272 Type *DestTy = I.getType();
3273
3274 // Get the size of the types in bits, we'll need this later
3275 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3276 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3277
3278 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3279 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3280 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3281 "fpext source and destination must both be a vector or neither", &I);
3282 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3283
3284 visitInstruction(I);
3285}
3286
3287void Verifier::visitUIToFPInst(UIToFPInst &I) {
3288 // Get the source and destination types
3289 Type *SrcTy = I.getOperand(0)->getType();
3290 Type *DestTy = I.getType();
3291
3292 bool SrcVec = SrcTy->isVectorTy();
3293 bool DstVec = DestTy->isVectorTy();
3294
3295 Check(SrcVec == DstVec,
3296 "UIToFP source and dest must both be vector or scalar", &I);
3297 Check(SrcTy->isIntOrIntVectorTy(),
3298 "UIToFP source must be integer or integer vector", &I);
3299 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3300 &I);
3301
3302 if (SrcVec && DstVec)
3303 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3304 cast<VectorType>(DestTy)->getElementCount(),
3305 "UIToFP source and dest vector length mismatch", &I);
3306
3307 visitInstruction(I);
3308}
3309
3310void Verifier::visitSIToFPInst(SIToFPInst &I) {
3311 // Get the source and destination types
3312 Type *SrcTy = I.getOperand(0)->getType();
3313 Type *DestTy = I.getType();
3314
3315 bool SrcVec = SrcTy->isVectorTy();
3316 bool DstVec = DestTy->isVectorTy();
3317
3318 Check(SrcVec == DstVec,
3319 "SIToFP source and dest must both be vector or scalar", &I);
3320 Check(SrcTy->isIntOrIntVectorTy(),
3321 "SIToFP source must be integer or integer vector", &I);
3322 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3323 &I);
3324
3325 if (SrcVec && DstVec)
3326 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3327 cast<VectorType>(DestTy)->getElementCount(),
3328 "SIToFP source and dest vector length mismatch", &I);
3329
3330 visitInstruction(I);
3331}
3332
3333void Verifier::visitFPToUIInst(FPToUIInst &I) {
3334 // Get the source and destination types
3335 Type *SrcTy = I.getOperand(0)->getType();
3336 Type *DestTy = I.getType();
3337
3338 bool SrcVec = SrcTy->isVectorTy();
3339 bool DstVec = DestTy->isVectorTy();
3340
3341 Check(SrcVec == DstVec,
3342 "FPToUI source and dest must both be vector or scalar", &I);
3343 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3344 Check(DestTy->isIntOrIntVectorTy(),
3345 "FPToUI result must be integer or integer vector", &I);
3346
3347 if (SrcVec && DstVec)
3348 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3349 cast<VectorType>(DestTy)->getElementCount(),
3350 "FPToUI source and dest vector length mismatch", &I);
3351
3352 visitInstruction(I);
3353}
3354
3355void Verifier::visitFPToSIInst(FPToSIInst &I) {
3356 // Get the source and destination types
3357 Type *SrcTy = I.getOperand(0)->getType();
3358 Type *DestTy = I.getType();
3359
3360 bool SrcVec = SrcTy->isVectorTy();
3361 bool DstVec = DestTy->isVectorTy();
3362
3363 Check(SrcVec == DstVec,
3364 "FPToSI source and dest must both be vector or scalar", &I);
3365 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3366 Check(DestTy->isIntOrIntVectorTy(),
3367 "FPToSI result must be integer or integer vector", &I);
3368
3369 if (SrcVec && DstVec)
3370 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3371 cast<VectorType>(DestTy)->getElementCount(),
3372 "FPToSI source and dest vector length mismatch", &I);
3373
3374 visitInstruction(I);
3375}
3376
3377void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3378 // Get the source and destination types
3379 Type *SrcTy = I.getOperand(0)->getType();
3380 Type *DestTy = I.getType();
3381
3382 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3383
3384 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3385 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3386 &I);
3387
3388 if (SrcTy->isVectorTy()) {
3389 auto *VSrc = cast<VectorType>(SrcTy);
3390 auto *VDest = cast<VectorType>(DestTy);
3391 Check(VSrc->getElementCount() == VDest->getElementCount(),
3392 "PtrToInt Vector width mismatch", &I);
3393 }
3394
3395 visitInstruction(I);
3396}
3397
3398void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3399 // Get the source and destination types
3400 Type *SrcTy = I.getOperand(0)->getType();
3401 Type *DestTy = I.getType();
3402
3403 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3404 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3405
3406 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3407 &I);
3408 if (SrcTy->isVectorTy()) {
3409 auto *VSrc = cast<VectorType>(SrcTy);
3410 auto *VDest = cast<VectorType>(DestTy);
3411 Check(VSrc->getElementCount() == VDest->getElementCount(),
3412 "IntToPtr Vector width mismatch", &I);
3413 }
3414 visitInstruction(I);
3415}
3416
3417void Verifier::visitBitCastInst(BitCastInst &I) {
3418 Check(
3419 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3420 "Invalid bitcast", &I);
3421 visitInstruction(I);
3422}
3423
3424void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3425 Type *SrcTy = I.getOperand(0)->getType();
3426 Type *DestTy = I.getType();
3427
3428 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3429 &I);
3430 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3431 &I);
3433 "AddrSpaceCast must be between different address spaces", &I);
3434 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3435 Check(SrcVTy->getElementCount() ==
3436 cast<VectorType>(DestTy)->getElementCount(),
3437 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3438 visitInstruction(I);
3439}
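// Illustration (hand-written sketch): addrspacecast must change the address
// space, so
//   %q = addrspacecast ptr %p to ptr addrspace(1)   ; valid
//   %r = addrspacecast ptr %p to ptr                ; invalid, same space (0)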
3440
3441/// visitPHINode - Ensure that a PHI node is well formed.
3442///
3443void Verifier::visitPHINode(PHINode &PN) {
3444 // Ensure that the PHI nodes are all grouped together at the top of the block.
3445 // This can be tested by checking whether the instruction before this is
3446 // either nonexistent (because this is begin()) or is a PHI node. If not,
3447 // then there is some other instruction before a PHI.
3448 Check(&PN == &PN.getParent()->front() ||
3449 isa<PHINode>(--BasicBlock::iterator(&PN)),
3450 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3451
3452 // Check that a PHI doesn't yield a Token.
3453 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3454
3455 // Check that all of the values of the PHI node have the same type as the
3456 // result.
3457 for (Value *IncValue : PN.incoming_values()) {
3458 Check(PN.getType() == IncValue->getType(),
3459 "PHI node operands are not the same type as the result!", &PN);
3460 }
3461
3462 // All other PHI node constraints are checked in the visitBasicBlock method.
3463
3464 visitInstruction(PN);
3465}
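// Illustration (hand-written sketch): the grouping check above rejects a PHI
// that appears after a non-PHI instruction, e.g.
//   %x = add i32 %a, %b
//   %p = phi i32 [ 0, %entry ], [ 1, %loop ]   ; invalid: PHI not at block top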
3466
3467void Verifier::visitCallBase(CallBase &Call) {
3468 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3469 "Called function must be a pointer!", Call);
3470 FunctionType *FTy = Call.getFunctionType();
3471
3472 // Verify that the correct number of arguments are being passed
3473 if (FTy->isVarArg())
3474 Check(Call.arg_size() >= FTy->getNumParams(),
3475 "Called function requires more parameters than were provided!", Call);
3476 else
3477 Check(Call.arg_size() == FTy->getNumParams(),
3478 "Incorrect number of arguments passed to called function!", Call);
3479
3480 // Verify that all arguments to the call match the function type.
3481 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3482 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3483 "Call parameter type does not match function signature!",
3484 Call.getArgOperand(i), FTy->getParamType(i), Call);
3485
3486 AttributeList Attrs = Call.getAttributes();
3487
3488 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3489 "Attribute after last parameter!", Call);
3490
3491 Function *Callee =
3492 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3493 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3494 if (IsIntrinsic)
3495 Check(Callee->getValueType() == FTy,
3496 "Intrinsic called with incompatible signature", Call);
3497
3498 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3499 // convention.
3500 auto CC = Call.getCallingConv();
3503 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3504 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3505 Call);
3506
3507 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3508 if (!Ty->isSized())
3509 return;
3510 Align ABIAlign = DL.getABITypeAlign(Ty);
3511 Align MaxAlign(ParamMaxAlignment);
3512 Check(ABIAlign <= MaxAlign,
3513 "Incorrect alignment of " + Message + " to called function!", Call);
3514 };
3515
3516 if (!IsIntrinsic) {
3517 VerifyTypeAlign(FTy->getReturnType(), "return type");
3518 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3519 Type *Ty = FTy->getParamType(i);
3520 VerifyTypeAlign(Ty, "argument passed");
3521 }
3522 }
3523
3524 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3525 // Don't allow speculatable on call sites, unless the underlying function
3526 // declaration is also speculatable.
3527 Check(Callee && Callee->isSpeculatable(),
3528 "speculatable attribute may not apply to call sites", Call);
3529 }
3530
3531 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3532 Check(Call.getCalledFunction()->getIntrinsicID() ==
3533 Intrinsic::call_preallocated_arg,
3534 "preallocated as a call site attribute can only be on "
3535 "llvm.call.preallocated.arg");
3536 }
3537
3538 // Verify call attributes.
3539 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3540
3541 // Conservatively check the inalloca argument.
3542 // We have a bug if we can find that there is an underlying alloca without
3543 // inalloca.
3544 if (Call.hasInAllocaArgument()) {
3545 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3546 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3547 Check(AI->isUsedWithInAlloca(),
3548 "inalloca argument for call has mismatched alloca", AI, Call);
3549 }
3550
3551 // For each argument of the callsite, if it has the swifterror argument,
3552 // make sure the underlying alloca/parameter it comes from has a swifterror as
3553 // well.
3554 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3555 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3556 Value *SwiftErrorArg = Call.getArgOperand(i);
3557 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3558 Check(AI->isSwiftError(),
3559 "swifterror argument for call has mismatched alloca", AI, Call);
3560 continue;
3561 }
3562 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3563 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3564 SwiftErrorArg, Call);
3565 Check(ArgI->hasSwiftErrorAttr(),
3566 "swifterror argument for call has mismatched parameter", ArgI,
3567 Call);
3568 }
3569
3570 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3571 // Don't allow immarg on call sites, unless the underlying declaration
3572 // also has the matching immarg.
3573 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3574 "immarg may not apply only to call sites", Call.getArgOperand(i),
3575 Call);
3576 }
3577
3578 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3579 Value *ArgVal = Call.getArgOperand(i);
3580 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3581 "immarg operand has non-immediate parameter", ArgVal, Call);
3582 }
3583
3584 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3585 Value *ArgVal = Call.getArgOperand(i);
3586 bool hasOB =
3587 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3588 bool isMustTail = Call.isMustTailCall();
3589 Check(hasOB != isMustTail,
3590 "preallocated operand either requires a preallocated bundle or "
3591 "the call to be musttail (but not both)",
3592 ArgVal, Call);
3593 }
3594 }
3595
3596 if (FTy->isVarArg()) {
3597 // FIXME? is 'nest' even legal here?
3598 bool SawNest = false;
3599 bool SawReturned = false;
3600
3601 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3602 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3603 SawNest = true;
3604 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3605 SawReturned = true;
3606 }
3607
3608 // Check attributes on the varargs part.
3609 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3610 Type *Ty = Call.getArgOperand(Idx)->getType();
3611 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3612 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3613
3614 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3615 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3616 SawNest = true;
3617 }
3618
3619 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3620 Check(!SawReturned, "More than one parameter has attribute returned!",
3621 Call);
3622 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3623 "Incompatible argument and return types for 'returned' "
3624 "attribute",
3625 Call);
3626 SawReturned = true;
3627 }
3628
3629 // Statepoint intrinsic is vararg but the wrapped function may be not.
3630 // Allow sret here and check the wrapped function in verifyStatepoint.
3631 if (!Call.getCalledFunction() ||
3632 Call.getCalledFunction()->getIntrinsicID() !=
3633 Intrinsic::experimental_gc_statepoint)
3634 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3635 "Attribute 'sret' cannot be used for vararg call arguments!",
3636 Call);
3637
3638 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3639 Check(Idx == Call.arg_size() - 1,
3640 "inalloca isn't on the last argument!", Call);
3641 }
3642 }
3643
3644 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3645 if (!IsIntrinsic) {
3646 for (Type *ParamTy : FTy->params()) {
3647 Check(!ParamTy->isMetadataTy(),
3648 "Function has metadata parameter but isn't an intrinsic", Call);
3649 Check(!ParamTy->isTokenTy(),
3650 "Function has token parameter but isn't an intrinsic", Call);
3651 }
3652 }
3653
3654 // Verify that indirect calls don't return tokens.
3655 if (!Call.getCalledFunction()) {
3656 Check(!FTy->getReturnType()->isTokenTy(),
3657 "Return type cannot be token for indirect call!");
3658 Check(!FTy->getReturnType()->isX86_AMXTy(),
3659 "Return type cannot be x86_amx for indirect call!");
3660 }
3661
3662 if (Function *F = Call.getCalledFunction())
3663 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3664 visitIntrinsicCall(ID, Call);
3665
3666 // Verify that a callsite has at most one operand bundle of each kind that
3667 // must be unique: "deopt", "funclet", "gc-transition", "cfguardtarget",
3668 // "preallocated", "ptrauth", "kcfi", "gc-live", and "clang.arc.attachedcall".
3669 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3670 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3671 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3672 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3673 FoundAttachedCallBundle = false;
3674 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3675 OperandBundleUse BU = Call.getOperandBundleAt(i);
3676 uint32_t Tag = BU.getTagID();
3677 if (Tag == LLVMContext::OB_deopt) {
3678 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3679 FoundDeoptBundle = true;
3680 } else if (Tag == LLVMContext::OB_gc_transition) {
3681 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3682 Call);
3683 FoundGCTransitionBundle = true;
3684 } else if (Tag == LLVMContext::OB_funclet) {
3685 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3686 FoundFuncletBundle = true;
3687 Check(BU.Inputs.size() == 1,
3688 "Expected exactly one funclet bundle operand", Call);
3689 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3690 "Funclet bundle operands should correspond to a FuncletPadInst",
3691 Call);
3692 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3693 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3694 Call);
3695 FoundCFGuardTargetBundle = true;
3696 Check(BU.Inputs.size() == 1,
3697 "Expected exactly one cfguardtarget bundle operand", Call);
3698 } else if (Tag == LLVMContext::OB_ptrauth) {
3699 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3700 FoundPtrauthBundle = true;
3701 Check(BU.Inputs.size() == 2,
3702 "Expected exactly two ptrauth bundle operands", Call);
3703 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3704 BU.Inputs[0]->getType()->isIntegerTy(32),
3705 "Ptrauth bundle key operand must be an i32 constant", Call);
3706 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3707 "Ptrauth bundle discriminator operand must be an i64", Call);
3708 } else if (Tag == LLVMContext::OB_kcfi) {
3709 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3710 FoundKCFIBundle = true;
3711 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3712 Call);
3713 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3714 BU.Inputs[0]->getType()->isIntegerTy(32),
3715 "Kcfi bundle operand must be an i32 constant", Call);
3716 } else if (Tag == LLVMContext::OB_preallocated) {
3717 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3718 Call);
3719 FoundPreallocatedBundle = true;
3720 Check(BU.Inputs.size() == 1,
3721 "Expected exactly one preallocated bundle operand", Call);
3722 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3723 Check(Input &&
3724 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3725 "\"preallocated\" argument must be a token from "
3726 "llvm.call.preallocated.setup",
3727 Call);
3728 } else if (Tag == LLVMContext::OB_gc_live) {
3729 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3730 FoundGCLiveBundle = true;
3731 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3732 Check(!FoundAttachedCallBundle,
3733 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3734 FoundAttachedCallBundle = true;
3735 verifyAttachedCallBundle(Call, BU);
3736 }
3737 }
3738
3739 // Verify that callee and callsite agree on whether to use pointer auth.
3740 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3741 "Direct call cannot have a ptrauth bundle", Call);
3742
3743 // Verify that each inlinable callsite of a debug-info-bearing function in a
3744 // debug-info-bearing function has a debug location attached to it. Failure to
3745 // do so causes assertion failures when the inliner sets up inline scope info.
3746 // (Interposable functions are not inlinable, nor are functions without
3747 // definitions.)
3748 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3749 !Call.getCalledFunction()->isInterposable() &&
3750 !Call.getCalledFunction()->isDeclaration() &&
3751 Call.getCalledFunction()->getSubprogram())
3752 CheckDI(Call.getDebugLoc(),
3753 "inlinable function call in a function with "
3754 "debug info must have a !dbg location",
3755 Call);
3756
3757 if (Call.isInlineAsm())
3758 verifyInlineAsmCall(Call);
3759
3760 ConvergenceVerifyHelper.visit(Call);
3761
3762 visitInstruction(Call);
3763}
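// Illustration (hand-written sketch): clients typically surface the checks in
// visitCallBase (and the rest of the verifier) through the public entry
// points declared in llvm/IR/Verifier.h, e.g.
//   if (verifyModule(M, &errs()))
//     report_fatal_error("broken module");
// verifyModule/verifyFunction return true when the IR is broken.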
3764
3765void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3766 StringRef Context) {
3767 Check(!Attrs.contains(Attribute::InAlloca),
3768 Twine("inalloca attribute not allowed in ") + Context);
3769 Check(!Attrs.contains(Attribute::InReg),
3770 Twine("inreg attribute not allowed in ") + Context);
3771 Check(!Attrs.contains(Attribute::SwiftError),
3772 Twine("swifterror attribute not allowed in ") + Context);
3773 Check(!Attrs.contains(Attribute::Preallocated),
3774 Twine("preallocated attribute not allowed in ") + Context);
3775 Check(!Attrs.contains(Attribute::ByRef),
3776 Twine("byref attribute not allowed in ") + Context);
3777}
3778
3779/// Two types are "congruent" if they are identical, or if they are both pointer
3780/// types with different pointee types and the same address space.
3781static bool isTypeCongruent(Type *L, Type *R) {
3782 if (L == R)
3783 return true;
3784 PointerType *PL = dyn_cast<PointerType>(L);
3785 PointerType *PR = dyn_cast<PointerType>(R);
3786 if (!PL || !PR)
3787 return false;
3788 return PL->getAddressSpace() == PR->getAddressSpace();
3789}
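// Illustration (hand-written sketch): under this definition 'ptr' and
// 'ptr addrspace(0)' are congruent, 'ptr' and 'ptr addrspace(1)' are not,
// and non-pointer types are congruent only when they are identical.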
3790
3791 static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3792 static const Attribute::AttrKind ABIAttrs[] = {
3793 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3794 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3795 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3796 Attribute::ByRef};
3797 AttrBuilder Copy(C);
3798 for (auto AK : ABIAttrs) {
3799 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3800 if (Attr.isValid())
3801 Copy.addAttribute(Attr);
3802 }
3803
3804 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3805 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3806 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3807 Attrs.hasParamAttr(I, Attribute::ByRef)))
3808 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3809 return Copy;
3810}
3811
3812void Verifier::verifyMustTailCall(CallInst &CI) {
3813 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3814
3815 Function *F = CI.getParent()->getParent();
3816 FunctionType *CallerTy = F->getFunctionType();
3817 FunctionType *CalleeTy = CI.getFunctionType();
3818 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3819 "cannot guarantee tail call due to mismatched varargs", &CI);
3820 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3821 "cannot guarantee tail call due to mismatched return types", &CI);
3822
3823 // - The calling conventions of the caller and callee must match.
3824 Check(F->getCallingConv() == CI.getCallingConv(),
3825 "cannot guarantee tail call due to mismatched calling conv", &CI);
3826
3827 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3828 // or a pointer bitcast followed by a ret instruction.
3829 // - The ret instruction must return the (possibly bitcasted) value
3830 // produced by the call or void.
3831 Value *RetVal = &CI;
3832 Instruction *Next = CI.getNextNode();
3833
3834 // Handle the optional bitcast.
3835 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3836 Check(BI->getOperand(0) == RetVal,
3837 "bitcast following musttail call must use the call", BI);
3838 RetVal = BI;
3839 Next = BI->getNextNode();
3840 }
3841
3842 // Check the return.
3843 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3844 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3845 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3846 isa<UndefValue>(Ret->getReturnValue()),
3847 "musttail call result must be returned", Ret);
3848
3849 AttributeList CallerAttrs = F->getAttributes();
3850 AttributeList CalleeAttrs = CI.getAttributes();
3851 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3852 CI.getCallingConv() == CallingConv::Tail) {
3853 StringRef CCName =
3854 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3855
3856 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3857 // are allowed in swifttailcc call
3858 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3859 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3860 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3861 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3862 }
3863 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3864 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3865 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3866 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3867 }
3868 // - Varargs functions are not allowed
3869 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3870 " tail call for varargs function");
3871 return;
3872 }
3873
3874 // - The caller and callee prototypes must match. Pointer types of
3875 // parameters or return types may differ in pointee type, but not
3876 // address space.
3877 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3878 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3879 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3880 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3881 Check(
3882 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3883 "cannot guarantee tail call due to mismatched parameter types", &CI);
3884 }
3885 }
3886
3887 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3888 // returned, preallocated, and inalloca, must match.
3889 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3890 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3891 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3892 Check(CallerABIAttrs == CalleeABIAttrs,
3893 "cannot guarantee tail call due to mismatched ABI impacting "
3894 "function attributes",
3895 &CI, CI.getOperand(I));
3896 }
3897}
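// Illustration (hand-written sketch): a well-formed musttail sequence is
//   %r = musttail call i32 @callee(i32 %x)
//   ret i32 %r
// Inserting any other instruction between the call and the ret, or returning
// a value other than the call's result (or undef/void), trips the checks
// above.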
3898
3899void Verifier::visitCallInst(CallInst &CI) {
3900 visitCallBase(CI);
3901
3902 if (CI.isMustTailCall())
3903 verifyMustTailCall(CI);
3904}
3905
3906void Verifier::visitInvokeInst(InvokeInst &II) {
3907 visitCallBase(II);
3908
3909 // Verify that the first non-PHI instruction of the unwind destination is an
3910 // exception handling instruction.
3911 Check(
3912 II.getUnwindDest()->isEHPad(),
3913 "The unwind destination does not have an exception handling instruction!",
3914 &II);
3915
3916 visitTerminator(II);
3917}
3918
3919/// visitUnaryOperator - Check the argument to the unary operator.
3920///
3921void Verifier::visitUnaryOperator(UnaryOperator &U) {
3922 Check(U.getType() == U.getOperand(0)->getType(),
3923 "Unary operators must have same type for"
3924 "operands and result!",
3925 &U);
3926
3927 switch (U.getOpcode()) {
3928 // Check that floating-point arithmetic operators are only used with
3929 // floating-point operands.
3930 case Instruction::FNeg:
3931 Check(U.getType()->isFPOrFPVectorTy(),
3932 "FNeg operator only works with float types!", &U);
3933 break;
3934 default:
3935 llvm_unreachable("Unknown UnaryOperator opcode!");
3936 }
3937
3938 visitInstruction(U);
3939}
3940
3941/// visitBinaryOperator - Check that both arguments to the binary operator are
3942/// of the same type!
3943///
3944void Verifier::visitBinaryOperator(BinaryOperator &B) {
3945 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3946 "Both operands to a binary operator are not of the same type!", &B);
3947
3948 switch (B.getOpcode()) {
3949 // Check that integer arithmetic operators are only used with
3950 // integral operands.
3951 case Instruction::Add:
3952 case Instruction::Sub:
3953 case Instruction::Mul:
3954 case Instruction::SDiv:
3955 case Instruction::UDiv:
3956 case Instruction::SRem:
3957 case Instruction::URem:
3958 Check(B.getType()->isIntOrIntVectorTy(),
3959 "Integer arithmetic operators only work with integral types!", &B);
3960 Check(B.getType() == B.getOperand(0)->getType(),
3961 "Integer arithmetic operators must have same type "
3962 "for operands and result!",
3963 &B);
3964 break;
3965 // Check that floating-point arithmetic operators are only used with
3966 // floating-point operands.
3967 case Instruction::FAdd:
3968 case Instruction::FSub:
3969 case Instruction::FMul:
3970 case Instruction::FDiv:
3971 case Instruction::FRem:
3972 Check(B.getType()->isFPOrFPVectorTy(),
3973 "Floating-point arithmetic operators only work with "
3974 "floating-point types!",
3975 &B);
3976 Check(B.getType() == B.getOperand(0)->getType(),
3977 "Floating-point arithmetic operators must have same type "
3978 "for operands and result!",
3979 &B);
3980 break;
3981 // Check that logical operators are only used with integral operands.
3982 case Instruction::And:
3983 case Instruction::Or:
3984 case Instruction::Xor:
3985 Check(B.getType()->isIntOrIntVectorTy(),
3986 "Logical operators only work with integral types!", &B);
3987 Check(B.getType() == B.getOperand(0)->getType(),
3988 "Logical operators must have same type for operands and result!", &B);
3989 break;
3990 case Instruction::Shl:
3991 case Instruction::LShr:
3992 case Instruction::AShr:
3993 Check(B.getType()->isIntOrIntVectorTy(),
3994 "Shifts only work with integral types!", &B);
3995 Check(B.getType() == B.getOperand(0)->getType(),
3996 "Shift return type must be same as operands!", &B);
3997 break;
3998 default:
3999 llvm_unreachable("Unknown BinaryOperator opcode!");
4000 }
4001
4002 visitInstruction(B);
4003}
4004
4005void Verifier::visitICmpInst(ICmpInst &IC) {
4006 // Check that the operands are the same type
4007 Type *Op0Ty = IC.getOperand(0)->getType();
4008 Type *Op1Ty = IC.getOperand(1)->getType();
4009 Check(Op0Ty == Op1Ty,
4010 "Both operands to ICmp instruction are not of the same type!", &IC);
4011 // Check that the operands are the right type
4012 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4013 "Invalid operand types for ICmp instruction", &IC);
4014 // Check that the predicate is valid.
4015 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4016
4017 visitInstruction(IC);
4018}
4019
4020void Verifier::visitFCmpInst(FCmpInst &FC) {
4021 // Check that the operands are the same type
4022 Type *Op0Ty = FC.getOperand(0)->getType();
4023 Type *Op1Ty = FC.getOperand(1)->getType();
4024 Check(Op0Ty == Op1Ty,
4025 "Both operands to FCmp instruction are not of the same type!", &FC);
4026 // Check that the operands are the right type
4027 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4028 &FC);
4029 // Check that the predicate is valid.
4030 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4031
4032 visitInstruction(FC);
4033}
4034
4035void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4037 "Invalid extractelement operands!", &EI);
4038 visitInstruction(EI);
4039}
4040
4041void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4042 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4043 IE.getOperand(2)),
4044 "Invalid insertelement operands!", &IE);
4045 visitInstruction(IE);
4046}
4047
4048void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4049 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4050 SV.getShuffleMask()),
4051 "Invalid shufflevector operands!", &SV);
4052 visitInstruction(SV);
4053}
4054
4055void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4056 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4057
4058 Check(isa<PointerType>(TargetTy),
4059 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4060 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4061
4062 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4063 SmallPtrSet<Type *, 4> Visited;
4064 Check(!STy->containsScalableVectorType(&Visited),
4065 "getelementptr cannot target structure that contains scalable vector"
4066 "type",
4067 &GEP);
4068 }
4069
4070 SmallVector<Value *, 16> Idxs(GEP.indices());
4071 Check(
4072 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4073 "GEP indexes must be integers", &GEP);
4074 Type *ElTy =
4075 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4076 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4077
4078 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4079 GEP.getResultElementType() == ElTy,
4080 "GEP is not of right type for indices!", &GEP, ElTy);
4081
4082 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4083 // Additional checks for vector GEPs.
4084 ElementCount GEPWidth = GEPVTy->getElementCount();
4085 if (GEP.getPointerOperandType()->isVectorTy())
4086 Check(
4087 GEPWidth ==
4088 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4089 "Vector GEP result width doesn't match operand's", &GEP);
4090 for (Value *Idx : Idxs) {
4091 Type *IndexTy = Idx->getType();
4092 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4093 ElementCount IndexWidth = IndexVTy->getElementCount();
4094 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4095 }
4096 Check(IndexTy->isIntOrIntVectorTy(),
4097 "All GEP indices should be of integer type");
4098 }
4099 }
4100
4101 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4102 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4103 "GEP address space doesn't match type", &GEP);
4104 }
4105
4106 visitInstruction(GEP);
4107}
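// Illustration (hand-written sketch): the index checks above accept
//   %p = getelementptr { i32, [4 x i8] }, ptr %base, i64 0, i32 1, i64 2
// and reject non-integer indices or an index list that does not correspond to
// the stated source element type.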
4108
4109static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4110 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4111}
4112
4113/// Verify !range and !absolute_symbol metadata. These have the same
4114/// restrictions, except !absolute_symbol allows the full set.
4115void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4116 Type *Ty, bool IsAbsoluteSymbol) {
4117 unsigned NumOperands = Range->getNumOperands();
4118 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4119 unsigned NumRanges = NumOperands / 2;
4120 Check(NumRanges >= 1, "It should have at least one range!", Range);
4121
4122 ConstantRange LastRange(1, true); // Dummy initial value
4123 for (unsigned i = 0; i < NumRanges; ++i) {
4124 ConstantInt *Low =
4125 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4126 Check(Low, "The lower limit must be an integer!", Low);
4127 ConstantInt *High =
4128 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4129 Check(High, "The upper limit must be an integer!", High);
4130 Check(High->getType() == Low->getType() &&
4131 High->getType() == Ty->getScalarType(),
4132 "Range types must match instruction type!", &I);
4133
4134 APInt HighV = High->getValue();
4135 APInt LowV = Low->getValue();
4136
4137 // ConstantRange asserts if the ranges are the same except for the min/max
4138 // value. Leave the cases it tolerates for the empty range error below.
4139 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4140 "The upper and lower limits cannot be the same value", &I);
4141
4142 ConstantRange CurRange(LowV, HighV);
4143 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4144 "Range must not be empty!", Range);
4145 if (i != 0) {
4146 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4147 "Intervals are overlapping", Range);
4148 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4149 Range);
4150 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4151 Range);
4152 }
4153 LastRange = ConstantRange(LowV, HighV);
4154 }
4155 if (NumRanges > 2) {
4156 APInt FirstLow =
4157 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4158 APInt FirstHigh =
4159 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4160 ConstantRange FirstRange(FirstLow, FirstHigh);
4161 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4162 "Intervals are overlapping", Range);
4163 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4164 Range);
4165 }
4166}
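// Illustration (hand-written sketch): a well-formed !range attachment lists
// ordered, non-overlapping, non-contiguous [Lo, Hi) pairs of the value type:
//   %v = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 2, i8 4, i8 6}   ; permits 0-1 and 4-5
// A pair such as !{i8 2, i8 2} is rejected as an empty range.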
4167
4168void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4169 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4170 "precondition violation");
4171 verifyRangeMetadata(I, Range, Ty, false);
4172}
4173
4174void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4175 unsigned Size = DL.getTypeSizeInBits(Ty);
4176 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4177 Check(!(Size & (Size - 1)),
4178 "atomic memory access' operand must have a power-of-two size", Ty, I);
4179}
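// Illustration (hand-written sketch): these size rules allow
//   %v = load atomic i32, ptr %p seq_cst, align 4
// but reject atomic accesses whose type is narrower than a byte (e.g. i1) or
// not a power of two in size (e.g. i48).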
4180
4181void Verifier::visitLoadInst(LoadInst &LI) {
4182 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4183 Check(PTy, "Load operand must be a pointer.", &LI);
4184 Type *ElTy = LI.getType();
4185 if (MaybeAlign A = LI.getAlign()) {
4186 Check(A->value() <= Value::MaximumAlignment,
4187 "huge alignment values are unsupported", &LI);
4188 }
4189 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4190 if (LI.isAtomic()) {
4193 "Load cannot have Release ordering", &LI);
4194 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4195 "atomic load operand must have integer, pointer, or floating point "
4196 "type!",
4197 ElTy, &LI);
4198 checkAtomicMemAccessSize(ElTy, &LI);
4199 } else {
4201 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4202 }
4203
4204 visitInstruction(LI);
4205}
4206
4207void Verifier::visitStoreInst(StoreInst &SI) {
4208 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4209 Check(PTy, "Store operand must be a pointer.", &SI);
4210 Type *ElTy = SI.getOperand(0)->getType();
4211 if (MaybeAlign A = SI.getAlign()) {
4212 Check(A->value() <= Value::MaximumAlignment,
4213 "huge alignment values are unsupported", &SI);
4214 }
4215 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4216 if (SI.isAtomic()) {
4217 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4218 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4219 "Store cannot have Acquire ordering", &SI);
4220 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4221 "atomic store operand must have integer, pointer, or floating point "
4222 "type!",
4223 ElTy, &SI);
4224 checkAtomicMemAccessSize(ElTy, &SI);
4225 } else {
4226 Check(SI.getSyncScopeID() == SyncScope::System,
4227 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4228 }
4229 visitInstruction(SI);
4230}
4231
4232/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4233void Verifier::verifySwiftErrorCall(CallBase &Call,
4234 const Value *SwiftErrorVal) {
4235 for (const auto &I : llvm::enumerate(Call.args())) {
4236 if (I.value() == SwiftErrorVal) {
4237 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4238 "swifterror value when used in a callsite should be marked "
4239 "with swifterror attribute",
4240 SwiftErrorVal, Call);
4241 }
4242 }
4243}
4244
4245void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4246 // Check that swifterror value is only used by loads, stores, or as
4247 // a swifterror argument.
4248 for (const User *U : SwiftErrorVal->users()) {
4249 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4250 isa<InvokeInst>(U),
4251 "swifterror value can only be loaded and stored from, or "
4252 "as a swifterror argument!",
4253 SwiftErrorVal, U);
4254 // If it is used by a store, check it is the second operand.
4255 if (auto StoreI = dyn_cast<StoreInst>(U))
4256 Check(StoreI->getOperand(1) == SwiftErrorVal,
4257 "swifterror value should be the second operand when used "
4258 "by stores",
4259 SwiftErrorVal, U);
4260 if (auto *Call = dyn_cast<CallBase>(U))
4261 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4262 }
4263}
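// Illustration (hand-written sketch): a swifterror slot is created and passed
// along as
//   %err = alloca swifterror ptr
//   call void @takes_err(ptr swifterror %err)
// Any use other than a load, store, or swifterror call argument is rejected
// by the checks above.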
4264
4265void Verifier::visitAllocaInst(AllocaInst &AI) {
4266 SmallPtrSet<Type*, 4> Visited;
4267 Check(AI.getAllocatedType()->isSized(&Visited),
4268 "Cannot allocate unsized type", &AI);
4270 "Alloca array size must have integer type", &AI);
4271 if (MaybeAlign A = AI.getAlign()) {
4272 Check(A->value() <= Value::MaximumAlignment,
4273 "huge alignment values are unsupported", &AI);
4274 }
4275
4276 if (AI.isSwiftError()) {
4278 "swifterror alloca must have pointer type", &AI);
4280 "swifterror alloca must not be array allocation", &AI);
4281 verifySwiftErrorValue(&AI);
4282 }
4283
4284 visitInstruction(AI);
4285}
4286
4287void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4288 Type *ElTy = CXI.getOperand(1)->getType();
4289 Check(ElTy->isIntOrPtrTy(),
4290 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4291 checkAtomicMemAccessSize(ElTy, &CXI);
4292 visitInstruction(CXI);
4293}
4294
4295void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4297 "atomicrmw instructions cannot be unordered.", &RMWI);
4298 auto Op = RMWI.getOperation();
4299 Type *ElTy = RMWI.getOperand(1)->getType();
4300 if (Op == AtomicRMWInst::Xchg) {
4301 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4302 ElTy->isPointerTy(),
4303 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4304 " operand must have integer or floating point type!",
4305 &RMWI, ElTy);
4306 } else if (AtomicRMWInst::isFPOperation(Op)) {
4307 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4308 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4309 " operand must have floating-point or fixed vector of floating-point "
4310 "type!",
4311 &RMWI, ElTy);
4312 } else {
4313 Check(ElTy->isIntegerTy(),
4314 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4315 " operand must have integer type!",
4316 &RMWI, ElTy);
4317 }
4318 checkAtomicMemAccessSize(ElTy, &RMWI);
4320 "Invalid binary operation!", &RMWI);
4321 visitInstruction(RMWI);
4322}
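// Illustration (hand-written sketch): per the operand checks above,
//   %old = atomicrmw add ptr %p, i32 1 seq_cst      ; valid, integer add
// while 'add' with a floating-point operand is rejected; only xchg and the
// dedicated FP operations (fadd, fsub, fmax, fmin) take FP values.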
4323
4324void Verifier::visitFenceInst(FenceInst &FI) {
4325 const AtomicOrdering Ordering = FI.getOrdering();
4326 Check(Ordering == AtomicOrdering::Acquire ||
4327 Ordering == AtomicOrdering::Release ||
4328 Ordering == AtomicOrdering::AcquireRelease ||
4330 "fence instructions may only have acquire, release, acq_rel, or "
4331 "seq_cst ordering.",
4332 &FI);
4333 visitInstruction(FI);
4334}
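// Illustration (hand-written sketch): 'fence acquire' and 'fence seq_cst'
// satisfy the ordering check above; a monotonic or unordered fence does not.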
4335
4336void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4337 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4338 EVI.getIndices()) == EVI.getType(),
4339 "Invalid ExtractValueInst operands!", &EVI);
4340
4341 visitInstruction(EVI);
4342}
4343
4344void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4345 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4346 IVI.getIndices()) ==
4347 IVI.getOperand(1)->getType(),
4348 "Invalid InsertValueInst operands!", &IVI);
4349
4350 visitInstruction(IVI);
4351}
4352
4353static Value *getParentPad(Value *EHPad) {
4354 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4355 return FPI->getParentPad();
4356
4357 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4358}
4359
4360void Verifier::visitEHPadPredecessors(Instruction &I) {
4361 assert(I.isEHPad());
4362
4363 BasicBlock *BB = I.getParent();
4364 Function *F = BB->getParent();
4365
4366 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4367
4368 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4369 // The landingpad instruction defines its parent as a landing pad block. The
4370 // landing pad block may be branched to only by the unwind edge of an
4371 // invoke.
4372 for (BasicBlock *PredBB : predecessors(BB)) {
4373 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4374 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4375 "Block containing LandingPadInst must be jumped to "
4376 "only by the unwind edge of an invoke.",
4377 LPI);
4378 }
4379 return;
4380 }
4381 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4382 if (!pred_empty(BB))
4383 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4384 "Block containg CatchPadInst must be jumped to "
4385 "only by its catchswitch.",
4386 CPI);
4387 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4388 "Catchswitch cannot unwind to one of its catchpads",
4389 CPI->getCatchSwitch(), CPI);
4390 return;
4391 }
4392
4393 // Verify that each pred has a legal terminator with a legal to/from EH
4394 // pad relationship.
4395 Instruction *ToPad = &I;
4396 Value *ToPadParent = getParentPad(ToPad);
4397 for (BasicBlock *PredBB : predecessors(BB)) {
4398 Instruction *TI = PredBB->getTerminator();
4399 Value *FromPad;
4400 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4401 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4402 "EH pad must be jumped to via an unwind edge", ToPad, II);
4403 auto *CalledFn =
4404 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4405 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4406 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4407 continue;
4408 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4409 FromPad = Bundle->Inputs[0];
4410 else
4411 FromPad = ConstantTokenNone::get(II->getContext());
4412 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4413 FromPad = CRI->getOperand(0);
4414 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4415 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4416 FromPad = CSI;
4417 } else {
4418 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4419 }
4420
4421 // The edge may exit from zero or more nested pads.
4422 SmallSet<Value *, 8> Seen;
4423 for (;; FromPad = getParentPad(FromPad)) {
4424 Check(FromPad != ToPad,
4425 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4426 if (FromPad == ToPadParent) {
4427 // This is a legal unwind edge.
4428 break;
4429 }
4430 Check(!isa<ConstantTokenNone>(FromPad),
4431 "A single unwind edge may only enter one EH pad", TI);
4432 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4433 FromPad);
4434
4435 // This will be diagnosed on the corresponding instruction already. We
4436 // need the extra check here to make sure getParentPad() works.
4437 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4438 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4439 }
4440 }
4441}
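// Illustration (hand-written sketch): a landing pad block may only be entered
// through the unwind edge of an invoke, e.g.
//   invoke void @may_throw() to label %cont unwind label %lpad
// A plain 'br label %lpad' from some other block trips the predecessor checks
// above.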
4442
4443void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4444 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4445 // isn't a cleanup.
4446 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4447 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4448
4449 visitEHPadPredecessors(LPI);
4450
4451 if (!LandingPadResultTy)
4452 LandingPadResultTy = LPI.getType();
4453 else
4454 Check(LandingPadResultTy == LPI.getType(),
4455 "The landingpad instruction should have a consistent result type "
4456 "inside a function.",
4457 &LPI);
4458
4459 Function *F = LPI.getParent()->getParent();
4460 Check(F->hasPersonalityFn(),
4461 "LandingPadInst needs to be in a function with a personality.", &LPI);
4462
4463 // The landingpad instruction must be the first non-PHI instruction in the
4464 // block.
4465 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4466 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4467
4468 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4469 Constant *Clause = LPI.getClause(i);
4470 if (LPI.isCatch(i)) {
4471 Check(isa<PointerType>(Clause->getType()),
4472 "Catch operand does not have pointer type!", &LPI);
4473 } else {
4474 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4475 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4476 "Filter operand is not an array of constants!", &LPI);
4477 }
4478 }
4479
4480 visitInstruction(LPI);
4481}
4482
4483void Verifier::visitResumeInst(ResumeInst &RI) {
4485 "ResumeInst needs to be in a function with a personality.", &RI);
4486
4487 if (!LandingPadResultTy)
4488 LandingPadResultTy = RI.getValue()->getType();
4489 else
4490 Check(LandingPadResultTy == RI.getValue()->getType(),
4491 "The resume instruction should have a consistent result type "
4492 "inside a function.",
4493 &RI);
4494
4495 visitTerminator(RI);
4496}
4497
4498void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4499 BasicBlock *BB = CPI.getParent();
4500
4501 Function *F = BB->getParent();
4502 Check(F->hasPersonalityFn(),
4503 "CatchPadInst needs to be in a function with a personality.", &CPI);
4504
4505 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4506 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4507 CPI.getParentPad());
4508
4509 // The catchpad instruction must be the first non-PHI instruction in the
4510 // block.
4511 Check(BB->getFirstNonPHI() == &CPI,
4512 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4513
4514 visitEHPadPredecessors(CPI);
4515 visitFuncletPadInst(CPI);
4516}
4517
4518void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4519 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4520 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4521 CatchReturn.getOperand(0));
4522
4523 visitTerminator(CatchReturn);
4524}
4525
4526void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4527 BasicBlock *BB = CPI.getParent();
4528
4529 Function *F = BB->getParent();
4530 Check(F->hasPersonalityFn(),
4531 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4532
4533 // The cleanuppad instruction must be the first non-PHI instruction in the
4534 // block.
4535 Check(BB->getFirstNonPHI() == &CPI,
4536 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4537
4538 auto *ParentPad = CPI.getParentPad();
4539 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4540 "CleanupPadInst has an invalid parent.", &CPI);
4541
4542 visitEHPadPredecessors(CPI);
4543 visitFuncletPadInst(CPI);
4544}
4545
4546void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4547 User *FirstUser = nullptr;
4548 Value *FirstUnwindPad = nullptr;
4549 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4550 SmallSet<FuncletPadInst *, 8> Seen;
4551
4552 while (!Worklist.empty()) {
4553 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4554 Check(Seen.insert(CurrentPad).second,
4555 "FuncletPadInst must not be nested within itself", CurrentPad);
4556 Value *UnresolvedAncestorPad = nullptr;
4557 for (User *U : CurrentPad->users()) {
4558 BasicBlock *UnwindDest;
4559 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4560 UnwindDest = CRI->getUnwindDest();
4561 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4562 // We allow catchswitch unwind to caller to nest
4563 // within an outer pad that unwinds somewhere else,
4564 // because catchswitch doesn't have a nounwind variant.
4565 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4566 if (CSI->unwindsToCaller())
4567 continue;
4568 UnwindDest = CSI->getUnwindDest();
4569 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4570 UnwindDest = II->getUnwindDest();
4571 } else if (isa<CallInst>(U)) {
4572 // Calls which don't unwind may be found inside funclet
4573 // pads that unwind somewhere else. We don't *require*
4574 // such calls to be annotated nounwind.
4575 continue;
4576 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4577 // The unwind dest for a cleanup can only be found by
4578 // recursive search. Add it to the worklist, and we'll
4579 // search for its first use that determines where it unwinds.
4580 Worklist.push_back(CPI);
4581 continue;
4582 } else {
4583 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4584 continue;
4585 }
4586
4587 Value *UnwindPad;
4588 bool ExitsFPI;
4589 if (UnwindDest) {
4590 UnwindPad = UnwindDest->getFirstNonPHI();
4591 if (!cast<Instruction>(UnwindPad)->isEHPad())
4592 continue;
4593 Value *UnwindParent = getParentPad(UnwindPad);
4594 // Ignore unwind edges that don't exit CurrentPad.
4595 if (UnwindParent == CurrentPad)
4596 continue;
4597 // Determine whether the original funclet pad is exited,
4598 // and if we are scanning nested pads determine how many
4599 // of them are exited so we can stop searching their
4600 // children.
4601 Value *ExitedPad = CurrentPad;
4602 ExitsFPI = false;
4603 do {
4604 if (ExitedPad == &FPI) {
4605 ExitsFPI = true;
4606 // Now we can resolve any ancestors of CurrentPad up to
4607 // FPI, but not including FPI since we need to make sure
4608 // to check all direct users of FPI for consistency.
4609 UnresolvedAncestorPad = &FPI;
4610 break;
4611 }
4612 Value *ExitedParent = getParentPad(ExitedPad);
4613 if (ExitedParent == UnwindParent) {
4614 // ExitedPad is the ancestor-most pad which this unwind
4615 // edge exits, so we can resolve up to it, meaning that
4616 // ExitedParent is the first ancestor still unresolved.
4617 UnresolvedAncestorPad = ExitedParent;
4618 break;
4619 }
4620 ExitedPad = ExitedParent;
4621 } while (!isa<ConstantTokenNone>(ExitedPad));
4622 } else {
4623 // Unwinding to caller exits all pads.
4624 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4625 ExitsFPI = true;
4626 UnresolvedAncestorPad = &FPI;
4627 }
4628
4629 if (ExitsFPI) {
4630 // This unwind edge exits FPI. Make sure it agrees with other
4631 // such edges.
4632 if (FirstUser) {
4633 Check(UnwindPad == FirstUnwindPad,
4634 "Unwind edges out of a funclet "
4635 "pad must have the same unwind "
4636 "dest",
4637 &FPI, U, FirstUser);
4638 } else {
4639 FirstUser = U;
4640 FirstUnwindPad = UnwindPad;
4641 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4642 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4643 getParentPad(UnwindPad) == getParentPad(&FPI))
4644 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4645 }
4646 }
4647 // Make sure we visit all uses of FPI, but for nested pads stop as
4648 // soon as we know where they unwind to.
4649 if (CurrentPad != &FPI)
4650 break;
4651 }
4652 if (UnresolvedAncestorPad) {
4653 if (CurrentPad == UnresolvedAncestorPad) {
4654 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4655 // we've found an unwind edge that exits it, because we need to verify
4656 // all direct uses of FPI.
4657 assert(CurrentPad == &FPI);
4658 continue;
4659 }
4660 // Pop off the worklist any nested pads that we've found an unwind
4661 // destination for. The pads on the worklist are the uncles,
4662 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4663 // for all ancestors of CurrentPad up to but not including
4664 // UnresolvedAncestorPad.
4665 Value *ResolvedPad = CurrentPad;
4666 while (!Worklist.empty()) {
4667 Value *UnclePad = Worklist.back();
4668 Value *AncestorPad = getParentPad(UnclePad);
4669 // Walk ResolvedPad up the ancestor list until we either find the
4670 // uncle's parent or the last resolved ancestor.
4671 while (ResolvedPad != AncestorPad) {
4672 Value *ResolvedParent = getParentPad(ResolvedPad);
4673 if (ResolvedParent == UnresolvedAncestorPad) {
4674 break;
4675 }
4676 ResolvedPad = ResolvedParent;
4677 }
4678 // If the resolved ancestor search didn't find the uncle's parent,
4679 // then the uncle is not yet resolved.
4680 if (ResolvedPad != AncestorPad)
4681 break;
4682 // This uncle is resolved, so pop it from the worklist.
4683 Worklist.pop_back();
4684 }
4685 }
4686 }
4687
4688 if (FirstUnwindPad) {
4689 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4690 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4691 Value *SwitchUnwindPad;
4692 if (SwitchUnwindDest)
4693 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4694 else
4695 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4696 Check(SwitchUnwindPad == FirstUnwindPad,
4697 "Unwind edges out of a catch must have the same unwind dest as "
4698 "the parent catchswitch",
4699 &FPI, FirstUser, CatchSwitch);
4700 }
4701 }
4702
4703 visitInstruction(FPI);
4704}
4705
4706void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4707 BasicBlock *BB = CatchSwitch.getParent();
4708
4709 Function *F = BB->getParent();
4710 Check(F->hasPersonalityFn(),
4711 "CatchSwitchInst needs to be in a function with a personality.",
4712 &CatchSwitch);
4713
4714 // The catchswitch instruction must be the first non-PHI instruction in the
4715 // block.
4716 Check(BB->getFirstNonPHI() == &CatchSwitch,
4717 "CatchSwitchInst not the first non-PHI instruction in the block.",
4718 &CatchSwitch);
4719
4720 auto *ParentPad = CatchSwitch.getParentPad();
4721 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4722 "CatchSwitchInst has an invalid parent.", ParentPad);
4723
4724 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4725 Instruction *I = UnwindDest->getFirstNonPHI();
4726 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4727 "CatchSwitchInst must unwind to an EH block which is not a "
4728 "landingpad.",
4729 &CatchSwitch);
4730
4731 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4732 if (getParentPad(I) == ParentPad)
4733 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4734 }
4735
4736 Check(CatchSwitch.getNumHandlers() != 0,
4737 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4738
4739 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4740 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4741 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4742 }
4743
4744 visitEHPadPredecessors(CatchSwitch);
4745 visitTerminator(CatchSwitch);
4746}
4747
4748void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4749 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4750 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4751 CRI.getOperand(0));
4752
4753 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4754 Instruction *I = UnwindDest->getFirstNonPHI();
4755 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4756 "CleanupReturnInst must unwind to an EH block which is not a "
4757 "landingpad.",
4758 &CRI);
4759 }
4760
4761 visitTerminator(CRI);
4762}
4763
4764void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4765 Instruction *Op = cast<Instruction>(I.getOperand(i));
4766 // If we have an invalid invoke, don't try to compute the dominance.
4767 // We already reject it in the invoke specific checks and the dominance
4768 // computation doesn't handle multiple edges.
4769 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4770 if (II->getNormalDest() == II->getUnwindDest())
4771 return;
4772 }
4773
4774 // Quick check whether the def has already been encountered in the same block.
4775 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4776 // uses are defined to happen on the incoming edge, not at the instruction.
4777 //
4778 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4779 // wrapping an SSA value, assert that we've already encountered it. See
4780 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4781 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4782 return;
4783
4784 const Use &U = I.getOperandUse(i);
4785 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4786}
4787
4788void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
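  // Illustrative shape of the attachment being checked (values are examples):
  //   %v = load ptr, ptr %p, !dereferenceable !0
  //   !0 = !{i64 8}
  // i.e. a single i64 operand giving the number of dereferenceable bytes.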
4789 Check(I.getType()->isPointerTy(),
4790 "dereferenceable, dereferenceable_or_null "
4791 "apply only to pointer types",
4792 &I);
4793 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4794 "dereferenceable, dereferenceable_or_null apply only to load"
4795 " and inttoptr instructions, use attributes for calls or invokes",
4796 &I);
4797 Check(MD->getNumOperands() == 1,
4798 "dereferenceable, dereferenceable_or_null "
4799 "take one operand!",
4800 &I);
4801 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4802 Check(CI && CI->getType()->isIntegerTy(64),
4803 "dereferenceable, "
4804 "dereferenceable_or_null metadata value must be an i64!",
4805 &I);
4806}
4807
4808void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4809 Check(MD->getNumOperands() >= 2,
4810 "!prof annotations should have no less than 2 operands", MD);
4811
4812 // Check first operand.
4813 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4814 Check(isa<MDString>(MD->getOperand(0)),
4815 "expected string with name of the !prof annotation", MD);
4816 MDString *MDS = cast<MDString>(MD->getOperand(0));
4817 StringRef ProfName = MDS->getString();
4818
4819 // Check consistency of !prof branch_weights metadata.
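  // A typical attachment (weights are illustrative) looks like:
  //   br i1 %cond, label %bb1, label %bb2, !prof !0
  //   !0 = !{!"branch_weights", i32 20, i32 10}
  // with one weight per successor (a single weight for plain calls).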
4820 if (ProfName == "branch_weights") {
4821 unsigned NumBranchWeights = getNumBranchWeights(*MD);
4822 if (isa<InvokeInst>(&I)) {
4823 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
4824 "Wrong number of InvokeInst branch_weights operands", MD);
4825 } else {
4826 unsigned ExpectedNumOperands = 0;
4827 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4828 ExpectedNumOperands = BI->getNumSuccessors();
4829 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4830 ExpectedNumOperands = SI->getNumSuccessors();
4831 else if (isa<CallInst>(&I))
4832 ExpectedNumOperands = 1;
4833 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4834 ExpectedNumOperands = IBI->getNumDestinations();
4835 else if (isa<SelectInst>(&I))
4836 ExpectedNumOperands = 2;
4837 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4838 ExpectedNumOperands = CI->getNumSuccessors();
4839 else
4840 CheckFailed("!prof branch_weights are not allowed for this instruction",
4841 MD);
4842
4843 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
4844 MD);
4845 }
4846 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
4847 ++i) {
4848 auto &MDO = MD->getOperand(i);
4849 Check(MDO, "second operand should not be null", MD);
4850 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4851 "!prof brunch_weights operand is not a const int");
4852 }
4853 }
4854}
4855
4856void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4857 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4858 bool ExpectedInstTy =
4859 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4860 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4861 I, MD);
4862 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4863 // only be found as DbgAssignIntrinsic operands.
4864 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4865 for (auto *User : AsValue->users()) {
4866 CheckDI(isa<DbgAssignIntrinsic>(User),
4867 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4868 MD, User);
4869 // All of the dbg.assign intrinsics should be in the same function as I.
4870 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4871 CheckDI(DAI->getFunction() == I.getFunction(),
4872 "dbg.assign not in same function as inst", DAI, &I);
4873 }
4874 }
4875 for (DbgVariableRecord *DVR :
4876 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4877 CheckDI(DVR->isDbgAssign(),
4878 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4879 CheckDI(DVR->getFunction() == I.getFunction(),
4880 "DVRAssign not in same function as inst", DVR, &I);
4881 }
4882}
4883
4884void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4886 "!mmra metadata attached to unexpected instruction kind", I, MD);
4887
4888 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4889 // list of tags such as !2 in the following example:
4890 // !0 = !{!"a", !"b"}
4891 // !1 = !{!"c", !"d"}
4892 // !2 = !{!0, !1}
4893 if (MMRAMetadata::isTagMD(MD))
4894 return;
4895
4896 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4897 for (const MDOperand &MDOp : MD->operands())
4898 Check(MMRAMetadata::isTagMD(MDOp.get()),
4899 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4900}
4901
4902void Verifier::visitCallStackMetadata(MDNode *MD) {
4903 // Call stack metadata should consist of a list of at least 1 constant int
4904 // (representing a hash of the location).
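  // e.g. (hash values are illustrative): !0 = !{i64 123, i64 456, i64 789}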
4905 Check(MD->getNumOperands() >= 1,
4906 "call stack metadata should have at least 1 operand", MD);
4907
4908 for (const auto &Op : MD->operands())
4909 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4910 "call stack metadata operand should be constant integer", Op);
4911}
4912
4913void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4914 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4915 Check(MD->getNumOperands() >= 1,
4916 "!memprof annotations should have at least 1 metadata operand "
4917 "(MemInfoBlock)",
4918 MD);
4919
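  // Rough shape being verified (all names and values are illustrative):
  //   call ptr @malloc(i64 8), !memprof !0
  //   !0 = !{!1}                 ; list of MemInfoBlocks
  //   !1 = !{!2, !"notcold"}     ; MIB: call stack followed by MDString tags
  //   !2 = !{i64 123, i64 456}   ; call stack of location hashes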
4920 // Check each MIB
4921 for (auto &MIBOp : MD->operands()) {
4922 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4923 // The first operand of an MIB should be the call stack metadata.
4924 // The rest of the operands should be MDString tags, and there should be
4925 // at least one.
4926 Check(MIB->getNumOperands() >= 2,
4927 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4928
4929 // Check call stack metadata (first operand).
4930 Check(MIB->getOperand(0) != nullptr,
4931 "!memprof MemInfoBlock first operand should not be null", MIB);
4932 Check(isa<MDNode>(MIB->getOperand(0)),
4933 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4934 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4935 visitCallStackMetadata(StackMD);
4936
4937 // Check that remaining operands are MDString.
4938     Check(llvm::all_of(llvm::drop_begin(MIB->operands()),
4939                        [](const MDOperand &Op) { return isa<MDString>(Op); }),
4940 "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4941 }
4942}
4943
4944void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4945 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4946 // Verify the partial callstack annotated from memprof profiles. This callsite
4947 // is a part of a profiled allocation callstack.
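  // e.g. (illustrative): call ptr @wrapper(), !callsite !0  with  !0 = !{i64 123, i64 456}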
4948 visitCallStackMetadata(MD);
4949}
4950
4951void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
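  // Accepted shapes (illustrative): a tuple of strings and/or tuples of strings,
  //   !0 = !{!"auto-init"}   or   !0 = !{!1}  with  !1 = !{!"a", !"b"}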
4952 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4953 Check(Annotation->getNumOperands() >= 1,
4954 "annotation must have at least one operand");
4955 for (const MDOperand &Op : Annotation->operands()) {
4956 bool TupleOfStrings =
4957 isa<MDTuple>(Op.get()) &&
4958 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4959 return isa<MDString>(Annotation.get());
4960 });
4961 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4962 "operands must be a string or a tuple of strings");
4963 }
4964}
4965
4966void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
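  // Expected shape of a single scope (illustrative):
  //   !1 = distinct !{!1}                 ; domain: self-referential, optional name
  //   !0 = distinct !{!0, !1, !"scope"}   ; scope: self-ref/string, domain, optional name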
4967 unsigned NumOps = MD->getNumOperands();
4968 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4969 MD);
4970 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4971 "first scope operand must be self-referential or string", MD);
4972 if (NumOps == 3)
4973 Check(isa<MDString>(MD->getOperand(2)),
4974 "third scope operand must be string (if used)", MD);
4975
4976 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4977 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4978
4979 unsigned NumDomainOps = Domain->getNumOperands();
4980 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4981 "domain must have one or two operands", Domain);
4982 Check(Domain->getOperand(0).get() == Domain ||
4983 isa<MDString>(Domain->getOperand(0)),
4984 "first domain operand must be self-referential or string", Domain);
4985 if (NumDomainOps == 2)
4986 Check(isa<MDString>(Domain->getOperand(1)),
4987 "second domain operand must be string (if used)", Domain);
4988}
4989
4990void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4991 for (const MDOperand &Op : MD->operands()) {
4992 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4993 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4994 visitAliasScopeMetadata(OpMD);
4995 }
4996}
4997
4998void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
4999 auto IsValidAccessScope = [](const MDNode *MD) {
5000 return MD->getNumOperands() == 0 && MD->isDistinct();
5001 };
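  // Accepted shapes (illustrative): a single distinct, empty node such as
  //   !0 = distinct !{}
  // or a list of such nodes, e.g. !1 = !{!0, !2}.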
5002
5003 // It must be either an access scope itself...
5004 if (IsValidAccessScope(MD))
5005 return;
5006
5007 // ...or a list of access scopes.
5008 for (const MDOperand &Op : MD->operands()) {
5009 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5010 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5011 Check(IsValidAccessScope(OpMD),
5012 "Access scope list contains invalid access scope", MD);
5013 }
5014}
5015
5016/// verifyInstruction - Verify that an instruction is well formed.
5017///
5018void Verifier::visitInstruction(Instruction &I) {
5019 BasicBlock *BB = I.getParent();
5020 Check(BB, "Instruction not embedded in basic block!", &I);
5021
5022 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5023 for (User *U : I.users()) {
5024 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5025 "Only PHI nodes may reference their own value!", &I);
5026 }
5027 }
5028
5029 // Check that void typed values don't have names
5030 Check(!I.getType()->isVoidTy() || !I.hasName(),
5031 "Instruction has a name, but provides a void value!", &I);
5032
5033 // Check that the return value of the instruction is either void or a legal
5034 // value type.
5035 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5036 "Instruction returns a non-scalar type!", &I);
5037
5038 // Check that the instruction doesn't produce metadata. Calls are already
5039 // checked against the callee type.
5040 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5041 "Invalid use of metadata!", &I);
5042
5043 // Check that all uses of the instruction, if they are instructions
5044 // themselves, actually have parent basic blocks. If the use is not an
5045 // instruction, it is an error!
5046 for (Use &U : I.uses()) {
5047 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5048 Check(Used->getParent() != nullptr,
5049 "Instruction referencing"
5050 " instruction not embedded in a basic block!",
5051 &I, Used);
5052 else {
5053 CheckFailed("Use of instruction is not an instruction!", U);
5054 return;
5055 }
5056 }
5057
5058 // Get a pointer to the call base of the instruction if it is some form of
5059 // call.
5060 const CallBase *CBI = dyn_cast<CallBase>(&I);
5061
5062 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5063 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5064
5065 // Check to make sure that only first-class-values are operands to
5066 // instructions.
5067 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5068 Check(false, "Instruction operands must be first-class values!", &I);
5069 }
5070
5071 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5072 // This code checks whether the function is used as the operand of a
5073 // clang_arc_attachedcall operand bundle.
5074 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5075 int Idx) {
5076 return CBI && CBI->isOperandBundleOfType(
5078 };
5079
5080 // Check to make sure that the "address of" an intrinsic function is never
5081 // taken. Ignore cases where the address of the intrinsic function is used
5082 // as the argument of operand bundle "clang.arc.attachedcall" as those
5083 // cases are handled in verifyAttachedCallBundle.
5084 Check((!F->isIntrinsic() ||
5085 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5086 IsAttachedCallOperand(F, CBI, i)),
5087 "Cannot take the address of an intrinsic!", &I);
5088 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5089 F->getIntrinsicID() == Intrinsic::donothing ||
5090 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5091 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5092 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5093 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5094 F->getIntrinsicID() == Intrinsic::coro_resume ||
5095 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5096 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5097 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5098 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5099 F->getIntrinsicID() ==
5100 Intrinsic::experimental_patchpoint_void ||
5101 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5102 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5103 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5104 IsAttachedCallOperand(F, CBI, i),
5105 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5106 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5107 &I);
5108 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5109 &M, F, F->getParent());
5110 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5111 Check(OpBB->getParent() == BB->getParent(),
5112 "Referring to a basic block in another function!", &I);
5113 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5114 Check(OpArg->getParent() == BB->getParent(),
5115 "Referring to an argument in another function!", &I);
5116 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5117 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5118 &M, GV, GV->getParent());
5119 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5120 Check(OpInst->getFunction() == BB->getParent(),
5121 "Referring to an instruction in another function!", &I);
5122 verifyDominatesUse(I, i);
5123 } else if (isa<InlineAsm>(I.getOperand(i))) {
5124 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5125 "Cannot take the address of an inline asm!", &I);
5126 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5127 visitConstantExprsRecursively(CPA);
5128 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5129 if (CE->getType()->isPtrOrPtrVectorTy()) {
5130 // If we have a ConstantExpr pointer, we need to see if it came from an
5131 // illegal bitcast.
5132 visitConstantExprsRecursively(CE);
5133 }
5134 }
5135 }
5136
5137 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5138 Check(I.getType()->isFPOrFPVectorTy(),
5139 "fpmath requires a floating point result!", &I);
5140 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5141 if (ConstantFP *CFP0 =
5142 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5143 const APFloat &Accuracy = CFP0->getValueAPF();
5144 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5145 "fpmath accuracy must have float type", &I);
5146 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5147 "fpmath accuracy not a positive number!", &I);
5148 } else {
5149 Check(false, "invalid fpmath accuracy!", &I);
5150 }
5151 }
5152
5153 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5154 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5155 "Ranges are only for loads, calls and invokes!", &I);
5156 visitRangeMetadata(I, Range, I.getType());
5157 }
5158
5159 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5160 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5161 "invariant.group metadata is only for loads and stores", &I);
5162 }
5163
5164 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5165 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5166 &I);
5167 Check(isa<LoadInst>(I),
5168 "nonnull applies only to load instructions, use attributes"
5169 " for calls or invokes",
5170 &I);
5171 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5172 }
5173
5174 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5175 visitDereferenceableMetadata(I, MD);
5176
5177 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5178 visitDereferenceableMetadata(I, MD);
5179
5180 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5181 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5182
5183 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5184 visitAliasScopeListMetadata(MD);
5185 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5186 visitAliasScopeListMetadata(MD);
5187
5188 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5189 visitAccessGroupMetadata(MD);
5190
5191 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
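    // e.g. (illustrative): %p = load ptr, ptr %pp, !align !0  with  !0 = !{i64 16}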
5192 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5193 &I);
5194 Check(isa<LoadInst>(I),
5195 "align applies only to load instructions, "
5196 "use attributes for calls or invokes",
5197 &I);
5198 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5199 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5200 Check(CI && CI->getType()->isIntegerTy(64),
5201 "align metadata value must be an i64!", &I);
5202 uint64_t Align = CI->getZExtValue();
5203 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5204 &I);
5206 "alignment is larger that implementation defined limit", &I);
5207 }
5208
5209 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5210 visitProfMetadata(I, MD);
5211
5212 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5213 visitMemProfMetadata(I, MD);
5214
5215 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5216 visitCallsiteMetadata(I, MD);
5217
5218 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5219 visitDIAssignIDMetadata(I, MD);
5220
5221 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5222 visitMMRAMetadata(I, MMRA);
5223
5224 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5225 visitAnnotationMetadata(Annotation);
5226
5227 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5228 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5229 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5230 }
5231
5232 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5233 verifyFragmentExpression(*DII);
5234 verifyNotEntryValue(*DII);
5235 }
5236
5237   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5238   I.getAllMetadata(MDs);
5239 for (auto Attachment : MDs) {
5240 unsigned Kind = Attachment.first;
5241 auto AllowLocs =
5242 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5243 ? AreDebugLocsAllowed::Yes
5244 : AreDebugLocsAllowed::No;
5245 visitMDNode(*Attachment.second, AllowLocs);
5246 }
5247
5248 InstsInThisBlock.insert(&I);
5249}
5250
5251/// Allow intrinsics to be verified in different ways.
5252void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5253 Function *IF = Call.getCalledFunction();
5254 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5255 IF);
5256
5257 // Verify that the intrinsic prototype lines up with what the .td files
5258 // describe.
5259 FunctionType *IFTy = IF->getFunctionType();
5260 bool IsVarArg = IFTy->isVarArg();
5261
5262   SmallVector<Intrinsic::IITDescriptor, 8> Table;
5263   Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
5264   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5265
5266 // Walk the descriptors to extract overloaded types.
5271 "Intrinsic has incorrect return type!", IF);
5273 "Intrinsic has incorrect argument type!", IF);
5274
5275 // Verify if the intrinsic call matches the vararg property.
5276 if (IsVarArg)
5278 "Intrinsic was not defined with variable arguments!", IF);
5279 else
5281 "Callsite was not defined with variable arguments!", IF);
5282
5283 // All descriptors should be absorbed by now.
5284 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5285
5286 // Now that we have the intrinsic ID and the actual argument types (and we
5287 // know they are legal for the intrinsic!) get the intrinsic name through the
5288 // usual means. This allows us to verify the mangling of argument types into
5289 // the name.
5290 const std::string ExpectedName =
5291 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5292 Check(ExpectedName == IF->getName(),
5293 "Intrinsic name not mangled correctly for type arguments! "
5294 "Should be: " +
5295 ExpectedName,
5296 IF);
5297
5298 // If the intrinsic takes MDNode arguments, verify that they are either global
5299 // or are local to *this* function.
5300 for (Value *V : Call.args()) {
5301 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5302 visitMetadataAsValue(*MD, Call.getCaller());
5303 if (auto *Const = dyn_cast<Constant>(V))
5304 Check(!Const->getType()->isX86_AMXTy(),
5305 "const x86_amx is not allowed in argument!");
5306 }
5307
5308 switch (ID) {
5309 default:
5310 break;
5311 case Intrinsic::assume: {
5312 for (auto &Elem : Call.bundle_op_infos()) {
5313 unsigned ArgCount = Elem.End - Elem.Begin;
5314 // Separate storage assumptions are special insofar as they're the only
5315 // operand bundles allowed on assumes that aren't parameter attributes.
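      // e.g. (illustrative):
      //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]
      //   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16)]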
5316 if (Elem.Tag->getKey() == "separate_storage") {
5317 Check(ArgCount == 2,
5318 "separate_storage assumptions should have 2 arguments", Call);
5319 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5320 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5321 "arguments to separate_storage assumptions should be pointers",
5322 Call);
5323 return;
5324 }
5325 Check(Elem.Tag->getKey() == "ignore" ||
5326 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5327 "tags must be valid attribute names", Call);
5328       Attribute::AttrKind Kind =
5329           Attribute::getAttrKindFromName(Elem.Tag->getKey());
5330 if (Kind == Attribute::Alignment) {
5331 Check(ArgCount <= 3 && ArgCount >= 2,
5332 "alignment assumptions should have 2 or 3 arguments", Call);
5333 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5334 "first argument should be a pointer", Call);
5335 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5336 "second argument should be an integer", Call);
5337 if (ArgCount == 3)
5338 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5339 "third argument should be an integer if present", Call);
5340 return;
5341 }
5342 Check(ArgCount <= 2, "too many arguments", Call);
5343 if (Kind == Attribute::None)
5344 break;
5345 if (Attribute::isIntAttrKind(Kind)) {
5346 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5347 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5348 "the second argument should be a constant integral value", Call);
5349 } else if (Attribute::canUseAsParamAttr(Kind)) {
5350 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5351 } else if (Attribute::canUseAsFnAttr(Kind)) {
5352 Check((ArgCount) == 0, "this attribute has no argument", Call);
5353 }
5354 }
5355 break;
5356 }
5357 case Intrinsic::ucmp:
5358 case Intrinsic::scmp: {
5359 Type *SrcTy = Call.getOperand(0)->getType();
5360 Type *DestTy = Call.getType();
5361
5362 Check(DestTy->getScalarSizeInBits() >= 2,
5363 "result type must be at least 2 bits wide", Call);
5364
5365 bool IsDestTypeVector = DestTy->isVectorTy();
5366 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5367 "ucmp/scmp argument and result types must both be either vector or "
5368 "scalar types",
5369 Call);
5370 if (IsDestTypeVector) {
5371 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5372 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5373 Check(SrcVecLen == DestVecLen,
5374 "return type and arguments must have the same number of "
5375 "elements",
5376 Call);
5377 }
5378 break;
5379 }
5380 case Intrinsic::coro_id: {
5381 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5382 if (isa<ConstantPointerNull>(InfoArg))
5383 break;
5384 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5385 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5386 "info argument of llvm.coro.id must refer to an initialized "
5387 "constant");
5388 Constant *Init = GV->getInitializer();
5389 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5390 "info argument of llvm.coro.id must refer to either a struct or "
5391 "an array");
5392 break;
5393 }
5394 case Intrinsic::is_fpclass: {
5395 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5396 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5397 "unsupported bits for llvm.is.fpclass test mask");
5398 break;
5399 }
5400 case Intrinsic::fptrunc_round: {
5401 // Check the rounding mode
5402 Metadata *MD = nullptr;
5403 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5404 if (MAV)
5405 MD = MAV->getMetadata();
5406
5407 Check(MD != nullptr, "missing rounding mode argument", Call);
5408
5409 Check(isa<MDString>(MD),
5410 ("invalid value for llvm.fptrunc.round metadata operand"
5411 " (the operand should be a string)"),
5412 MD);
5413
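    // e.g. (illustrative):
    //   %r = call float @llvm.fptrunc.round.f32.f64(double %x,
    //                                               metadata !"round.towardzero")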
5414 std::optional<RoundingMode> RoundMode =
5415 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5416 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5417 "unsupported rounding mode argument", Call);
5418 break;
5419 }
5420#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5421#include "llvm/IR/VPIntrinsics.def"
5422#undef BEGIN_REGISTER_VP_INTRINSIC
5423 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5424 break;
5425#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5426 case Intrinsic::INTRINSIC:
5427#include "llvm/IR/ConstrainedOps.def"
5428#undef INSTRUCTION
5429 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5430 break;
5431 case Intrinsic::dbg_declare: // llvm.dbg.declare
5432 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5433 "invalid llvm.dbg.declare intrinsic call 1", Call);
5434 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5435 break;
5436 case Intrinsic::dbg_value: // llvm.dbg.value
5437 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5438 break;
5439 case Intrinsic::dbg_assign: // llvm.dbg.assign
5440 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5441 break;
5442 case Intrinsic::dbg_label: // llvm.dbg.label
5443 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5444 break;
5445 case Intrinsic::memcpy:
5446 case Intrinsic::memcpy_inline:
5447 case Intrinsic::memmove:
5448 case Intrinsic::memset:
5449 case Intrinsic::memset_inline: {
5450 break;
5451 }
5452 case Intrinsic::memcpy_element_unordered_atomic:
5453 case Intrinsic::memmove_element_unordered_atomic:
5454 case Intrinsic::memset_element_unordered_atomic: {
5455 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5456
5457 ConstantInt *ElementSizeCI =
5458 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5459 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5460 Check(ElementSizeVal.isPowerOf2(),
5461 "element size of the element-wise atomic memory intrinsic "
5462 "must be a power of 2",
5463 Call);
5464
5465 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5466 return Alignment && ElementSizeVal.ule(Alignment->value());
5467 };
5468 Check(IsValidAlignment(AMI->getDestAlign()),
5469 "incorrect alignment of the destination argument", Call);
5470 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5471 Check(IsValidAlignment(AMT->getSourceAlign()),
5472 "incorrect alignment of the source argument", Call);
5473 }
5474 break;
5475 }
5476 case Intrinsic::call_preallocated_setup: {
5477 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5478 Check(NumArgs != nullptr,
5479 "llvm.call.preallocated.setup argument must be a constant");
5480 bool FoundCall = false;
5481 for (User *U : Call.users()) {
5482 auto *UseCall = dyn_cast<CallBase>(U);
5483 Check(UseCall != nullptr,
5484 "Uses of llvm.call.preallocated.setup must be calls");
5485 const Function *Fn = UseCall->getCalledFunction();
5486 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5487 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5488 Check(AllocArgIndex != nullptr,
5489 "llvm.call.preallocated.alloc arg index must be a constant");
5490 auto AllocArgIndexInt = AllocArgIndex->getValue();
5491 Check(AllocArgIndexInt.sge(0) &&
5492 AllocArgIndexInt.slt(NumArgs->getValue()),
5493 "llvm.call.preallocated.alloc arg index must be between 0 and "
5494 "corresponding "
5495 "llvm.call.preallocated.setup's argument count");
5496 } else if (Fn && Fn->getIntrinsicID() ==
5497 Intrinsic::call_preallocated_teardown) {
5498 // nothing to do
5499 } else {
5500 Check(!FoundCall, "Can have at most one call corresponding to a "
5501 "llvm.call.preallocated.setup");
5502 FoundCall = true;
5503 size_t NumPreallocatedArgs = 0;
5504 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5505 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5506 ++NumPreallocatedArgs;
5507 }
5508 }
5509 Check(NumPreallocatedArgs != 0,
5510 "cannot use preallocated intrinsics on a call without "
5511 "preallocated arguments");
5512 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5513 "llvm.call.preallocated.setup arg size must be equal to number "
5514 "of preallocated arguments "
5515 "at call site",
5516 Call, *UseCall);
5517 // getOperandBundle() cannot be called if more than one of the operand
5518 // bundle exists. There is already a check elsewhere for this, so skip
5519 // here if we see more than one.
5520 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5521 1) {
5522 return;
5523 }
5524 auto PreallocatedBundle =
5525 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5526 Check(PreallocatedBundle,
5527 "Use of llvm.call.preallocated.setup outside intrinsics "
5528 "must be in \"preallocated\" operand bundle");
5529 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5530 "preallocated bundle must have token from corresponding "
5531 "llvm.call.preallocated.setup");
5532 }
5533 }
5534 break;
5535 }
5536 case Intrinsic::call_preallocated_arg: {
5537 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5538 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5539 Intrinsic::call_preallocated_setup,
5540 "llvm.call.preallocated.arg token argument must be a "
5541 "llvm.call.preallocated.setup");
5542 Check(Call.hasFnAttr(Attribute::Preallocated),
5543 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5544 "call site attribute");
5545 break;
5546 }
5547 case Intrinsic::call_preallocated_teardown: {
5548 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5549 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5550 Intrinsic::call_preallocated_setup,
5551 "llvm.call.preallocated.teardown token argument must be a "
5552 "llvm.call.preallocated.setup");
5553 break;
5554 }
5555 case Intrinsic::gcroot:
5556 case Intrinsic::gcwrite:
5557 case Intrinsic::gcread:
5558 if (ID == Intrinsic::gcroot) {
5559 AllocaInst *AI =
5560 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5561 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5562 Check(isa<Constant>(Call.getArgOperand(1)),
5563 "llvm.gcroot parameter #2 must be a constant.", Call);
5564 if (!AI->getAllocatedType()->isPointerTy()) {
5565 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5566 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5567 "or argument #2 must be a non-null constant.",
5568 Call);
5569 }
5570 }
5571
5572 Check(Call.getParent()->getParent()->hasGC(),
5573 "Enclosing function does not use GC.", Call);
5574 break;
5575 case Intrinsic::init_trampoline:
5576 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5577 "llvm.init_trampoline parameter #2 must resolve to a function.",
5578 Call);
5579 break;
5580 case Intrinsic::prefetch:
5581 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5582 "rw argument to llvm.prefetch must be 0-1", Call);
5583 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5584 "locality argument to llvm.prefetch must be 0-3", Call);
5585 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5586 "cache type argument to llvm.prefetch must be 0-1", Call);
5587 break;
5588 case Intrinsic::stackprotector:
5589 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5590 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5591 break;
5592 case Intrinsic::localescape: {
5593 BasicBlock *BB = Call.getParent();
5594 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5595 Call);
5596 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5597 Call);
5598 for (Value *Arg : Call.args()) {
5599 if (isa<ConstantPointerNull>(Arg))
5600 continue; // Null values are allowed as placeholders.
5601 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5602 Check(AI && AI->isStaticAlloca(),
5603 "llvm.localescape only accepts static allocas", Call);
5604 }
5605 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5606 SawFrameEscape = true;
5607 break;
5608 }
5609 case Intrinsic::localrecover: {
5610 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5611 Function *Fn = dyn_cast<Function>(FnArg);
5612 Check(Fn && !Fn->isDeclaration(),
5613 "llvm.localrecover first "
5614 "argument must be function defined in this module",
5615 Call);
5616 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5617 auto &Entry = FrameEscapeInfo[Fn];
5618 Entry.second = unsigned(
5619 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5620 break;
5621 }
5622
5623 case Intrinsic::experimental_gc_statepoint:
5624 if (auto *CI = dyn_cast<CallInst>(&Call))
5625 Check(!CI->isInlineAsm(),
5626 "gc.statepoint support for inline assembly unimplemented", CI);
5627 Check(Call.getParent()->getParent()->hasGC(),
5628 "Enclosing function does not use GC.", Call);
5629
5630 verifyStatepoint(Call);
5631 break;
5632 case Intrinsic::experimental_gc_result: {
5633 Check(Call.getParent()->getParent()->hasGC(),
5634 "Enclosing function does not use GC.", Call);
5635
5636 auto *Statepoint = Call.getArgOperand(0);
5637 if (isa<UndefValue>(Statepoint))
5638 break;
5639
5640 // Are we tied to a statepoint properly?
5641 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5642 const Function *StatepointFn =
5643 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5644 Check(StatepointFn && StatepointFn->isDeclaration() &&
5645 StatepointFn->getIntrinsicID() ==
5646 Intrinsic::experimental_gc_statepoint,
5647 "gc.result operand #1 must be from a statepoint", Call,
5648 Call.getArgOperand(0));
5649
5650 // Check that result type matches wrapped callee.
5651 auto *TargetFuncType =
5652 cast<FunctionType>(StatepointCall->getParamElementType(2));
5653 Check(Call.getType() == TargetFuncType->getReturnType(),
5654 "gc.result result type does not match wrapped callee", Call);
5655 break;
5656 }
5657 case Intrinsic::experimental_gc_relocate: {
5658 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5659
5660 Check(isa<PointerType>(Call.getType()->getScalarType()),
5661 "gc.relocate must return a pointer or a vector of pointers", Call);
5662
5663 // Check that this relocate is correctly tied to the statepoint
5664
5665 // This is case for relocate on the unwinding path of an invoke statepoint
5666 if (LandingPadInst *LandingPad =
5667 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5668
5669 const BasicBlock *InvokeBB =
5670 LandingPad->getParent()->getUniquePredecessor();
5671
5672 // Landingpad relocates should have only one predecessor with invoke
5673 // statepoint terminator
5674 Check(InvokeBB, "safepoints should have unique landingpads",
5675 LandingPad->getParent());
5676 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5677 InvokeBB);
5678 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5679 "gc relocate should be linked to a statepoint", InvokeBB);
5680 } else {
5681 // In all other cases relocate should be tied to the statepoint directly.
5682 // This covers relocates on a normal return path of invoke statepoint and
5683 // relocates of a call statepoint.
5684 auto *Token = Call.getArgOperand(0);
5685 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5686 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5687 }
5688
5689 // Verify rest of the relocate arguments.
5690 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5691
5692 // Both the base and derived must be piped through the safepoint.
5693 Value *Base = Call.getArgOperand(1);
5694 Check(isa<ConstantInt>(Base),
5695 "gc.relocate operand #2 must be integer offset", Call);
5696
5697 Value *Derived = Call.getArgOperand(2);
5698 Check(isa<ConstantInt>(Derived),
5699 "gc.relocate operand #3 must be integer offset", Call);
5700
5701 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5702 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5703
5704 // Check the bounds
5705 if (isa<UndefValue>(StatepointCall))
5706 break;
5707 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5708 .getOperandBundle(LLVMContext::OB_gc_live)) {
5709 Check(BaseIndex < Opt->Inputs.size(),
5710 "gc.relocate: statepoint base index out of bounds", Call);
5711 Check(DerivedIndex < Opt->Inputs.size(),
5712 "gc.relocate: statepoint derived index out of bounds", Call);
5713 }
5714
5715 // Relocated value must be either a pointer type or vector-of-pointer type,
5716 // but gc_relocate does not need to return the same pointer type as the
5717 // relocated pointer. It can be casted to the correct type later if it's
5718 // desired. However, they must have the same address space and 'vectorness'
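    // e.g. (illustrative): with a statepoint carrying
    //   ["gc-live"(ptr addrspace(1) %base, ptr addrspace(1) %derived)]
    // a relocate such as
    //   %new = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 1)
    // uses its two integer operands to index into that gc-live list.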
5719 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5720 auto *ResultType = Call.getType();
5721 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5722 auto *BaseType = Relocate.getBasePtr()->getType();
5723
5724 Check(BaseType->isPtrOrPtrVectorTy(),
5725 "gc.relocate: relocated value must be a pointer", Call);
5726 Check(DerivedType->isPtrOrPtrVectorTy(),
5727 "gc.relocate: relocated value must be a pointer", Call);
5728
5729 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5730 "gc.relocate: vector relocates to vector and pointer to pointer",
5731 Call);
5732 Check(
5733 ResultType->getPointerAddressSpace() ==
5734 DerivedType->getPointerAddressSpace(),
5735 "gc.relocate: relocating a pointer shouldn't change its address space",
5736 Call);
5737
5738 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5739 Check(GC, "gc.relocate: calling function must have GCStrategy",
5740 Call.getFunction());
5741 if (GC) {
5742 auto isGCPtr = [&GC](Type *PTy) {
5743 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5744 };
5745 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5746 Check(isGCPtr(BaseType),
5747 "gc.relocate: relocated value must be a gc pointer", Call);
5748 Check(isGCPtr(DerivedType),
5749 "gc.relocate: relocated value must be a gc pointer", Call);
5750 }
5751 break;
5752 }
5753 case Intrinsic::experimental_patchpoint: {
5754 if (Call.getCallingConv() == CallingConv::AnyReg) {
5755 Check(Call.getType()->isSingleValueType(),
5756 "patchpoint: invalid return type used with anyregcc", Call);
5757 }
5758 break;
5759 }
5760 case Intrinsic::eh_exceptioncode:
5761 case Intrinsic::eh_exceptionpointer: {
5762 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5763 "eh.exceptionpointer argument must be a catchpad", Call);
5764 break;
5765 }
5766 case Intrinsic::get_active_lane_mask: {
5767 Check(Call.getType()->isVectorTy(),
5768 "get_active_lane_mask: must return a "
5769 "vector",
5770 Call);
5771 auto *ElemTy = Call.getType()->getScalarType();
5772 Check(ElemTy->isIntegerTy(1),
5773 "get_active_lane_mask: element type is not "
5774 "i1",
5775 Call);
5776 break;
5777 }
5778 case Intrinsic::experimental_get_vector_length: {
5779 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5780 Check(!VF->isNegative() && !VF->isZero(),
5781 "get_vector_length: VF must be positive", Call);
5782 break;
5783 }
5784 case Intrinsic::masked_load: {
5785 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5786 Call);
5787
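    // e.g. (illustrative):
    //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
    //                                                  <4 x i1> %m, <4 x i32> %pt)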
5788 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5789 Value *Mask = Call.getArgOperand(2);
5790 Value *PassThru = Call.getArgOperand(3);
5791 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5792 Call);
5793 Check(Alignment->getValue().isPowerOf2(),
5794 "masked_load: alignment must be a power of 2", Call);
5795 Check(PassThru->getType() == Call.getType(),
5796 "masked_load: pass through and return type must match", Call);
5797 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5798 cast<VectorType>(Call.getType())->getElementCount(),
5799 "masked_load: vector mask must be same length as return", Call);
5800 break;
5801 }
5802 case Intrinsic::masked_store: {
5803 Value *Val = Call.getArgOperand(0);
5804 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5805 Value *Mask = Call.getArgOperand(3);
5806 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5807 Call);
5808 Check(Alignment->getValue().isPowerOf2(),
5809 "masked_store: alignment must be a power of 2", Call);
5810 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5811 cast<VectorType>(Val->getType())->getElementCount(),
5812 "masked_store: vector mask must be same length as value", Call);
5813 break;
5814 }
5815
5816 case Intrinsic::masked_gather: {
5817 const APInt &Alignment =
5818 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5819 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5820 "masked_gather: alignment must be 0 or a power of 2", Call);
5821 break;
5822 }
5823 case Intrinsic::masked_scatter: {
5824 const APInt &Alignment =
5825 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5826 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5827 "masked_scatter: alignment must be 0 or a power of 2", Call);
5828 break;
5829 }
5830
5831 case Intrinsic::experimental_guard: {
5832 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5833 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5834 "experimental_guard must have exactly one "
5835 "\"deopt\" operand bundle");
5836 break;
5837 }
5838
5839 case Intrinsic::experimental_deoptimize: {
5840 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5841 Call);
5842 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5843 "experimental_deoptimize must have exactly one "
5844 "\"deopt\" operand bundle");
5845 Check(Call.getType() == Call.getFunction()->getReturnType(),
5846 "experimental_deoptimize return type must match caller return type");
5847
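    // e.g. (illustrative):
    //   %r = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1) [ "deopt"() ]
    //   ret i32 %r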
5848 if (isa<CallInst>(Call)) {
5849 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5850 Check(RI,
5851 "calls to experimental_deoptimize must be followed by a return");
5852
5853 if (!Call.getType()->isVoidTy() && RI)
5854 Check(RI->getReturnValue() == &Call,
5855 "calls to experimental_deoptimize must be followed by a return "
5856 "of the value computed by experimental_deoptimize");
5857 }
5858
5859 break;
5860 }
5861 case Intrinsic::vastart: {
5862 Check(Call.getFunction()->isVarArg(),
5863 "va_start called in a non-varargs function");
5864 break;
5865 }
5866 case Intrinsic::vector_reduce_and:
5867 case Intrinsic::vector_reduce_or:
5868 case Intrinsic::vector_reduce_xor:
5869 case Intrinsic::vector_reduce_add:
5870 case Intrinsic::vector_reduce_mul:
5871 case Intrinsic::vector_reduce_smax:
5872 case Intrinsic::vector_reduce_smin:
5873 case Intrinsic::vector_reduce_umax:
5874 case Intrinsic::vector_reduce_umin: {
5875 Type *ArgTy = Call.getArgOperand(0)->getType();
5876 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5877 "Intrinsic has incorrect argument type!");
5878 break;
5879 }
5880 case Intrinsic::vector_reduce_fmax:
5881 case Intrinsic::vector_reduce_fmin: {
5882 Type *ArgTy = Call.getArgOperand(0)->getType();
5883 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5884 "Intrinsic has incorrect argument type!");
5885 break;
5886 }
5887 case Intrinsic::vector_reduce_fadd:
5888 case Intrinsic::vector_reduce_fmul: {
5889 // Unlike the other reductions, the first argument is a start value. The
5890 // second argument is the vector to be reduced.
5891 Type *ArgTy = Call.getArgOperand(1)->getType();
5892 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5893 "Intrinsic has incorrect argument type!");
5894 break;
5895 }
5896 case Intrinsic::smul_fix:
5897 case Intrinsic::smul_fix_sat:
5898 case Intrinsic::umul_fix:
5899 case Intrinsic::umul_fix_sat:
5900 case Intrinsic::sdiv_fix:
5901 case Intrinsic::sdiv_fix_sat:
5902 case Intrinsic::udiv_fix:
5903 case Intrinsic::udiv_fix_sat: {
5904 Value *Op1 = Call.getArgOperand(0);
5905 Value *Op2 = Call.getArgOperand(1);
5907 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5908 "vector of ints");
5910 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5911 "vector of ints");
5912
5913 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5914 Check(Op3->getType()->isIntegerTy(),
5915 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5916 Check(Op3->getBitWidth() <= 32,
5917 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5918
5919 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5920 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5921 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5922 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5923 "the operands");
5924 } else {
5925 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5926 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5927 "to the width of the operands");
5928 }
5929 break;
5930 }
5931 case Intrinsic::lrint:
5932 case Intrinsic::llrint: {
5933 Type *ValTy = Call.getArgOperand(0)->getType();
5934 Type *ResultTy = Call.getType();
5935 Check(
5936 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5937 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5938 "of floating-points, and result must be integer or vector of integers",
5939 &Call);
5940 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5941 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5942 &Call);
5943 if (ValTy->isVectorTy()) {
5944 Check(cast<VectorType>(ValTy)->getElementCount() ==
5945 cast<VectorType>(ResultTy)->getElementCount(),
5946 "llvm.lrint, llvm.llrint: argument must be same length as result",
5947 &Call);
5948 }
5949 break;
5950 }
5951 case Intrinsic::lround:
5952 case Intrinsic::llround: {
5953 Type *ValTy = Call.getArgOperand(0)->getType();
5954 Type *ResultTy = Call.getType();
5955 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5956 "Intrinsic does not support vectors", &Call);
5957 break;
5958 }
5959 case Intrinsic::bswap: {
5960 Type *Ty = Call.getType();
5961 unsigned Size = Ty->getScalarSizeInBits();
5962 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5963 break;
5964 }
5965 case Intrinsic::invariant_start: {
5966 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5967 Check(InvariantSize &&
5968 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5969 "invariant_start parameter must be -1, 0 or a positive number",
5970 &Call);
5971 break;
5972 }
5973 case Intrinsic::matrix_multiply:
5974 case Intrinsic::matrix_transpose:
5975 case Intrinsic::matrix_column_major_load:
5976 case Intrinsic::matrix_column_major_store: {
5977 Function *IF = Call.getCalledFunction();
5978 ConstantInt *Stride = nullptr;
5979 ConstantInt *NumRows;
5980 ConstantInt *NumColumns;
5981 VectorType *ResultTy;
5982 Type *Op0ElemTy = nullptr;
5983 Type *Op1ElemTy = nullptr;
5984 switch (ID) {
5985 case Intrinsic::matrix_multiply: {
5986 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5987 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
5988 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5989 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
5990 ->getNumElements() ==
5991 NumRows->getZExtValue() * N->getZExtValue(),
5992 "First argument of a matrix operation does not match specified "
5993 "shape!");
5994 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
5995 ->getNumElements() ==
5996 N->getZExtValue() * NumColumns->getZExtValue(),
5997 "Second argument of a matrix operation does not match specified "
5998 "shape!");
5999
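      // e.g. (illustrative) a 2x3 * 3x2 multiply, yielding a 2x2 result:
      //   %r = call <4 x float> @llvm.matrix.multiply.v4f32.v6f32.v6f32(
      //            <6 x float> %A, <6 x float> %B, i32 2, i32 3, i32 2)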
6000 ResultTy = cast<VectorType>(Call.getType());
6001 Op0ElemTy =
6002 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6003 Op1ElemTy =
6004 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6005 break;
6006 }
6007 case Intrinsic::matrix_transpose:
6008 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6009 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6010 ResultTy = cast<VectorType>(Call.getType());
6011 Op0ElemTy =
6012 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6013 break;
6014 case Intrinsic::matrix_column_major_load: {
6015 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6016 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6017 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6018 ResultTy = cast<VectorType>(Call.getType());
6019 break;
6020 }
6021 case Intrinsic::matrix_column_major_store: {
6022 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6023 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6024 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6025 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6026 Op0ElemTy =
6027 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6028 break;
6029 }
6030 default:
6031 llvm_unreachable("unexpected intrinsic");
6032 }
6033
6034 Check(ResultTy->getElementType()->isIntegerTy() ||
6035 ResultTy->getElementType()->isFloatingPointTy(),
6036 "Result type must be an integer or floating-point type!", IF);
6037
6038 if (Op0ElemTy)
6039 Check(ResultTy->getElementType() == Op0ElemTy,
6040 "Vector element type mismatch of the result and first operand "
6041 "vector!",
6042 IF);
6043
6044 if (Op1ElemTy)
6045 Check(ResultTy->getElementType() == Op1ElemTy,
6046 "Vector element type mismatch of the result and second operand "
6047 "vector!",
6048 IF);
6049
6050 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6051 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6052 "Result of a matrix operation does not fit in the returned vector!");
6053
6054 if (Stride)
6055 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6056 "Stride must be greater or equal than the number of rows!", IF);
6057
6058 break;
6059 }
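// Illustrative example of the shape checks above, for a 2x3 * 3x4 multiply:
//   %c = call <8 x float> @llvm.matrix.multiply.v8f32.v6f32.v12f32(
//            <6 x float> %a, <12 x float> %b, i32 2, i32 3, i32 4)
// The first operand must hold 2*3 elements, the second 3*4, and the result
// 2*4. For the column-major load/store forms, the stride must also be at
// least the number of rows.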
6060 case Intrinsic::vector_splice: {
6061 VectorType *VecTy = cast<VectorType>(Call.getType());
6062 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6063 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6064 if (Call.getParent() && Call.getParent()->getParent()) {
6065 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6066 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6067 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6068 }
6069 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6070 (Idx >= 0 && Idx < KnownMinNumElements),
6071 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6072 "known minimum number of elements in the vector. For scalable "
6073 "vectors the minimum number of elements is determined from "
6074 "vscale_range.",
6075 &Call);
6076 break;
6077 }
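// For illustration: splicing <vscale x 4 x i32> values inside a function
// with vscale_range(2,16) gives a known minimum element count of 4 * 2 = 8,
// so the immediate index must lie in [-8, 7].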
6078 case Intrinsic::experimental_stepvector: {
6079 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6080 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6081 VecTy->getScalarSizeInBits() >= 8,
6082 "experimental_stepvector only supported for vectors of integers "
6083 "with a bitwidth of at least 8.",
6084 &Call);
6085 break;
6086 }
6087 case Intrinsic::vector_insert: {
6088 Value *Vec = Call.getArgOperand(0);
6089 Value *SubVec = Call.getArgOperand(1);
6090 Value *Idx = Call.getArgOperand(2);
6091 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6092
6093 VectorType *VecTy = cast<VectorType>(Vec->getType());
6094 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6095
6096 ElementCount VecEC = VecTy->getElementCount();
6097 ElementCount SubVecEC = SubVecTy->getElementCount();
6098 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6099 "vector_insert parameters must have the same element "
6100 "type.",
6101 &Call);
6102 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6103 "vector_insert index must be a constant multiple of "
6104 "the subvector's known minimum vector length.");
6105
6106 // If this insertion is not the 'mixed' case where a fixed vector is
6107 // inserted into a scalable vector, ensure that the insertion of the
6108 // subvector does not overrun the parent vector.
6109 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6110 Check(IdxN < VecEC.getKnownMinValue() &&
6111 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6112 "subvector operand of vector_insert would overrun the "
6113 "vector being inserted into.");
6114 }
6115 break;
6116 }
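// Illustrative example:
//   %r = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(
//            <8 x i32> %vec, <4 x i32> %sub, i64 4)
// passes (4 is a multiple of the subvector length and 4 + 4 <= 8); an index
// of 2 (not a multiple) or 8 (overrun) fails the checks above.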
6117 case Intrinsic::vector_extract: {
6118 Value *Vec = Call.getArgOperand(0);
6119 Value *Idx = Call.getArgOperand(1);
6120 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6121
6122 VectorType *ResultTy = cast<VectorType>(Call.getType());
6123 VectorType *VecTy = cast<VectorType>(Vec->getType());
6124
6125 ElementCount VecEC = VecTy->getElementCount();
6126 ElementCount ResultEC = ResultTy->getElementCount();
6127
6128 Check(ResultTy->getElementType() == VecTy->getElementType(),
6129 "vector_extract result must have the same element "
6130 "type as the input vector.",
6131 &Call);
6132 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6133 "vector_extract index must be a constant multiple of "
6134 "the result type's known minimum vector length.");
6135
6136 // If this extraction is not the 'mixed' case where a fixed vector is
6137 // extracted from a scalable vector, ensure that the extraction does not
6138 // overrun the parent vector.
6139 if (VecEC.isScalable() == ResultEC.isScalable()) {
6140 Check(IdxN < VecEC.getKnownMinValue() &&
6141 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6142 "vector_extract would overrun.");
6143 }
6144 break;
6145 }
6146 case Intrinsic::experimental_noalias_scope_decl: {
6147 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6148 break;
6149 }
6150 case Intrinsic::preserve_array_access_index:
6151 case Intrinsic::preserve_struct_access_index:
6152 case Intrinsic::aarch64_ldaxr:
6153 case Intrinsic::aarch64_ldxr:
6154 case Intrinsic::arm_ldaex:
6155 case Intrinsic::arm_ldrex: {
6156 Type *ElemTy = Call.getParamElementType(0);
6157 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6158 &Call);
6159 break;
6160 }
6161 case Intrinsic::aarch64_stlxr:
6162 case Intrinsic::aarch64_stxr:
6163 case Intrinsic::arm_stlex:
6164 case Intrinsic::arm_strex: {
6165 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6166 Check(ElemTy,
6167 "Intrinsic requires elementtype attribute on second argument.",
6168 &Call);
6169 break;
6170 }
6171 case Intrinsic::aarch64_prefetch: {
6172 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6173 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6174 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6175 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6176 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6177 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6178 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6179 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6180 break;
6181 }
6182 case Intrinsic::callbr_landingpad: {
6183 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6184 Check(CBR, "intrinsic requires callbr operand", &Call);
6185 if (!CBR)
6186 break;
6187
6188 const BasicBlock *LandingPadBB = Call.getParent();
6189 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6190 if (!PredBB) {
6191 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6192 break;
6193 }
6194 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6195 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6196 &Call);
6197 break;
6198 }
6199 Check(llvm::any_of(CBR->getIndirectDests(),
6200 [LandingPadBB](const BasicBlock *IndDest) {
6201 return IndDest == LandingPadBB;
6202 }),
6203 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6204 "block in indirect destination list",
6205 &Call);
6206 const Instruction &First = *LandingPadBB->begin();
6207 Check(&First == &Call, "No other instructions may precede intrinsic",
6208 &Call);
6209 break;
6210 }
6211 case Intrinsic::amdgcn_cs_chain: {
6212 auto CallerCC = Call.getCaller()->getCallingConv();
6213 switch (CallerCC) {
6214 case CallingConv::AMDGPU_CS:
6215 case CallingConv::AMDGPU_CS_Chain:
6216 case CallingConv::AMDGPU_CS_ChainPreserve:
6217 break;
6218 default:
6219 CheckFailed("Intrinsic can only be used from functions with the "
6220 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6221 "calling conventions",
6222 &Call);
6223 break;
6224 }
6225
6226 Check(Call.paramHasAttr(2, Attribute::InReg),
6227 "SGPR arguments must have the `inreg` attribute", &Call);
6228 Check(!Call.paramHasAttr(3, Attribute::InReg),
6229 "VGPR arguments must not have the `inreg` attribute", &Call);
6230 break;
6231 }
6232 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6233 auto CallerCC = Call.getCaller()->getCallingConv();
6234 switch (CallerCC) {
6235 case CallingConv::AMDGPU_CS_Chain:
6236 case CallingConv::AMDGPU_CS_ChainPreserve:
6237 break;
6238 default:
6239 CheckFailed("Intrinsic can only be used from functions with the "
6240 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6241 "calling conventions",
6242 &Call);
6243 break;
6244 }
6245
6246 unsigned InactiveIdx = 1;
6247 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6248 "Value for inactive lanes must not have the `inreg` attribute",
6249 &Call);
6250 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6251 "Value for inactive lanes must be a function argument", &Call);
6252 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6253 "Value for inactive lanes must be a VGPR function argument", &Call);
6254 break;
6255 }
6256 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6257 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6258 Value *V = Call.getArgOperand(0);
6259 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6260 Check(RegCount % 8 == 0,
6261 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6262 Check((RegCount >= 24 && RegCount <= 256),
6263 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6264 break;
6265 }
6266 case Intrinsic::experimental_convergence_entry:
6267 case Intrinsic::experimental_convergence_anchor:
6268 break;
6269 case Intrinsic::experimental_convergence_loop:
6270 break;
6271 case Intrinsic::ptrmask: {
6272 Type *Ty0 = Call.getArgOperand(0)->getType();
6273 Type *Ty1 = Call.getArgOperand(1)->getType();
6275 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6276 "of pointers",
6277 &Call);
6278 Check(
6279 Ty0->isVectorTy() == Ty1->isVectorTy(),
6280 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6281 &Call);
6282 if (Ty0->isVectorTy())
6283 Check(cast<VectorType>(Ty0)->getElementCount() ==
6284 cast<VectorType>(Ty1)->getElementCount(),
6285 "llvm.ptrmask intrinsic arguments must have the same number of "
6286 "elements",
6287 &Call);
6288 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6289 "llvm.ptrmask intrinsic second argument bitwidth must match "
6290 "pointer index type size of first argument",
6291 &Call);
6292 break;
6293 }
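// For illustration, with a 64-bit pointer index type:
//   %p = call ptr @llvm.ptrmask.p0.i64(ptr %q, i64 -16)
// is well-formed; an i32 mask, or a scalar mask paired with a vector of
// pointers, is rejected by the checks above.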
6294 case Intrinsic::threadlocal_address: {
6295 const Value &Arg0 = *Call.getArgOperand(0);
6296 Check(isa<GlobalValue>(Arg0),
6297 "llvm.threadlocal.address first argument must be a GlobalValue");
6298 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6299 "llvm.threadlocal.address operand isThreadLocal() must be true");
6300 break;
6301 }
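// Illustrative example:
//   @tls = thread_local global i32 0
//   %p = call ptr @llvm.threadlocal.address.p0(ptr @tls)
// Passing a non-thread_local global, or any value that is not a
// GlobalValue, fails the checks above.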
6302 };
6303
6304 // Verify that there aren't any unmediated control transfers between funclets.
6305 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6306 Function *F = Call.getParent()->getParent();
6307 if (F->hasPersonalityFn() &&
6308 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6309 // Run EH funclet coloring on-demand and cache results for other intrinsic
6310 // calls in this function
6311 if (BlockEHFuncletColors.empty())
6312 BlockEHFuncletColors = colorEHFunclets(*F);
6313
6314 // Check for catch-/cleanup-pad in first funclet block
6315 bool InEHFunclet = false;
6316 BasicBlock *CallBB = Call.getParent();
6317 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6318 assert(CV.size() > 0 && "Uncolored block");
6319 for (BasicBlock *ColorFirstBB : CV)
6320 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6321 InEHFunclet = true;
6322
6323 // Check for funclet operand bundle
6324 bool HasToken = false;
6325 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6326 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6327 HasToken = true;
6328
6329 // This would cause silent code truncation in WinEHPrepare
6330 if (InEHFunclet)
6331 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6332 }
6333 }
6334}
6335
6336/// Carefully grab the subprogram from a local scope.
6337///
6338/// This carefully grabs the subprogram from a local scope, avoiding the
6339/// built-in assertions that would typically fire.
6340static DISubprogram *getSubprogram(Metadata *LocalScope) {
6341 if (!LocalScope)
6342 return nullptr;
6343
6344 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6345 return SP;
6346
6347 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6348 return getSubprogram(LB->getRawScope());
6349
6350 // Just return null; broken scope chains are checked elsewhere.
6351 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6352 return nullptr;
6353}
6354
6355void Verifier::visit(DbgLabelRecord &DLR) {
6356 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6357 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6358
6359 // Ignore broken !dbg attachments; they're checked elsewhere.
6360 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6361 if (!isa<DILocation>(N))
6362 return;
6363
6364 BasicBlock *BB = DLR.getParent();
6365 Function *F = BB ? BB->getParent() : nullptr;
6366
6367 // The scopes for variables and !dbg attachments must agree.
6368 DILabel *Label = DLR.getLabel();
6369 DILocation *Loc = DLR.getDebugLoc();
6370 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6371
6372 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6373 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6374 if (!LabelSP || !LocSP)
6375 return;
6376
6377 CheckDI(LabelSP == LocSP,
6378 "mismatched subprogram between #dbg_label label and !dbg attachment",
6379 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6380 Loc->getScope()->getSubprogram());
6381}
6382
6383void Verifier::visit(DbgVariableRecord &DVR) {
6384 BasicBlock *BB = DVR.getParent();
6385 Function *F = BB->getParent();
6386
6390 "invalid #dbg record type", &DVR, DVR.getType());
6391
6392 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6393 // DIArgList, or an empty MDNode (which is a legacy representation for an
6394 // "undef" location).
6395 auto *MD = DVR.getRawLocation();
6396 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6397 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6398 "invalid #dbg record address/value", &DVR, MD);
6399 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6400 visitValueAsMetadata(*VAM, F);
6401 else if (auto *AL = dyn_cast<DIArgList>(MD))
6402 visitDIArgList(*AL, F);
6403
6404 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6405 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6406 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6407
6408 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6409 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6410 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6411
6412 if (DVR.isDbgAssign()) {
6413 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6414 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6415 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6416 AreDebugLocsAllowed::No);
6417
6418 const auto *RawAddr = DVR.getRawAddress();
6419 // Similarly to the location above, the address for an assign
6420 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6421 // represents an undef address.
6422 CheckDI(
6423 isa<ValueAsMetadata>(RawAddr) ||
6424 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6425 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6426 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6427 visitValueAsMetadata(*VAM, F);
6428
6429 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6430 "invalid #dbg_assign address expression", &DVR,
6431 DVR.getRawAddressExpression());
6432 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6433
6434 // All of the linked instructions should be in the same function as DVR.
6435 for (Instruction *I : at::getAssignmentInsts(&DVR))
6436 CheckDI(DVR.getFunction() == I->getFunction(),
6437 "inst not in same function as #dbg_assign", I, &DVR);
6438 }
6439
6440 // This check is redundant with one in visitLocalVariable().
6441 DILocalVariable *Var = DVR.getVariable();
6442 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6443 Var->getRawType());
6444
6445 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6446 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6447 &DVR, DLNode);
6448 DILocation *Loc = DVR.getDebugLoc();
6449
6450 // The scopes for variables and !dbg attachments must agree.
6451 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6452 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6453 if (!VarSP || !LocSP)
6454 return; // Broken scope chains are checked elsewhere.
6455
6456 CheckDI(VarSP == LocSP,
6457 "mismatched subprogram between #dbg record variable and DILocation",
6458 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6459 Loc->getScope()->getSubprogram());
6460
6461 verifyFnArgs(DVR);
6462}
6463
6464void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6465 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6466 auto *RetTy = cast<VectorType>(VPCast->getType());
6467 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6468 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6469 "VP cast intrinsic first argument and result vector lengths must be "
6470 "equal",
6471 *VPCast);
6472
6473 switch (VPCast->getIntrinsicID()) {
6474 default:
6475 llvm_unreachable("Unknown VP cast intrinsic");
6476 case Intrinsic::vp_trunc:
6477 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6478 "llvm.vp.trunc intrinsic first argument and result element type "
6479 "must be integer",
6480 *VPCast);
6481 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6482 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6483 "larger than the bit size of the return type",
6484 *VPCast);
6485 break;
6486 case Intrinsic::vp_zext:
6487 case Intrinsic::vp_sext:
6488 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6489 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6490 "element type must be integer",
6491 *VPCast);
6492 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6493 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6494 "argument must be smaller than the bit size of the return type",
6495 *VPCast);
6496 break;
6497 case Intrinsic::vp_fptoui:
6498 case Intrinsic::vp_fptosi:
6499 case Intrinsic::vp_lrint:
6500 case Intrinsic::vp_llrint:
6501 Check(
6502 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6503 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6504 "type must be floating-point and result element type must be integer",
6505 *VPCast);
6506 break;
6507 case Intrinsic::vp_uitofp:
6508 case Intrinsic::vp_sitofp:
6509 Check(
6510 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6511 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6512 "type must be integer and result element type must be floating-point",
6513 *VPCast);
6514 break;
6515 case Intrinsic::vp_fptrunc:
6516 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6517 "llvm.vp.fptrunc intrinsic first argument and result element type "
6518 "must be floating-point",
6519 *VPCast);
6520 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6521 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6522 "larger than the bit size of the return type",
6523 *VPCast);
6524 break;
6525 case Intrinsic::vp_fpext:
6526 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6527 "llvm.vp.fpext intrinsic first argument and result element type "
6528 "must be floating-point",
6529 *VPCast);
6530 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6531 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6532 "smaller than the bit size of the return type",
6533 *VPCast);
6534 break;
6535 case Intrinsic::vp_ptrtoint:
6536 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6537 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6538 "pointer and result element type must be integer",
6539 *VPCast);
6540 break;
6541 case Intrinsic::vp_inttoptr:
6542 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6543 "llvm.vp.inttoptr intrinsic first argument element type must be "
6544 "integer and result element type must be pointer",
6545 *VPCast);
6546 break;
6547 }
6548 }
6549 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6550 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6552 "invalid predicate for VP FP comparison intrinsic", &VPI);
6553 }
6554 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6555 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6557 "invalid predicate for VP integer comparison intrinsic", &VPI);
6558 }
6559 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6560 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6561 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6562 "unsupported bits for llvm.vp.is.fpclass test mask");
6563 }
6564}
6565
6566void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6567 unsigned NumOperands = FPI.getNonMetadataArgCount();
6568 bool HasRoundingMD =
6569 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
6570
6571 // Add the expected number of metadata operands.
6572 NumOperands += (1 + HasRoundingMD);
6573
6574 // Compare intrinsics carry an extra predicate metadata operand.
6575 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6576 NumOperands += 1;
6577 Check((FPI.arg_size() == NumOperands),
6578 "invalid arguments for constrained FP intrinsic", &FPI);
6579
6580 switch (FPI.getIntrinsicID()) {
6581 case Intrinsic::experimental_constrained_lrint:
6582 case Intrinsic::experimental_constrained_llrint: {
6583 Type *ValTy = FPI.getArgOperand(0)->getType();
6584 Type *ResultTy = FPI.getType();
6585 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6586 "Intrinsic does not support vectors", &FPI);
6587 break;
6588 }
6589
6590 case Intrinsic::experimental_constrained_lround:
6591 case Intrinsic::experimental_constrained_llround: {
6592 Type *ValTy = FPI.getArgOperand(0)->getType();
6593 Type *ResultTy = FPI.getType();
6594 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6595 "Intrinsic does not support vectors", &FPI);
6596 break;
6597 }
6598
6599 case Intrinsic::experimental_constrained_fcmp:
6600 case Intrinsic::experimental_constrained_fcmps: {
6601 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6603 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6604 break;
6605 }
6606
6607 case Intrinsic::experimental_constrained_fptosi:
6608 case Intrinsic::experimental_constrained_fptoui: {
6609 Value *Operand = FPI.getArgOperand(0);
6610 ElementCount SrcEC;
6611 Check(Operand->getType()->isFPOrFPVectorTy(),
6612 "Intrinsic first argument must be floating point", &FPI);
6613 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6614 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6615 }
6616
6617 Operand = &FPI;
6618 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6619 "Intrinsic first argument and result disagree on vector use", &FPI);
6620 Check(Operand->getType()->isIntOrIntVectorTy(),
6621 "Intrinsic result must be an integer", &FPI);
6622 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6623 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6624 "Intrinsic first argument and result vector lengths must be equal",
6625 &FPI);
6626 }
6627 break;
6628 }
6629
6630 case Intrinsic::experimental_constrained_sitofp:
6631 case Intrinsic::experimental_constrained_uitofp: {
6632 Value *Operand = FPI.getArgOperand(0);
6633 ElementCount SrcEC;
6634 Check(Operand->getType()->isIntOrIntVectorTy(),
6635 "Intrinsic first argument must be integer", &FPI);
6636 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6637 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6638 }
6639
6640 Operand = &FPI;
6641 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6642 "Intrinsic first argument and result disagree on vector use", &FPI);
6643 Check(Operand->getType()->isFPOrFPVectorTy(),
6644 "Intrinsic result must be a floating point", &FPI);
6645 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6646 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6647 "Intrinsic first argument and result vector lengths must be equal",
6648 &FPI);
6649 }
6650 break;
6651 }
6652
6653 case Intrinsic::experimental_constrained_fptrunc:
6654 case Intrinsic::experimental_constrained_fpext: {
6655 Value *Operand = FPI.getArgOperand(0);
6656 Type *OperandTy = Operand->getType();
6657 Value *Result = &FPI;
6658 Type *ResultTy = Result->getType();
6659 Check(OperandTy->isFPOrFPVectorTy(),
6660 "Intrinsic first argument must be FP or FP vector", &FPI);
6661 Check(ResultTy->isFPOrFPVectorTy(),
6662 "Intrinsic result must be FP or FP vector", &FPI);
6663 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6664 "Intrinsic first argument and result disagree on vector use", &FPI);
6665 if (OperandTy->isVectorTy()) {
6666 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6667 cast<VectorType>(ResultTy)->getElementCount(),
6668 "Intrinsic first argument and result vector lengths must be equal",
6669 &FPI);
6670 }
6671 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6672 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6673 "Intrinsic first argument's type must be larger than result type",
6674 &FPI);
6675 } else {
6676 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6677 "Intrinsic first argument's type must be smaller than result type",
6678 &FPI);
6679 }
6680 break;
6681 }
6682
6683 default:
6684 break;
6685 }
6686
6687 // If a non-metadata argument is passed in a metadata slot then the
6688 // error will be caught earlier when the incorrect argument doesn't
6689 // match the specification in the intrinsic call table. Thus, no
6690 // argument type check is needed here.
6691
6692 Check(FPI.getExceptionBehavior().has_value(),
6693 "invalid exception behavior argument", &FPI);
6694 if (HasRoundingMD) {
6695 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6696 &FPI);
6697 }
6698}
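// For illustration, a constrained truncation carries both metadata operands
// counted above:
//   %r = call float @llvm.experimental.constrained.fptrunc.f32.f64(
//            double %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
// whereas a constrained compare carries a predicate and an exception-behavior
// operand but no rounding mode.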
6699
6700void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6701 auto *MD = DII.getRawLocation();
6702 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6703 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6704 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6705 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6706 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6707 DII.getRawVariable());
6708 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6709 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6710 DII.getRawExpression());
6711
6712 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6713 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6714 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6715 DAI->getRawAssignID());
6716 const auto *RawAddr = DAI->getRawAddress();
6717 CheckDI(
6718 isa<ValueAsMetadata>(RawAddr) ||
6719 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6720 "invalid llvm.dbg.assign intrinsic address", &DII,
6721 DAI->getRawAddress());
6722 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6723 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6724 DAI->getRawAddressExpression());
6725 // All of the linked instructions should be in the same function as DII.
6726 for (Instruction *I : at::getAssignmentInsts(DAI))
6727 CheckDI(DAI->getFunction() == I->getFunction(),
6728 "inst not in same function as dbg.assign", I, DAI);
6729 }
6730
6731 // Ignore broken !dbg attachments; they're checked elsewhere.
6732 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6733 if (!isa<DILocation>(N))
6734 return;
6735
6736 BasicBlock *BB = DII.getParent();
6737 Function *F = BB ? BB->getParent() : nullptr;
6738
6739 // The scopes for variables and !dbg attachments must agree.
6740 DILocalVariable *Var = DII.getVariable();
6741 DILocation *Loc = DII.getDebugLoc();
6742 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6743 &DII, BB, F);
6744
6745 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6746 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6747 if (!VarSP || !LocSP)
6748 return; // Broken scope chains are checked elsewhere.
6749
6750 CheckDI(VarSP == LocSP,
6751 "mismatched subprogram between llvm.dbg." + Kind +
6752 " variable and !dbg attachment",
6753 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6754 Loc->getScope()->getSubprogram());
6755
6756 // This check is redundant with one in visitLocalVariable().
6757 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6758 Var->getRawType());
6759 verifyFnArgs(DII);
6760}
6761
6762void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6763 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6764 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6765 DLI.getRawLabel());
6766
6767 // Ignore broken !dbg attachments; they're checked elsewhere.
6768 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6769 if (!isa<DILocation>(N))
6770 return;
6771
6772 BasicBlock *BB = DLI.getParent();
6773 Function *F = BB ? BB->getParent() : nullptr;
6774
6775 // The scopes for variables and !dbg attachments must agree.
6776 DILabel *Label = DLI.getLabel();
6777 DILocation *Loc = DLI.getDebugLoc();
6778 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6779 BB, F);
6780
6781 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6782 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6783 if (!LabelSP || !LocSP)
6784 return;
6785
6786 CheckDI(LabelSP == LocSP,
6787 "mismatched subprogram between llvm.dbg." + Kind +
6788 " label and !dbg attachment",
6789 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6790 Loc->getScope()->getSubprogram());
6791}
6792
6793void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6794 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6795 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6796
6797 // We don't know whether this intrinsic verified correctly.
6798 if (!V || !E || !E->isValid())
6799 return;
6800
6801 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6802 auto Fragment = E->getFragmentInfo();
6803 if (!Fragment)
6804 return;
6805
6806 // The frontend helps out GDB by emitting the members of local anonymous
6807 // unions as artificial local variables with shared storage. When SROA splits
6808 // the storage for artificial local variables that are smaller than the entire
6809 // union, the overhang piece will be outside of the allotted space for the
6810 // variable and this check fails.
6811 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6812 if (V->isArtificial())
6813 return;
6814
6815 verifyFragmentExpression(*V, *Fragment, &I);
6816}
6817void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6818 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6819 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6820
6821 // We don't know whether this intrinsic verified correctly.
6822 if (!V || !E || !E->isValid())
6823 return;
6824
6825 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6826 auto Fragment = E->getFragmentInfo();
6827 if (!Fragment)
6828 return;
6829
6830 // The frontend helps out GDB by emitting the members of local anonymous
6831 // unions as artificial local variables with shared storage. When SROA splits
6832 // the storage for artificial local variables that are smaller than the entire
6833 // union, the overhang piece will be outside of the allotted space for the
6834 // variable and this check fails.
6835 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6836 if (V->isArtificial())
6837 return;
6838
6839 verifyFragmentExpression(*V, *Fragment, &DVR);
6840}
6841
6842template <typename ValueOrMetadata>
6843void Verifier::verifyFragmentExpression(const DIVariable &V,
6844 DIExpression::FragmentInfo Fragment,
6845 ValueOrMetadata *Desc) {
6846 // If there's no size, the type is broken, but that should be checked
6847 // elsewhere.
6848 auto VarSize = V.getSizeInBits();
6849 if (!VarSize)
6850 return;
6851
6852 unsigned FragSize = Fragment.SizeInBits;
6853 unsigned FragOffset = Fragment.OffsetInBits;
6854 CheckDI(FragSize + FragOffset <= *VarSize,
6855 "fragment is larger than or outside of variable", Desc, &V);
6856 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6857}
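// For illustration: on a 64-bit variable, a DW_OP_LLVM_fragment at offset 32
// with size 32 passes both checks, while a fragment at offset 48 with size 32
// (ending past bit 64) or a 64-bit fragment at offset 0 (covering the whole
// variable) is flagged.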
6858
6859void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6860 // This function does not take the scope of noninlined function arguments into
6861 // account. Don't run it if current function is nodebug, because it may
6862 // contain inlined debug intrinsics.
6863 if (!HasDebugInfo)
6864 return;
6865
6866 // For performance reasons only check non-inlined ones.
6867 if (I.getDebugLoc()->getInlinedAt())
6868 return;
6869
6870 DILocalVariable *Var = I.getVariable();
6871 CheckDI(Var, "dbg intrinsic without variable");
6872
6873 unsigned ArgNo = Var->getArg();
6874 if (!ArgNo)
6875 return;
6876
6877 // Verify there are no duplicate function argument debug info entries.
6878 // These will cause hard-to-debug assertions in the DWARF backend.
6879 if (DebugFnArgs.size() < ArgNo)
6880 DebugFnArgs.resize(ArgNo, nullptr);
6881
6882 auto *Prev = DebugFnArgs[ArgNo - 1];
6883 DebugFnArgs[ArgNo - 1] = Var;
6884 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6885 Prev, Var);
6886}
6887void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6888 // This function does not take the scope of noninlined function arguments into
6889 // account. Don't run it if current function is nodebug, because it may
6890 // contain inlined debug intrinsics.
6891 if (!HasDebugInfo)
6892 return;
6893
6894 // For performance reasons only check non-inlined ones.
6895 if (DVR.getDebugLoc()->getInlinedAt())
6896 return;
6897
6898 DILocalVariable *Var = DVR.getVariable();
6899 CheckDI(Var, "#dbg record without variable");
6900
6901 unsigned ArgNo = Var->getArg();
6902 if (!ArgNo)
6903 return;
6904
6905 // Verify there are no duplicate function argument debug info entries.
6906 // These will cause hard-to-debug assertions in the DWARF backend.
6907 if (DebugFnArgs.size() < ArgNo)
6908 DebugFnArgs.resize(ArgNo, nullptr);
6909
6910 auto *Prev = DebugFnArgs[ArgNo - 1];
6911 DebugFnArgs[ArgNo - 1] = Var;
6912 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6913 Prev, Var);
6914}
6915
6916void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6917 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6918
6919 // We don't know whether this intrinsic verified correctly.
6920 if (!E || !E->isValid())
6921 return;
6922
6923 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6924 Value *VarValue = I.getVariableLocationOp(0);
6925 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6926 return;
6927 // We allow EntryValues for swift async arguments, as they have an
6928 // ABI-guarantee to be turned into a specific register.
6929 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6930 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6931 return;
6932 }
6933
6934 CheckDI(!E->isEntryValue(),
6935 "Entry values are only allowed in MIR unless they target a "
6936 "swiftasync Argument",
6937 &I);
6938}
6939void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6940 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6941
6942 // We don't know whether this intrinsic verified correctly.
6943 if (!E || !E->isValid())
6944 return;
6945
6946 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
6947 Value *VarValue = DVR.getVariableLocationOp(0);
6948 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6949 return;
6950 // We allow EntryValues for swift async arguments, as they have an
6951 // ABI-guarantee to be turned into a specific register.
6952 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6953 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6954 return;
6955 }
6956
6957 CheckDI(!E->isEntryValue(),
6958 "Entry values are only allowed in MIR unless they target a "
6959 "swiftasync Argument",
6960 &DVR);
6961}
6962
6963void Verifier::verifyCompileUnits() {
6964 // When more than one Module is imported into the same context, such as during
6965 // an LTO build before linking the modules, ODR type uniquing may cause types
6966 // to point to a different CU. This check does not make sense in this case.
6967 if (M.getContext().isODRUniquingDebugTypes())
6968 return;
6969 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6970 SmallPtrSet<const Metadata *, 2> Listed;
6971 if (CUs)
6972 Listed.insert(CUs->op_begin(), CUs->op_end());
6973 for (const auto *CU : CUVisited)
6974 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6975 CUVisited.clear();
6976}
6977
6978void Verifier::verifyDeoptimizeCallingConvs() {
6979 if (DeoptimizeDeclarations.empty())
6980 return;
6981
6982 const Function *First = DeoptimizeDeclarations[0];
6983 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
6984 Check(First->getCallingConv() == F->getCallingConv(),
6985 "All llvm.experimental.deoptimize declarations must have the same "
6986 "calling convention",
6987 First, F);
6988 }
6989}
6990
6991void Verifier::verifyAttachedCallBundle(const CallBase &Call,
6992 const OperandBundleUse &BU) {
6993 FunctionType *FTy = Call.getFunctionType();
6994
6995 Check((FTy->getReturnType()->isPointerTy() ||
6996 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
6997 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
6998 "function returning a pointer or a non-returning function that has a "
6999 "void return type",
7000 Call);
7001
7002 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7003 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7004 "an argument",
7005 Call);
7006
7007 auto *Fn = cast<Function>(BU.Inputs.front());
7008 Intrinsic::ID IID = Fn->getIntrinsicID();
7009
7010 if (IID) {
7011 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7012 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7013 "invalid function argument", Call);
7014 } else {
7015 StringRef FnName = Fn->getName();
7016 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7017 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7018 "invalid function argument", Call);
7019 }
7020}
7021
7022void Verifier::verifyNoAliasScopeDecl() {
7023 if (NoAliasScopeDecls.empty())
7024 return;
7025
7026 // only a single scope must be declared at a time.
7027 for (auto *II : NoAliasScopeDecls) {
7028 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7029 "Not a llvm.experimental.noalias.scope.decl ?");
7030 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7031 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7032 Check(ScopeListMV != nullptr,
7033 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7034 "argument",
7035 II);
7036
7037 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7038 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7039 Check(ScopeListMD->getNumOperands() == 1,
7040 "!id.scope.list must point to a list with a single scope", II);
7041 visitAliasScopeListMetadata(ScopeListMD);
7042 }
7043
7044 // Only check the domination rule when requested. Once all passes have been
7045 // adapted this option can go away.
7046 if (!VerifyNoAliasScopeDomination)
7047 return;
7048
7049 // Now sort the intrinsics based on the scope MDNode so that declarations of
7050 // the same scopes are next to each other.
7051 auto GetScope = [](IntrinsicInst *II) {
7052 const auto *ScopeListMV = cast<MetadataAsValue>(
7053 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7054 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7055 };
7056
7057 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7058 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7059 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7060 return GetScope(Lhs) < GetScope(Rhs);
7061 };
7062
7063 llvm::sort(NoAliasScopeDecls, Compare);
7064
7065 // Go over the intrinsics and check that for the same scope, they are not
7066 // dominating each other.
7067 auto ItCurrent = NoAliasScopeDecls.begin();
7068 while (ItCurrent != NoAliasScopeDecls.end()) {
7069 auto CurScope = GetScope(*ItCurrent);
7070 auto ItNext = ItCurrent;
7071 do {
7072 ++ItNext;
7073 } while (ItNext != NoAliasScopeDecls.end() &&
7074 GetScope(*ItNext) == CurScope);
7075
7076 // [ItCurrent, ItNext) represents the declarations for the same scope.
7077 // Ensure they are not dominating each other, but only if it is not too
7078 // expensive.
7079 if (ItNext - ItCurrent < 32)
7080 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7081 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7082 if (I != J)
7083 Check(!DT.dominates(I, J),
7084 "llvm.experimental.noalias.scope.decl dominates another one "
7085 "with the same scope",
7086 I);
7087 ItCurrent = ItNext;
7088 }
7089}
7090
7091//===----------------------------------------------------------------------===//
7092// Implement the public interfaces to this file...
7093//===----------------------------------------------------------------------===//
7094
7095bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7096 Function &F = const_cast<Function &>(f);
7097
7098 // Don't use a raw_null_ostream. Printing IR is expensive.
7099 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7100
7101 // Note that this function's return value is inverted from what you would
7102 // expect of a function called "verify".
7103 return !V.verify(F);
7104}
7105
7106bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7107 bool *BrokenDebugInfo) {
7108 // Don't use a raw_null_ostream. Printing IR is expensive.
7109 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7110
7111 bool Broken = false;
7112 for (const Function &F : M)
7113 Broken |= !V.verify(F);
7114
7115 Broken |= !V.verify();
7116 if (BrokenDebugInfo)
7117 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7118 // Note that this function's return value is inverted from what you would
7119 // expect of a function called "verify".
7120 return Broken;
7121}
7122
7123namespace {
7124
7125struct VerifierLegacyPass : public FunctionPass {
7126 static char ID;
7127
7128 std::unique_ptr<Verifier> V;
7129 bool FatalErrors = true;
7130
7131 VerifierLegacyPass() : FunctionPass(ID) {
7132 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7133 }
7134 explicit VerifierLegacyPass(bool FatalErrors)
7135 : FunctionPass(ID),
7136 FatalErrors(FatalErrors) {
7137 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7138 }
7139
7140 bool doInitialization(Module &M) override {
7141 V = std::make_unique<Verifier>(
7142 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7143 return false;
7144 }
7145
7146 bool runOnFunction(Function &F) override {
7147 if (!V->verify(F) && FatalErrors) {
7148 errs() << "in function " << F.getName() << '\n';
7149 report_fatal_error("Broken function found, compilation aborted!");
7150 }
7151 return false;
7152 }
7153
7154 bool doFinalization(Module &M) override {
7155 bool HasErrors = false;
7156 for (Function &F : M)
7157 if (F.isDeclaration())
7158 HasErrors |= !V->verify(F);
7159
7160 HasErrors |= !V->verify();
7161 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7162 report_fatal_error("Broken module found, compilation aborted!");
7163 return false;
7164 }
7165
7166 void getAnalysisUsage(AnalysisUsage &AU) const override {
7167 AU.setPreservesAll();
7168 }
7169};
7170
7171} // end anonymous namespace
7172
7173/// Helper to issue failure from the TBAA verification
7174template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7175 if (Diagnostic)
7176 return Diagnostic->CheckFailed(Args...);
7177}
7178
7179#define CheckTBAA(C, ...) \
7180 do { \
7181 if (!(C)) { \
7182 CheckFailed(__VA_ARGS__); \
7183 return false; \
7184 } \
7185 } while (false)
7186
7187/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7188/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7189/// struct-type node describing an aggregate data structure (like a struct).
7190TBAAVerifier::TBAABaseNodeSummary
7191TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7192 bool IsNewFormat) {
7193 if (BaseNode->getNumOperands() < 2) {
7194 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7195 return {true, ~0u};
7196 }
7197
7198 auto Itr = TBAABaseNodes.find(BaseNode);
7199 if (Itr != TBAABaseNodes.end())
7200 return Itr->second;
7201
7202 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7203 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7204 (void)InsertResult;
7205 assert(InsertResult.second && "We just checked!");
7206 return Result;
7207}
7208
7209TBAAVerifier::TBAABaseNodeSummary
7210TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7211 bool IsNewFormat) {
7212 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7213
7214 if (BaseNode->getNumOperands() == 2) {
7215 // Scalar nodes can only be accessed at offset 0.
7216 return isValidScalarTBAANode(BaseNode)
7217 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7218 : InvalidNode;
7219 }
7220
7221 if (IsNewFormat) {
7222 if (BaseNode->getNumOperands() % 3 != 0) {
7223 CheckFailed("Access tag nodes must have the number of operands that is a "
7224 "multiple of 3!", BaseNode);
7225 return InvalidNode;
7226 }
7227 } else {
7228 if (BaseNode->getNumOperands() % 2 != 1) {
7229 CheckFailed("Struct tag nodes must have an odd number of operands!",
7230 BaseNode);
7231 return InvalidNode;
7232 }
7233 }
7234
7235 // Check the type size field.
7236 if (IsNewFormat) {
7237 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7238 BaseNode->getOperand(1));
7239 if (!TypeSizeNode) {
7240 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7241 return InvalidNode;
7242 }
7243 }
7244
7245 // Check the type name field. In the new format it can be anything.
7246 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7247 CheckFailed("Struct tag nodes have a string as their first operand",
7248 BaseNode);
7249 return InvalidNode;
7250 }
7251
7252 bool Failed = false;
7253
7254 std::optional<APInt> PrevOffset;
7255 unsigned BitWidth = ~0u;
7256
7257 // We've already checked that BaseNode is not a degenerate root node with one
7258 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7259 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7260 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7261 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7262 Idx += NumOpsPerField) {
7263 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7264 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7265 if (!isa<MDNode>(FieldTy)) {
7266 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7267 Failed = true;
7268 continue;
7269 }
7270
7271 auto *OffsetEntryCI =
7272 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7273 if (!OffsetEntryCI) {
7274 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7275 Failed = true;
7276 continue;
7277 }
7278
7279 if (BitWidth == ~0u)
7280 BitWidth = OffsetEntryCI->getBitWidth();
7281
7282 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7283 CheckFailed(
7284 "Bitwidth between the offsets and struct type entries must match", &I,
7285 BaseNode);
7286 Failed = true;
7287 continue;
7288 }
7289
7290 // NB! As far as I can tell, we generate a non-strictly increasing offset
7291 // sequence only from structs that have zero size bit fields. When
7292 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7293 // pick the field lexically the latest in struct type metadata node. This
7294 // mirrors the actual behavior of the alias analysis implementation.
7295 bool IsAscending =
7296 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7297
7298 if (!IsAscending) {
7299 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7300 Failed = true;
7301 }
7302
7303 PrevOffset = OffsetEntryCI->getValue();
7304
7305 if (IsNewFormat) {
7306 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7307 BaseNode->getOperand(Idx + 2));
7308 if (!MemberSizeNode) {
7309 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7310 Failed = true;
7311 continue;
7312 }
7313 }
7314 }
7315
7316 return Failed ? InvalidNode
7317 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7318}
7319
7320static bool IsRootTBAANode(const MDNode *MD) {
7321 return MD->getNumOperands() < 2;
7322}
7323
7324static bool IsScalarTBAANodeImpl(const MDNode *MD,
7325 SmallPtrSetImpl<const MDNode *> &Visited) {
7326 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7327 return false;
7328
7329 if (!isa<MDString>(MD->getOperand(0)))
7330 return false;
7331
7332 if (MD->getNumOperands() == 3) {
7333 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7334 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7335 return false;
7336 }
7337
7338 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7339 return Parent && Visited.insert(Parent).second &&
7340 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7341}
7342
7343bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7344 auto ResultIt = TBAAScalarNodes.find(MD);
7345 if (ResultIt != TBAAScalarNodes.end())
7346 return ResultIt->second;
7347
7348 SmallPtrSet<const MDNode *, 4> Visited;
7349 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7350 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7351 (void)InsertResult;
7352 assert(InsertResult.second && "Just checked!");
7353
7354 return Result;
7355}
7356
7357/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7358/// Offset in place to be the offset within the field node returned.
7359///
7360/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7361MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7362 const MDNode *BaseNode,
7363 APInt &Offset,
7364 bool IsNewFormat) {
7365 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7366
7367 // Scalar nodes have only one possible "field" -- their parent in the access
7368 // hierarchy. Offset must be zero at this point, but our caller is supposed
7369 // to check that.
7370 if (BaseNode->getNumOperands() == 2)
7371 return cast<MDNode>(BaseNode->getOperand(1));
7372
7373 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7374 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7375 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7376 Idx += NumOpsPerField) {
7377 auto *OffsetEntryCI =
7378 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7379 if (OffsetEntryCI->getValue().ugt(Offset)) {
7380 if (Idx == FirstFieldOpNo) {
7381 CheckFailed("Could not find TBAA parent in struct type node", &I,
7382 BaseNode, &Offset);
7383 return nullptr;
7384 }
7385
7386 unsigned PrevIdx = Idx - NumOpsPerField;
7387 auto *PrevOffsetEntryCI =
7388 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7389 Offset -= PrevOffsetEntryCI->getValue();
7390 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7391 }
7392 }
7393
7394 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7395 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7396 BaseNode->getOperand(LastIdx + 1));
7397 Offset -= LastOffsetEntryCI->getValue();
7398 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7399}
7400
7401static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7402 if (!Type || Type->getNumOperands() < 3)
7403 return false;
7404
7405 // In the new format type nodes shall have a reference to the parent type as
7406 // its first operand.
7407 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7408}
7409
7410bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7411 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7412 &I, MD);
7413
7414 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7415 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7416 isa<AtomicCmpXchgInst>(I),
7417 "This instruction shall not have a TBAA access tag!", &I);
7418
7419 bool IsStructPathTBAA =
7420 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7421
7422 CheckTBAA(IsStructPathTBAA,
7423 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7424 &I);
7425
7426 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7427 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7428
7429 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7430
7431 if (IsNewFormat) {
7432 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7433 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7434 } else {
7435 CheckTBAA(MD->getNumOperands() < 5,
7436 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7437 }
7438
7439 // Check the access size field.
7440 if (IsNewFormat) {
7441 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7442 MD->getOperand(3));
7443 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7444 }
7445
7446 // Check the immutability flag.
7447 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7448 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7449 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7450 MD->getOperand(ImmutabilityFlagOpNo));
7451 CheckTBAA(IsImmutableCI,
7452 "Immutability tag on struct tag metadata must be a constant", &I,
7453 MD);
7454 CheckTBAA(
7455 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7456 "Immutability part of the struct tag metadata must be either 0 or 1",
7457 &I, MD);
7458 }
7459
7460 CheckTBAA(BaseNode && AccessType,
7461 "Malformed struct tag metadata: base and access-type "
7462 "should be non-null and point to Metadata nodes",
7463 &I, MD, BaseNode, AccessType);
7464
7465 if (!IsNewFormat) {
7466 CheckTBAA(isValidScalarTBAANode(AccessType),
7467 "Access type node must be a valid scalar type", &I, MD,
7468 AccessType);
7469 }
7470
7471 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7472 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7473
7474 APInt Offset = OffsetCI->getValue();
7475 bool SeenAccessTypeInPath = false;
7476
7477 SmallPtrSet<MDNode *, 4> StructPath;
7478
7479 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7480 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7481 IsNewFormat)) {
7482 if (!StructPath.insert(BaseNode).second) {
7483 CheckFailed("Cycle detected in struct path", &I, MD);
7484 return false;
7485 }
7486
7487 bool Invalid;
7488 unsigned BaseNodeBitWidth;
7489 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7490 IsNewFormat);
7491
7492 // If the base node is invalid in itself, then we've already printed all the
7493 // errors we wanted to print.
7494 if (Invalid)
7495 return false;
7496
7497 SeenAccessTypeInPath |= BaseNode == AccessType;
7498
7499 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7500 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7501 &I, MD, &Offset);
7502
7503 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7504 (BaseNodeBitWidth == 0 && Offset == 0) ||
7505 (IsNewFormat && BaseNodeBitWidth == ~0u),
7506 "Access bit-width not the same as description bit-width", &I, MD,
7507 BaseNodeBitWidth, Offset.getBitWidth());
7508
7509 if (IsNewFormat && SeenAccessTypeInPath)
7510 break;
7511 }
7512
7513 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7514 MD);
7515 return true;
7516}
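// Illustrative struct-path metadata accepted by the checks above (the node
// names are arbitrary):
//   !0 = !{!"Simple C/C++ TBAA"}            ; root
//   !1 = !{!"omnipotent char", !0, i64 0}   ; scalar type node
//   !2 = !{!"int", !1, i64 0}               ; scalar type node
//   !3 = !{!"S", !2, i64 0, !2, i64 4}      ; struct type node
//   !4 = !{!3, !2, i64 4}                   ; access tag: int member of S at offset 4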
7517
7518char VerifierLegacyPass::ID = 0;
7519INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7520
7521FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7522 return new VerifierLegacyPass(FatalErrors);
7523}
7524
7525AnalysisKey VerifierAnalysis::Key;
7526VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7527                                               ModuleAnalysisManager &) {
7528 Result Res;
7529 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7530 return Res;
7531}
7532
7533VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7534                                               FunctionAnalysisManager &) {
7535 return { llvm::verifyFunction(F, &dbgs()), false };
7536}
7537
7538PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7539 auto Res = AM.getResult<VerifierAnalysis>(M);
7540 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7541 report_fatal_error("Broken module found, compilation aborted!");
7542
7543 return PreservedAnalyses::all();
7544}
7545
7546PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7547 auto res = AM.getResult<VerifierAnalysis>(F);
7548 if (res.IRBroken && FatalErrors)
7549 report_fatal_error("Broken function found, compilation aborted!");
7550
7551 return PreservedAnalyses::all();
7552}
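The two VerifierPass::run overloads above are the new-pass-manager entry points. As a rough, non-authoritative sketch of how they are typically scheduled (the PassBuilder/ModulePassManager plumbing and the VerifierPass(bool FatalErrors) constructor are assumed from the wider LLVM API, not taken from this listing):

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Passes/PassBuilder.h"

// Hypothetical helper: schedule the module verifier with the new pass manager.
static void runVerifier(llvm::Module &M) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::ModulePassManager MPM;
  // FatalErrors=false: report problems instead of calling report_fatal_error.
  MPM.addPass(llvm::VerifierPass(/*FatalErrors=*/false));
  MPM.run(M, MAM);
}

With FatalErrors left at its default of true, a broken module aborts compilation via report_fatal_error, exactly as in VerifierPass::run above.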
This file defines the StringMap class.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Atomic ordering constants.
@ RetAttr
Definition: Attributes.cpp:728
@ FnAttr
Definition: Attributes.cpp:726
This file contains the simple types necessary to represent the attributes associated with functions a...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
return RetTy
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
std::string Name
uint64_t Size
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file contains the declarations for profiling metadata utility functions.
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This defines the Use class.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
Definition: Verifier.cpp:7324
static bool isType(const Metadata *MD)
Definition: Verifier.cpp:1135
static Instruction * getSuccPad(Instruction *Terminator)
Definition: Verifier.cpp:2670
#define Check(C,...)
We know that cond should be true; if not, print an error message.
Definition: Verifier.cpp:668
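The verifier's check macros share one early-return shape. A rough approximation (the verbatim expansion lives at the definition cited above; this is a sketch, not the actual macro):

// Approximate shape of the verifier's check macros: on failure, record the
// diagnostic via CheckFailed(...) and bail out of the current visit method.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

Roughly speaking, CheckTBAA (Verifier.cpp:7179) differs only in returning false, which is how visitTBAAMetadata reports an invalid tag, and CheckDI routes the failure through DebugInfoCheckFailed instead.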
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
Definition: Verifier.cpp:7401
#define CheckDI(C,...)
We know that a debug info condition should be true; if not, print an error message.
Definition: Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition: Verifier.cpp:719
static bool isDINode(const Metadata *MD)
Definition: Verifier.cpp:1137
static bool isScope(const Metadata *MD)
Definition: Verifier.cpp:1136
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6340
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3781
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7179
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7320
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4109
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4353
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1286
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3791
bool isFiniteNonZero() const
Definition: APFloat.h:1358
bool isNegative() const
Definition: APFloat.h:1348
const fltSemantics & getSemantics() const
Definition: APFloat.h:1356
Class for arbitrary precision integers.
Definition: APInt.h:77
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1180
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:359
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:396
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1129
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:419
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:378
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:60
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:146
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:121
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:114
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:92
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:494
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:695
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:790
BinOp getOperation() const
Definition: Instructions.h:786
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:828
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:909
std::string getAsString(bool InAttrGrp=false) const
Definition: Attributes.cpp:996
static Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:303
static bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:749
static bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:326
static bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:741
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
@ None
No attributes have been set.
Definition: Attributes.h:88
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:102
static bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:745
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:438
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:507
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:365
const Instruction & front() const
Definition: BasicBlock.h:461
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:569
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:465
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:167
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
This class represents a no-op cast from one type to another.
static BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1851
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
unsigned arg_size() const
Definition: InstrTypes.h:1408
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:864
bool isIntPredicate() const
Definition: InstrTypes.h:865
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:858
ConstantArray - Constant Array Declarations.
Definition: Constants.h:424
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1084
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:218
bool isNegative() const
Definition: Constants.h:201
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:206
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:149
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1012
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition: Constants.h:1050
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition: Constants.h:1037
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1040
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1043
static bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
Definition: ConstantRange.h:47
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1500
This is an important base class in LLVM.
Definition: Constant.h:41
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
unsigned getNonMetadataArgCount() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Metadata * getRawLabel() const
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
const BasicBlock * getParent() const
This is the common base class for debug info intrinsics for variables.
Metadata * getRawLocation() const
DILocalVariable * getVariable() const
Metadata * getRawVariable() const
Metadata * getRawExpression() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition: DebugLoc.h:106
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
bool empty() const
Definition: DenseMap.h:98
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a single (scalar) element from a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:419
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:442
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2449
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:242
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:868
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:247
const std::string & getGC() const
Definition: Function.cpp:785
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:914
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:95
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
const Function * getResolverFunction() const
Definition: Globals.cpp:611
static FunctionType * getResolverFunctionType(Type *IFuncValTy)
Definition: GlobalIFunc.h:83
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:87
const Constant * getResolver() const
Definition: GlobalIFunc.h:70
bool hasComdat() const
Definition: GlobalObject.h:128
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:565
bool hasExternalLinkage() const
Definition: GlobalValue.h:511
bool isDSOLocal() const
Definition: GlobalValue.h:305
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:298
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:290
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:533
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasDefaultVisibility() const
Definition: GlobalValue.h:249
bool hasPrivateLinkage() const
Definition: GlobalValue.h:527
bool hasHiddenVisibility() const
Definition: GlobalValue.h:250
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:281
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
bool hasComdat() const
Definition: GlobalValue.h:241
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:215
bool hasAppendingLinkage() const
Definition: GlobalValue.h:525
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:512
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field or array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:253
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:197
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:280
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:476
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:173
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:217
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:227
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:208
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
bool isTemporary() const
Definition: Metadata.h:1251
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
bool isDistinct() const
Definition: Metadata.h:1250
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1247
LLVMContext & getContext() const
Definition: Metadata.h:1231
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
Metadata * get() const
Definition: Metadata.h:918
A single uniqued string.
Definition: Metadata.h:720
StringRef getString() const
Definition: Metadata.cpp:610
Typed, array-like tuple of metadata.
Definition: Metadata.h:1627
Tuple of metadata.
Definition: Metadata.h:1470
static bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:193
Root of the metadata hierarchy.
Definition: Metadata.h:62
void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5221
unsigned getMetadataID() const
Definition: Metadata.h:102
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:144
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:122
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:139
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:131
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:267
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:288
A tuple of MDNodes.
Definition: Metadata.h:1729
StringRef getName() const
Definition: Metadata.cpp:1398
void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:4882
iterator_range< op_iterator > operands()
Definition: Metadata.h:1825
op_range incoming_values()
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2213
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:323
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:289
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:463
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:258
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:417
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
bool containsScalableVectorType(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:400
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:612
Multiway switch.
Verify that the TBAA metadata is valid.
Definition: Verifier.h:39
bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if its TBAA tag is valid; return false if an invalid tag is attached.
Definition: Verifier.cpp:7410
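A hedged sketch of driving this entry point directly. The MDBuilder helpers (createTBAARoot, createTBAAScalarTypeNode, createTBAAStructTagNode) and the TBAAVerifier default constructor are assumptions about the surrounding LLVM API, not shown on this page; the helper name is hypothetical.

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Verifier.h"

// Hypothetical helper: tag every load/store in F with a simple struct-path
// access tag {base, access type, offset 0} and ask TBAAVerifier to check it.
static bool tagAndCheckTBAA(llvm::Function &F) {
  llvm::MDBuilder MDB(F.getContext());
  llvm::MDNode *Root = MDB.createTBAARoot("example tbaa root");
  llvm::MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root);
  llvm::MDNode *Tag = MDB.createTBAAStructTagNode(IntTy, IntTy, /*Offset=*/0);

  // With no VerifierSupport attached, failures are only visible in the
  // boolean result; the module verifier wires in its own diagnostics.
  llvm::TBAAVerifier TV;
  bool AllValid = true;
  for (llvm::BasicBlock &BB : F)
    for (llvm::Instruction &I : BB)
      if (llvm::isa<llvm::LoadInst>(I) || llvm::isa<llvm::StoreInst>(I)) {
        I.setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
        AllValid &= TV.visitTBAAMetadata(I, Tag);
      }
  return AllValid;
}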
@ CanBeGlobal
This type may be used as the value type of a global variable.
Definition: DerivedTypes.h:771
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:219
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:222
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
Value * getValue() const
Definition: Metadata.h:490
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > materialized_users()
Definition: Value.h:415
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static constexpr uint64_t MaximumAlignment
Definition: Value.h:807
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:697
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:785
iterator_range< user_iterator > users()
Definition: Value.h:421
bool materialized_use_empty() const
Definition: Value.h:349
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:107
Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7526
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7538
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:158
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Entry
Definition: COFF.h:811
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
Definition: Function.cpp:1735
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Function.cpp:1328
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:218
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:219
bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics" that take ...
Definition: Function.cpp:1516
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1042
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:37
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
Definition: Function.cpp:1761
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1796
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
bool isFortran(SourceLanguage S)
Definition: Dwarf.h:572
SourceLanguage
Definition: Dwarf.h:207
@ DW_LANG_lo_user
Definition: Dwarf.h:211
@ DW_MACINFO_undef
Definition: Dwarf.h:790
@ DW_MACINFO_start_file
Definition: Dwarf.h:791
@ DW_MACINFO_define
Definition: Dwarf.h:789
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool canInstructionHaveMMRAs(const Instruction &I)
unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
Definition: STLExtras.h:2400
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors; useful when debugging a pass.
Definition: Verifier.cpp:7095
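A minimal usage sketch of this entry point; the wrapper name is hypothetical, and note the inverted return convention (verifyFunction returns true when the function is broken).

#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical wrapper: print diagnostics to stderr and report well-formedness.
static bool isWellFormed(llvm::Function &F) {
  return !llvm::verifyFunction(F, &llvm::errs());
}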
AllocFnKind
Definition: Attributes.h:49
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
void initializeVerifierLegacyPassPass(PassRegistry &)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2067
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:280
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7521
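A hedged sketch of the legacy-pass-manager counterpart to VerifierPass; legacy::PassManager is assumed from llvm/IR/LegacyPassManager.h, not from this page, and the helper name is hypothetical.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"

// Hypothetical helper: run the same verifier through the legacy pass manager.
static void verifyWithLegacyPM(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // FatalErrors=true aborts compilation via report_fatal_error on broken IR.
  PM.add(llvm::createVerifierPass(/*FatalErrors=*/true));
  PM.run(M);
}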
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7106
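A hedged sketch combining verifyModule with the BrokenDebugInfo out-parameter documented under VerifierSupport above; StripDebugInfo is assumed from llvm/IR/DebugInfo.h, and the helper name is hypothetical.

#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: reject modules with broken IR, but treat broken debug
// info as recoverable by stripping it rather than failing.
static bool verifyOrStripDebugInfo(llvm::Module &M) {
  bool BrokenDebugInfo = false;
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    return false;              // the IR itself is malformed
  if (BrokenDebugInfo)
    llvm::StripDebugInfo(M);   // debug info was invalid; drop it, keep the IR
  return true;
}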
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:271
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:28
Holds the characteristics of one fragment of a larger variable.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1131
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1159
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1132
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:150
raw_ostream * OS
Definition: Verifier.cpp:142
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:152
LLVMContext & Context
Definition: Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition: Verifier.cpp:286
const Module & M
Definition: Verifier.cpp:143
const DataLayout & DL
Definition: Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:313
ModuleSlotTracker MST
Definition: Verifier.cpp:144