LLVM 19.0.0git
Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications;
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify, for example, that shifts & logicals only happen on integrals.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
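// Example usage (a minimal sketch): the checks implemented in this file are
// normally reached through the entry points declared in llvm/IR/Verifier.h,
// both of which return true when broken IR is found. The helper name
// `checkIR` below is illustrative only.
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool checkIR(const llvm::Module &M, const llvm::Function &F) {
//     bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//     bool FunctionBroken = llvm::verifyFunction(F, &llvm::errs());
//     return !ModuleBroken && !FunctionBroken; // true means the IR verified
//   }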
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
76#include "llvm/IR/Constants.h"
78#include "llvm/IR/DataLayout.h"
79#include "llvm/IR/DebugInfo.h"
81#include "llvm/IR/DebugLoc.h"
83#include "llvm/IR/Dominators.h"
85#include "llvm/IR/Function.h"
86#include "llvm/IR/GCStrategy.h"
87#include "llvm/IR/GlobalAlias.h"
88#include "llvm/IR/GlobalValue.h"
90#include "llvm/IR/InlineAsm.h"
91#include "llvm/IR/InstVisitor.h"
92#include "llvm/IR/InstrTypes.h"
93#include "llvm/IR/Instruction.h"
96#include "llvm/IR/Intrinsics.h"
97#include "llvm/IR/IntrinsicsAArch64.h"
98#include "llvm/IR/IntrinsicsAMDGPU.h"
99#include "llvm/IR/IntrinsicsARM.h"
100#include "llvm/IR/IntrinsicsNVPTX.h"
101#include "llvm/IR/IntrinsicsWebAssembly.h"
102#include "llvm/IR/LLVMContext.h"
104#include "llvm/IR/Metadata.h"
105#include "llvm/IR/Module.h"
107#include "llvm/IR/PassManager.h"
109#include "llvm/IR/Statepoint.h"
110#include "llvm/IR/Type.h"
111#include "llvm/IR/Use.h"
112#include "llvm/IR/User.h"
114#include "llvm/IR/Value.h"
116#include "llvm/Pass.h"
118#include "llvm/Support/Casting.h"
122#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
141struct VerifierSupport {
142 raw_ostream *OS;
143 const Module &M;
144 ModuleSlotTracker MST;
145 Triple TT;
146 const DataLayout &DL;
147 LLVMContext &Context;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
154 bool TreatBrokenDebugInfoAsError = true;
155
156 explicit VerifierSupport(raw_ostream *OS, const Module &M)
157 : OS(OS), M(M), MST(&M), TT(Triple::normalize(M.getTargetTriple())),
158 DL(M.getDataLayout()), Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
187 void Write(DbgVariableRecord::LocationType Type) {
188 switch (Type) {
189 case DbgVariableRecord::LocationType::Value:
190 *OS << "value";
191 break;
192 case DbgVariableRecord::LocationType::Declare:
193 *OS << "declare";
194 break;
195 case DbgVariableRecord::LocationType::Assign:
196 *OS << "assign";
197 break;
198 case DbgVariableRecord::LocationType::End:
199 *OS << "end";
200 break;
201 case DbgVariableRecord::LocationType::Any:
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
273 template <typename T1, typename... Ts>
274 void WriteTs(const T1 &V1, const Ts &... Vs) {
275 Write(V1);
276 WriteTs(Vs...);
277 }
278
279 template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so print out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
337 SmallPtrSet<const Metadata *, 32> MDNodes;
338
339 /// Keep track which DISubprogram is attached to which function.
340 DenseMap<const Function *, const DISubprogram *> DISubprogramAttachments;
341
342 /// Track all DICompileUnits visited.
343 SmallPtrSet<const Metadata *, 2> CUVisited;
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
357 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
361 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
383 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
393 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
394 const Module &M)
395 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
396 SawFrameEscape(false), TBAAVerifyHelper(this) {
397 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
398 }
399
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
402 bool verify(const Function &F) {
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482
483 verifyCompileUnits();
484
485 verifyDeoptimizeCallingConvs();
486 DISubprogramAttachments.clear();
487 return !Broken;
488 }
489
490private:
491 /// Whether a metadata node is allowed to be, or contain, a DILocation.
492 enum class AreDebugLocsAllowed { No, Yes };
493
494 // Verification methods...
495 void visitGlobalValue(const GlobalValue &GV);
496 void visitGlobalVariable(const GlobalVariable &GV);
497 void visitGlobalAlias(const GlobalAlias &GA);
498 void visitGlobalIFunc(const GlobalIFunc &GI);
499 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
500 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
501 const GlobalAlias &A, const Constant &C);
502 void visitNamedMDNode(const NamedMDNode &NMD);
503 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
504 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
505 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
506 void visitDIArgList(const DIArgList &AL, Function *F);
507 void visitComdat(const Comdat &C);
508 void visitModuleIdents();
509 void visitModuleCommandLines();
510 void visitModuleFlags();
511 void visitModuleFlag(const MDNode *Op,
512 DenseMap<const MDString *, const MDNode *> &SeenIDs,
513 SmallVectorImpl<const MDNode *> &Requirements);
514 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
515 void visitFunction(const Function &F);
516 void visitBasicBlock(BasicBlock &BB);
517 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
518 bool IsAbsoluteSymbol);
519 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
520 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
521 void visitProfMetadata(Instruction &I, MDNode *MD);
522 void visitCallStackMetadata(MDNode *MD);
523 void visitMemProfMetadata(Instruction &I, MDNode *MD);
524 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
525 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
526 void visitMMRAMetadata(Instruction &I, MDNode *MD);
527 void visitAnnotationMetadata(MDNode *Annotation);
528 void visitAliasScopeMetadata(const MDNode *MD);
529 void visitAliasScopeListMetadata(const MDNode *MD);
530 void visitAccessGroupMetadata(const MDNode *MD);
531
532 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
533#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
534#include "llvm/IR/Metadata.def"
535 void visitDIScope(const DIScope &N);
536 void visitDIVariable(const DIVariable &N);
537 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
538 void visitDITemplateParameter(const DITemplateParameter &N);
539
540 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
541
542 void visit(DbgLabelRecord &DLR);
543 void visit(DbgVariableRecord &DVR);
544 // InstVisitor overrides...
545 using InstVisitor<Verifier>::visit;
546 void visitDbgRecords(Instruction &I);
547 void visit(Instruction &I);
548
549 void visitTruncInst(TruncInst &I);
550 void visitZExtInst(ZExtInst &I);
551 void visitSExtInst(SExtInst &I);
552 void visitFPTruncInst(FPTruncInst &I);
553 void visitFPExtInst(FPExtInst &I);
554 void visitFPToUIInst(FPToUIInst &I);
555 void visitFPToSIInst(FPToSIInst &I);
556 void visitUIToFPInst(UIToFPInst &I);
557 void visitSIToFPInst(SIToFPInst &I);
558 void visitIntToPtrInst(IntToPtrInst &I);
559 void visitPtrToIntInst(PtrToIntInst &I);
560 void visitBitCastInst(BitCastInst &I);
561 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
562 void visitPHINode(PHINode &PN);
563 void visitCallBase(CallBase &Call);
564 void visitUnaryOperator(UnaryOperator &U);
565 void visitBinaryOperator(BinaryOperator &B);
566 void visitICmpInst(ICmpInst &IC);
567 void visitFCmpInst(FCmpInst &FC);
568 void visitExtractElementInst(ExtractElementInst &EI);
569 void visitInsertElementInst(InsertElementInst &EI);
570 void visitShuffleVectorInst(ShuffleVectorInst &EI);
571 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
572 void visitCallInst(CallInst &CI);
573 void visitInvokeInst(InvokeInst &II);
574 void visitGetElementPtrInst(GetElementPtrInst &GEP);
575 void visitLoadInst(LoadInst &LI);
576 void visitStoreInst(StoreInst &SI);
577 void verifyDominatesUse(Instruction &I, unsigned i);
578 void visitInstruction(Instruction &I);
579 void visitTerminator(Instruction &I);
580 void visitBranchInst(BranchInst &BI);
581 void visitReturnInst(ReturnInst &RI);
582 void visitSwitchInst(SwitchInst &SI);
583 void visitIndirectBrInst(IndirectBrInst &BI);
584 void visitCallBrInst(CallBrInst &CBI);
585 void visitSelectInst(SelectInst &SI);
586 void visitUserOp1(Instruction &I);
587 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
588 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
589 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
590 void visitVPIntrinsic(VPIntrinsic &VPI);
591 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
592 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
593 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
594 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
595 void visitFenceInst(FenceInst &FI);
596 void visitAllocaInst(AllocaInst &AI);
597 void visitExtractValueInst(ExtractValueInst &EVI);
598 void visitInsertValueInst(InsertValueInst &IVI);
599 void visitEHPadPredecessors(Instruction &I);
600 void visitLandingPadInst(LandingPadInst &LPI);
601 void visitResumeInst(ResumeInst &RI);
602 void visitCatchPadInst(CatchPadInst &CPI);
603 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
604 void visitCleanupPadInst(CleanupPadInst &CPI);
605 void visitFuncletPadInst(FuncletPadInst &FPI);
606 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
607 void visitCleanupReturnInst(CleanupReturnInst &CRI);
608
609 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
610 void verifySwiftErrorValue(const Value *SwiftErrorVal);
611 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
612 void verifyMustTailCall(CallInst &CI);
613 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
614 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
615 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
616 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
617 const Value *V);
618 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
619 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
620 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
621
622 void visitConstantExprsRecursively(const Constant *EntryC);
623 void visitConstantExpr(const ConstantExpr *CE);
624 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
625 void verifyInlineAsmCall(const CallBase &Call);
626 void verifyStatepoint(const CallBase &Call);
627 void verifyFrameRecoverIndices();
628 void verifySiblingFuncletUnwinds();
629
630 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
631 void verifyFragmentExpression(const DbgVariableRecord &I);
632 template <typename ValueOrMetadata>
633 void verifyFragmentExpression(const DIVariable &V,
634 DIExpression::FragmentInfo Fragment,
635 ValueOrMetadata *Desc);
636 void verifyFnArgs(const DbgVariableIntrinsic &I);
637 void verifyFnArgs(const DbgVariableRecord &DVR);
638 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
639 void verifyNotEntryValue(const DbgVariableRecord &I);
640
641 /// Module-level debug info verification...
642 void verifyCompileUnits();
643
644 /// Module-level verification that all @llvm.experimental.deoptimize
645 /// declarations share the same calling convention.
646 void verifyDeoptimizeCallingConvs();
647
648 void verifyAttachedCallBundle(const CallBase &Call,
649 const OperandBundleUse &BU);
650
651 /// Verify the llvm.experimental.noalias.scope.decl declarations
652 void verifyNoAliasScopeDecl();
653};
654
655} // end anonymous namespace
656
657/// We know that cond should be true, if not print an error message.
658#define Check(C, ...) \
659 do { \
660 if (!(C)) { \
661 CheckFailed(__VA_ARGS__); \
662 return; \
663 } \
664 } while (false)
665
666/// We know that a debug info condition should be true, if not print
667/// an error message.
668#define CheckDI(C, ...) \
669 do { \
670 if (!(C)) { \
671 DebugInfoCheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
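// A typical use inside a visitor method looks like (a sketch; the message text
// is illustrative):
//   Check(PN.getNumIncomingValues() > 0, "PHI node has no incoming values", &PN);
// On failure the macros print the message and any extra values through
// CheckFailed / DebugInfoCheckFailed, mark the IR (or its debug info) broken,
// and return from the enclosing visit method.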
675
676void Verifier::visitDbgRecords(Instruction &I) {
677 if (!I.DebugMarker)
678 return;
679 CheckDI(I.DebugMarker->MarkedInstr == &I,
680 "Instruction has invalid DebugMarker", &I);
681 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
682 "PHI Node must not have any attached DbgRecords", &I);
683 for (DbgRecord &DR : I.getDbgRecordRange()) {
684 CheckDI(DR.getMarker() == I.DebugMarker,
685 "DbgRecord had invalid DebugMarker", &I, &DR);
686 if (auto *Loc =
687 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
688 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
689 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
690 visit(*DVR);
691 // These have to appear after `visit` for consistency with existing
692 // intrinsic behaviour.
693 verifyFragmentExpression(*DVR);
694 verifyNotEntryValue(*DVR);
695 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
696 visit(*DLR);
697 }
698 }
699}
700
701void Verifier::visit(Instruction &I) {
702 visitDbgRecords(I);
703 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
704 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
705 InstVisitor<Verifier>::visit(I);
706}
707
708// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
709static void forEachUser(const Value *User,
710 SmallPtrSet<const Value *, 32> &Visited,
711 llvm::function_ref<bool(const Value *)> Callback) {
712 if (!Visited.insert(User).second)
713 return;
714
715 SmallVector<const Value *> WorkList;
716 append_range(WorkList, User->materialized_users());
717 while (!WorkList.empty()) {
718 const Value *Cur = WorkList.pop_back_val();
719 if (!Visited.insert(Cur).second)
720 continue;
721 if (Callback(Cur))
722 append_range(WorkList, Cur->materialized_users());
723 }
724}
725
726void Verifier::visitGlobalValue(const GlobalValue &GV) {
728 "Global is external, but doesn't have external or weak linkage!", &GV);
729
730 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
731
732 if (MaybeAlign A = GO->getAlign()) {
733 Check(A->value() <= Value::MaximumAlignment,
734 "huge alignment values are unsupported", GO);
735 }
736
737 if (const MDNode *Associated =
738 GO->getMetadata(LLVMContext::MD_associated)) {
739 Check(Associated->getNumOperands() == 1,
740 "associated metadata must have one operand", &GV, Associated);
741 const Metadata *Op = Associated->getOperand(0).get();
742 Check(Op, "associated metadata must have a global value", GO, Associated);
743
744 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
745 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
746 if (VM) {
747 Check(isa<PointerType>(VM->getValue()->getType()),
748 "associated value must be pointer typed", GV, Associated);
749
750 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
751 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
752 "associated metadata must point to a GlobalObject", GO, Stripped);
753 Check(Stripped != GO,
754 "global values should not associate to themselves", GO,
755 Associated);
756 }
757 }
758
759 // FIXME: Why is getMetadata on GlobalValue protected?
760 if (const MDNode *AbsoluteSymbol =
761 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
762 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
763 true);
764 }
765 }
766
767 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
768 "Only global variables can have appending linkage!", &GV);
769
770 if (GV.hasAppendingLinkage()) {
771 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
772 Check(GVar && GVar->getValueType()->isArrayTy(),
773 "Only global arrays can have appending linkage!", GVar);
774 }
775
776 if (GV.isDeclarationForLinker())
777 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
778
779 if (GV.hasDLLExportStorageClass()) {
781 "dllexport GlobalValue must have default or protected visibility",
782 &GV);
783 }
784 if (GV.hasDLLImportStorageClass()) {
786 "dllimport GlobalValue must have default visibility", &GV);
787 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
788 &GV);
789
790 Check((GV.isDeclaration() &&
793 "Global is marked as dllimport, but not external", &GV);
794 }
795
796 if (GV.isImplicitDSOLocal())
797 Check(GV.isDSOLocal(),
798 "GlobalValue with local linkage or non-default "
799 "visibility must be dso_local!",
800 &GV);
801
802 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
803 if (const Instruction *I = dyn_cast<Instruction>(V)) {
804 if (!I->getParent() || !I->getParent()->getParent())
805 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
806 I);
807 else if (I->getParent()->getParent()->getParent() != &M)
808 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
809 I->getParent()->getParent(),
810 I->getParent()->getParent()->getParent());
811 return false;
812 } else if (const Function *F = dyn_cast<Function>(V)) {
813 if (F->getParent() != &M)
814 CheckFailed("Global is used by function in a different module", &GV, &M,
815 F, F->getParent());
816 return false;
817 }
818 return true;
819 });
820}
821
822void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
823 if (GV.hasInitializer()) {
825 "Global variable initializer type does not match global "
826 "variable type!",
827 &GV);
828 // If the global has common linkage, it must have a zero initializer and
829 // cannot be constant.
830 if (GV.hasCommonLinkage()) {
832 "'common' global must have a zero initializer!", &GV);
833 Check(!GV.isConstant(), "'common' global may not be marked constant!",
834 &GV);
835 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
836 }
837 }
838
839 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
840 GV.getName() == "llvm.global_dtors")) {
842 "invalid linkage for intrinsic global variable", &GV);
844 "invalid uses of intrinsic global variable", &GV);
845
846 // Don't worry about emitting an error for it not being an array,
847 // visitGlobalValue will complain on appending non-array.
848 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
849 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
850 PointerType *FuncPtrTy =
851 PointerType::get(Context, DL.getProgramAddressSpace());
852 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
853 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
854 STy->getTypeAtIndex(1) == FuncPtrTy,
855 "wrong type for intrinsic global variable", &GV);
856 Check(STy->getNumElements() == 3,
857 "the third field of the element type is mandatory, "
858 "specify ptr null to migrate from the obsoleted 2-field form");
859 Type *ETy = STy->getTypeAtIndex(2);
860 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
861 &GV);
862 }
863 }
864
865 if (GV.hasName() && (GV.getName() == "llvm.used" ||
866 GV.getName() == "llvm.compiler.used")) {
868 "invalid linkage for intrinsic global variable", &GV);
870 "invalid uses of intrinsic global variable", &GV);
871
872 Type *GVType = GV.getValueType();
873 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
874 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
875 Check(PTy, "wrong type for intrinsic global variable", &GV);
876 if (GV.hasInitializer()) {
877 const Constant *Init = GV.getInitializer();
878 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
879 Check(InitArray, "wrong initializer for intrinsic global variable",
880 Init);
881 for (Value *Op : InitArray->operands()) {
882 Value *V = Op->stripPointerCasts();
883 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
884 isa<GlobalAlias>(V),
885 Twine("invalid ") + GV.getName() + " member", V);
886 Check(V->hasName(),
887 Twine("members of ") + GV.getName() + " must be named", V);
888 }
889 }
890 }
891 }
892
893 // Visit any debug info attachments.
894 SmallVector<MDNode *, 1> MDs;
895 GV.getMetadata(LLVMContext::MD_dbg, MDs);
896 for (auto *MD : MDs) {
897 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
898 visitDIGlobalVariableExpression(*GVE);
899 else
900 CheckDI(false, "!dbg attachment of global variable must be a "
901 "DIGlobalVariableExpression");
902 }
903
904 // Scalable vectors cannot be global variables, since we don't know
905 // the runtime size.
907 "Globals cannot contain scalable types", &GV);
908
909 // Check if it's a target extension type that disallows being used as a
910 // global.
911 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
912 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
913 "Global @" + GV.getName() + " has illegal target extension type",
914 TTy);
915
916 if (!GV.hasInitializer()) {
917 visitGlobalValue(GV);
918 return;
919 }
920
921 // Walk any aggregate initializers looking for bitcasts between address spaces
922 visitConstantExprsRecursively(GV.getInitializer());
923
924 visitGlobalValue(GV);
925}
926
927void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
928 SmallPtrSet<const GlobalAlias *, 4> Visited;
929 Visited.insert(&GA);
930 visitAliaseeSubExpr(Visited, GA, C);
931}
932
933void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
934 const GlobalAlias &GA, const Constant &C) {
935 if (GA.hasAvailableExternallyLinkage()) {
936 Check(isa<GlobalValue>(C) &&
937 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
938 "available_externally alias must point to available_externally "
939 "global value",
940 &GA);
941 }
942 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
943 if (!GA.hasAvailableExternallyLinkage()) {
944 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
945 &GA);
946 }
947
948 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
949 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
950
951 Check(!GA2->isInterposable(),
952 "Alias cannot point to an interposable alias", &GA);
953 } else {
954 // Only continue verifying subexpressions of GlobalAliases.
955 // Do not recurse into global initializers.
956 return;
957 }
958 }
959
960 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
961 visitConstantExprsRecursively(CE);
962
963 for (const Use &U : C.operands()) {
964 Value *V = &*U;
965 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
966 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
967 else if (const auto *C2 = dyn_cast<Constant>(V))
968 visitAliaseeSubExpr(Visited, GA, *C2);
969 }
970}
971
972void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
974 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
975 "weak_odr, external, or available_externally linkage!",
976 &GA);
977 const Constant *Aliasee = GA.getAliasee();
978 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
979 Check(GA.getType() == Aliasee->getType(),
980 "Alias and aliasee types should match!", &GA);
981
982 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
983 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
984
985 visitAliaseeSubExpr(GA, *Aliasee);
986
987 visitGlobalValue(GA);
988}
989
990void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
992 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
993 "weak_odr, or external linkage!",
994 &GI);
995 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
996 // is a Function definition.
998 Check(Resolver, "IFunc must have a Function resolver", &GI);
999 Check(!Resolver->isDeclarationForLinker(),
1000 "IFunc resolver must be a definition", &GI);
1001
1002 // Check that the immediate resolver operand (prior to any bitcasts) has the
1003 // correct type.
1004 const Type *ResolverTy = GI.getResolver()->getType();
1005
1006 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1007 "IFunc resolver must return a pointer", &GI);
1008
1009 const Type *ResolverFuncTy =
1011 Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
1012 "IFunc resolver has incorrect type", &GI);
1013}
1014
1015void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1016 // There used to be various other llvm.dbg.* nodes, but we don't support
1017 // upgrading them and we want to reserve the namespace for future uses.
1018 if (NMD.getName().starts_with("llvm.dbg."))
1019 CheckDI(NMD.getName() == "llvm.dbg.cu",
1020 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1021 for (const MDNode *MD : NMD.operands()) {
1022 if (NMD.getName() == "llvm.dbg.cu")
1023 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1024
1025 if (!MD)
1026 continue;
1027
1028 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1029 }
1030}
1031
1032void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1033 // Only visit each node once. Metadata can be mutually recursive, so this
1034 // avoids infinite recursion here, as well as being an optimization.
1035 if (!MDNodes.insert(&MD).second)
1036 return;
1037
1038 Check(&MD.getContext() == &Context,
1039 "MDNode context does not match Module context!", &MD);
1040
1041 switch (MD.getMetadataID()) {
1042 default:
1043 llvm_unreachable("Invalid MDNode subclass");
1044 case Metadata::MDTupleKind:
1045 break;
1046#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1047 case Metadata::CLASS##Kind: \
1048 visit##CLASS(cast<CLASS>(MD)); \
1049 break;
1050#include "llvm/IR/Metadata.def"
1051 }
1052
1053 for (const Metadata *Op : MD.operands()) {
1054 if (!Op)
1055 continue;
1056 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1057 &MD, Op);
1058 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1059 "DILocation not allowed within this metadata node", &MD, Op);
1060 if (auto *N = dyn_cast<MDNode>(Op)) {
1061 visitMDNode(*N, AllowLocs);
1062 continue;
1063 }
1064 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1065 visitValueAsMetadata(*V, nullptr);
1066 continue;
1067 }
1068 }
1069
1070 // Check these last, so we diagnose problems in operands first.
1071 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1072 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1073}
1074
1075void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1076 Check(MD.getValue(), "Expected valid value", &MD);
1077 Check(!MD.getValue()->getType()->isMetadataTy(),
1078 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1079
1080 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1081 if (!L)
1082 return;
1083
1084 Check(F, "function-local metadata used outside a function", L);
1085
1086 // If this was an instruction, bb, or argument, verify that it is in the
1087 // function that we expect.
1088 Function *ActualF = nullptr;
1089 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1090 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1091 ActualF = I->getParent()->getParent();
1092 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1093 ActualF = BB->getParent();
1094 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1095 ActualF = A->getParent();
1096 assert(ActualF && "Unimplemented function local metadata case!");
1097
1098 Check(ActualF == F, "function-local metadata used in wrong function", L);
1099}
1100
1101void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1102 for (const ValueAsMetadata *VAM : AL.getArgs())
1103 visitValueAsMetadata(*VAM, F);
1104}
1105
1106void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1107 Metadata *MD = MDV.getMetadata();
1108 if (auto *N = dyn_cast<MDNode>(MD)) {
1109 visitMDNode(*N, AreDebugLocsAllowed::No);
1110 return;
1111 }
1112
1113 // Only visit each node once. Metadata can be mutually recursive, so this
1114 // avoids infinite recursion here, as well as being an optimization.
1115 if (!MDNodes.insert(MD).second)
1116 return;
1117
1118 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1119 visitValueAsMetadata(*V, F);
1120
1121 if (auto *AL = dyn_cast<DIArgList>(MD))
1122 visitDIArgList(*AL, F);
1123}
1124
1125static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1126static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1127static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1128
1129void Verifier::visitDILocation(const DILocation &N) {
1130 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1131 "location requires a valid scope", &N, N.getRawScope());
1132 if (auto *IA = N.getRawInlinedAt())
1133 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1134 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1135 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1136}
1137
1138void Verifier::visitGenericDINode(const GenericDINode &N) {
1139 CheckDI(N.getTag(), "invalid tag", &N);
1140}
1141
1142void Verifier::visitDIScope(const DIScope &N) {
1143 if (auto *F = N.getRawFile())
1144 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1145}
1146
1147void Verifier::visitDISubrange(const DISubrange &N) {
1148 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1149 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1150 "Subrange can have any one of count or upperBound", &N);
1151 auto *CBound = N.getRawCountNode();
1152 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1153 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1154 "Count must be signed constant or DIVariable or DIExpression", &N);
1155 auto Count = N.getCount();
1156 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1157 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1158 "invalid subrange count", &N);
1159 auto *LBound = N.getRawLowerBound();
1160 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1161 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1162 "LowerBound must be signed constant or DIVariable or DIExpression",
1163 &N);
1164 auto *UBound = N.getRawUpperBound();
1165 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1166 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1167 "UpperBound must be signed constant or DIVariable or DIExpression",
1168 &N);
1169 auto *Stride = N.getRawStride();
1170 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1171 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1172 "Stride must be signed constant or DIVariable or DIExpression", &N);
1173}
1174
1175void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1176 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1177 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1178 "GenericSubrange can have any one of count or upperBound", &N);
1179 auto *CBound = N.getRawCountNode();
1180 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1181 "Count must be signed constant or DIVariable or DIExpression", &N);
1182 auto *LBound = N.getRawLowerBound();
1183 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1184 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1185 "LowerBound must be signed constant or DIVariable or DIExpression",
1186 &N);
1187 auto *UBound = N.getRawUpperBound();
1188 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1189 "UpperBound must be signed constant or DIVariable or DIExpression",
1190 &N);
1191 auto *Stride = N.getRawStride();
1192 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1193 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1194 "Stride must be signed constant or DIVariable or DIExpression", &N);
1195}
1196
1197void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1198 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1199}
1200
1201void Verifier::visitDIBasicType(const DIBasicType &N) {
1202 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1203 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1204 N.getTag() == dwarf::DW_TAG_string_type,
1205 "invalid tag", &N);
1206}
1207
1208void Verifier::visitDIStringType(const DIStringType &N) {
1209 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1210 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1211 &N);
1212}
1213
1214void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1215 // Common scope checks.
1216 visitDIScope(N);
1217
1218 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1219 N.getTag() == dwarf::DW_TAG_pointer_type ||
1220 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1221 N.getTag() == dwarf::DW_TAG_reference_type ||
1222 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1223 N.getTag() == dwarf::DW_TAG_const_type ||
1224 N.getTag() == dwarf::DW_TAG_immutable_type ||
1225 N.getTag() == dwarf::DW_TAG_volatile_type ||
1226 N.getTag() == dwarf::DW_TAG_restrict_type ||
1227 N.getTag() == dwarf::DW_TAG_atomic_type ||
1228 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1229 N.getTag() == dwarf::DW_TAG_member ||
1230 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1231 N.getTag() == dwarf::DW_TAG_inheritance ||
1232 N.getTag() == dwarf::DW_TAG_friend ||
1233 N.getTag() == dwarf::DW_TAG_set_type ||
1234 N.getTag() == dwarf::DW_TAG_template_alias,
1235 "invalid tag", &N);
1236 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1237 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1238 N.getRawExtraData());
1239 }
1240
1241 if (N.getTag() == dwarf::DW_TAG_set_type) {
1242 if (auto *T = N.getRawBaseType()) {
1243 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1244 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1245 CheckDI(
1246 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1247 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1248 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1249 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1250 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1251 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1252 "invalid set base type", &N, T);
1253 }
1254 }
1255
1256 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1257 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1258 N.getRawBaseType());
1259
1260 if (N.getDWARFAddressSpace()) {
1261 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1262 N.getTag() == dwarf::DW_TAG_reference_type ||
1263 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1264 "DWARF address space only applies to pointer or reference types",
1265 &N);
1266 }
1267}
1268
1269/// Detect mutually exclusive flags.
1270static bool hasConflictingReferenceFlags(unsigned Flags) {
1271 return ((Flags & DINode::FlagLValueReference) &&
1272 (Flags & DINode::FlagRValueReference)) ||
1273 ((Flags & DINode::FlagTypePassByValue) &&
1274 (Flags & DINode::FlagTypePassByReference));
1275}
1276
1277void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1278 auto *Params = dyn_cast<MDTuple>(&RawParams);
1279 CheckDI(Params, "invalid template params", &N, &RawParams);
1280 for (Metadata *Op : Params->operands()) {
1281 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1282 &N, Params, Op);
1283 }
1284}
1285
1286void Verifier::visitDICompositeType(const DICompositeType &N) {
1287 // Common scope checks.
1288 visitDIScope(N);
1289
1290 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1291 N.getTag() == dwarf::DW_TAG_structure_type ||
1292 N.getTag() == dwarf::DW_TAG_union_type ||
1293 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1294 N.getTag() == dwarf::DW_TAG_class_type ||
1295 N.getTag() == dwarf::DW_TAG_variant_part ||
1296 N.getTag() == dwarf::DW_TAG_namelist,
1297 "invalid tag", &N);
1298
1299 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1300 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1301 N.getRawBaseType());
1302
1303 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1304 "invalid composite elements", &N, N.getRawElements());
1305 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1306 N.getRawVTableHolder());
1308 "invalid reference flags", &N);
1309 unsigned DIBlockByRefStruct = 1 << 4;
1310 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1311 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1312
1313 if (N.isVector()) {
1314 const DINodeArray Elements = N.getElements();
1315 CheckDI(Elements.size() == 1 &&
1316 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1317 "invalid vector, expected one element of type subrange", &N);
1318 }
1319
1320 if (auto *Params = N.getRawTemplateParams())
1321 visitTemplateParams(N, *Params);
1322
1323 if (auto *D = N.getRawDiscriminator()) {
1324 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1325 "discriminator can only appear on variant part");
1326 }
1327
1328 if (N.getRawDataLocation()) {
1329 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1330 "dataLocation can only appear in array type");
1331 }
1332
1333 if (N.getRawAssociated()) {
1334 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1335 "associated can only appear in array type");
1336 }
1337
1338 if (N.getRawAllocated()) {
1339 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1340 "allocated can only appear in array type");
1341 }
1342
1343 if (N.getRawRank()) {
1344 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1345 "rank can only appear in array type");
1346 }
1347
1348 if (N.getTag() == dwarf::DW_TAG_array_type) {
1349 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1350 }
1351}
1352
1353void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1354 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1355 if (auto *Types = N.getRawTypeArray()) {
1356 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1357 for (Metadata *Ty : N.getTypeArray()->operands()) {
1358 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1359 }
1360 }
1362 "invalid reference flags", &N);
1363}
1364
1365void Verifier::visitDIFile(const DIFile &N) {
1366 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1367 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1368 if (Checksum) {
1369 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1370 "invalid checksum kind", &N);
1371 size_t Size;
1372 switch (Checksum->Kind) {
1373 case DIFile::CSK_MD5:
1374 Size = 32;
1375 break;
1376 case DIFile::CSK_SHA1:
1377 Size = 40;
1378 break;
1379 case DIFile::CSK_SHA256:
1380 Size = 64;
1381 break;
1382 }
1383 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1384 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1385 "invalid checksum", &N);
1386 }
1387}
1388
1389void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1390 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1391 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1392
1393 // Don't bother verifying the compilation directory or producer string
1394 // as those could be empty.
1395 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1396 N.getRawFile());
1397 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1398 N.getFile());
1399
1400 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1401 "invalid emission kind", &N);
1402
1403 if (auto *Array = N.getRawEnumTypes()) {
1404 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1405 for (Metadata *Op : N.getEnumTypes()->operands()) {
1406 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1407 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1408 "invalid enum type", &N, N.getEnumTypes(), Op);
1409 }
1410 }
1411 if (auto *Array = N.getRawRetainedTypes()) {
1412 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1413 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1414 CheckDI(
1415 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1416 !cast<DISubprogram>(Op)->isDefinition())),
1417 "invalid retained type", &N, Op);
1418 }
1419 }
1420 if (auto *Array = N.getRawGlobalVariables()) {
1421 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1422 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1423 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1424 "invalid global variable ref", &N, Op);
1425 }
1426 }
1427 if (auto *Array = N.getRawImportedEntities()) {
1428 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1429 for (Metadata *Op : N.getImportedEntities()->operands()) {
1430 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1431 &N, Op);
1432 }
1433 }
1434 if (auto *Array = N.getRawMacros()) {
1435 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1436 for (Metadata *Op : N.getMacros()->operands()) {
1437 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1438 }
1439 }
1440 CUVisited.insert(&N);
1441}
1442
1443void Verifier::visitDISubprogram(const DISubprogram &N) {
1444 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1445 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1446 if (auto *F = N.getRawFile())
1447 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1448 else
1449 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1450 if (auto *T = N.getRawType())
1451 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1452 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1453 N.getRawContainingType());
1454 if (auto *Params = N.getRawTemplateParams())
1455 visitTemplateParams(N, *Params);
1456 if (auto *S = N.getRawDeclaration())
1457 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1458 "invalid subprogram declaration", &N, S);
1459 if (auto *RawNode = N.getRawRetainedNodes()) {
1460 auto *Node = dyn_cast<MDTuple>(RawNode);
1461 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1462 for (Metadata *Op : Node->operands()) {
1463 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1464 isa<DIImportedEntity>(Op)),
1465 "invalid retained nodes, expected DILocalVariable, DILabel or "
1466 "DIImportedEntity",
1467 &N, Node, Op);
1468 }
1469 }
1471 "invalid reference flags", &N);
1472
1473 auto *Unit = N.getRawUnit();
1474 if (N.isDefinition()) {
1475 // Subprogram definitions (not part of the type hierarchy).
1476 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1477 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1478 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1479 // There's no good way to cross the CU boundary to insert a nested
1480 // DISubprogram definition in one CU into a type defined in another CU.
1481 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1482 if (CT && CT->getRawIdentifier() &&
1483 M.getContext().isODRUniquingDebugTypes())
1484 CheckDI(N.getDeclaration(),
1485 "definition subprograms cannot be nested within DICompositeType "
1486 "when enabling ODR",
1487 &N);
1488 } else {
1489 // Subprogram declarations (part of the type hierarchy).
1490 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1491 CheckDI(!N.getRawDeclaration(),
1492 "subprogram declaration must not have a declaration field");
1493 }
1494
1495 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1496 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1497 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1498 for (Metadata *Op : ThrownTypes->operands())
1499 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1500 Op);
1501 }
1502
1503 if (N.areAllCallsDescribed())
1504 CheckDI(N.isDefinition(),
1505 "DIFlagAllCallsDescribed must be attached to a definition");
1506}
1507
1508void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1509 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1510 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1511 "invalid local scope", &N, N.getRawScope());
1512 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1513 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1514}
1515
1516void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1517 visitDILexicalBlockBase(N);
1518
1519 CheckDI(N.getLine() || !N.getColumn(),
1520 "cannot have column info without line info", &N);
1521}
1522
1523void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1524 visitDILexicalBlockBase(N);
1525}
1526
1527void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1528 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1529 if (auto *S = N.getRawScope())
1530 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1531 if (auto *S = N.getRawDecl())
1532 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1533}
1534
1535void Verifier::visitDINamespace(const DINamespace &N) {
1536 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1537 if (auto *S = N.getRawScope())
1538 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1539}
1540
1541void Verifier::visitDIMacro(const DIMacro &N) {
1542 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1543 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1544 "invalid macinfo type", &N);
1545 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1546 if (!N.getValue().empty()) {
1547 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1548 }
1549}
1550
1551void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1552 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1553 "invalid macinfo type", &N);
1554 if (auto *F = N.getRawFile())
1555 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1556
1557 if (auto *Array = N.getRawElements()) {
1558 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1559 for (Metadata *Op : N.getElements()->operands()) {
1560 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1561 }
1562 }
1563}
1564
1565void Verifier::visitDIModule(const DIModule &N) {
1566 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1567 CheckDI(!N.getName().empty(), "anonymous module", &N);
1568}
1569
1570void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1571 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1572}
1573
1574void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1575 visitDITemplateParameter(N);
1576
1577 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1578 &N);
1579}
1580
1581void Verifier::visitDITemplateValueParameter(
1582 const DITemplateValueParameter &N) {
1583 visitDITemplateParameter(N);
1584
1585 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1586 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1587 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1588 "invalid tag", &N);
1589}
1590
1591void Verifier::visitDIVariable(const DIVariable &N) {
1592 if (auto *S = N.getRawScope())
1593 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1594 if (auto *F = N.getRawFile())
1595 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1596}
1597
1598void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1599 // Checks common to all variables.
1600 visitDIVariable(N);
1601
1602 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1603 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1604 // Check only if the global variable is not an extern
1605 if (N.isDefinition())
1606 CheckDI(N.getType(), "missing global variable type", &N);
1607 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1608 CheckDI(isa<DIDerivedType>(Member),
1609 "invalid static data member declaration", &N, Member);
1610 }
1611}
1612
1613void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1614 // Checks common to all variables.
1615 visitDIVariable(N);
1616
1617 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1618 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1619 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1620 "local variable requires a valid scope", &N, N.getRawScope());
1621 if (auto Ty = N.getType())
1622 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1623}
1624
1625void Verifier::visitDIAssignID(const DIAssignID &N) {
1626 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1627 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1628}
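// Illustrative example: a DIAssignID satisfying these checks is a distinct,
// operand-less node, e.g.
//   !5 = distinct !DIAssignID()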
1629
1630void Verifier::visitDILabel(const DILabel &N) {
1631 if (auto *S = N.getRawScope())
1632 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1633 if (auto *F = N.getRawFile())
1634 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1635
1636 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1637 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1638 "label requires a valid scope", &N, N.getRawScope());
1639}
1640
1641void Verifier::visitDIExpression(const DIExpression &N) {
1642 CheckDI(N.isValid(), "invalid expression", &N);
1643}
1644
1645void Verifier::visitDIGlobalVariableExpression(
1646 const DIGlobalVariableExpression &GVE) {
1647 CheckDI(GVE.getVariable(), "missing variable");
1648 if (auto *Var = GVE.getVariable())
1649 visitDIGlobalVariable(*Var);
1650 if (auto *Expr = GVE.getExpression()) {
1651 visitDIExpression(*Expr);
1652 if (auto Fragment = Expr->getFragmentInfo())
1653 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1654 }
1655}
1656
1657void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1658 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1659 if (auto *T = N.getRawType())
1660 CheckDI(isType(T), "invalid type ref", &N, T);
1661 if (auto *F = N.getRawFile())
1662 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1663}
1664
1665void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1666 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1667 N.getTag() == dwarf::DW_TAG_imported_declaration,
1668 "invalid tag", &N);
1669 if (auto *S = N.getRawScope())
1670 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1671 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1672 N.getRawEntity());
1673}
1674
1675void Verifier::visitComdat(const Comdat &C) {
1676 // In COFF the Module is invalid if the GlobalValue has private linkage.
1677 // Entities with private linkage don't have entries in the symbol table.
1678 if (TT.isOSBinFormatCOFF())
1679 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1680 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1681 GV);
1682}
1683
1684void Verifier::visitModuleIdents() {
1685 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1686 if (!Idents)
1687 return;
1688
1689 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1690 // Scan each llvm.ident entry and make sure that this requirement is met.
1691 for (const MDNode *N : Idents->operands()) {
1692 Check(N->getNumOperands() == 1,
1693 "incorrect number of operands in llvm.ident metadata", N);
1694 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1695 ("invalid value for llvm.ident metadata entry operand"
1696 "(the operand should be a string)"),
1697 N->getOperand(0));
1698 }
1699}
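// Illustrative example (hypothetical textual IR): a well-formed llvm.ident
// attachment carries exactly one string per entry:
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}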
1700
1701void Verifier::visitModuleCommandLines() {
1702 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1703 if (!CommandLines)
1704 return;
1705
1706 // llvm.commandline takes a list of metadata entries. Each entry has only one
1707 // string. Scan each llvm.commandline entry and make sure that this
1708 // requirement is met.
1709 for (const MDNode *N : CommandLines->operands()) {
1710 Check(N->getNumOperands() == 1,
1711 "incorrect number of operands in llvm.commandline metadata", N);
1712 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1713 ("invalid value for llvm.commandline metadata entry operand"
1714 "(the operand should be a string)"),
1715 N->getOperand(0));
1716 }
1717}
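// Illustrative example (hypothetical textual IR), mirroring the llvm.ident case:
//   !llvm.commandline = !{!0}
//   !0 = !{!"clang -O2 -g foo.c"}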
1718
1719void Verifier::visitModuleFlags() {
1720 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1721 if (!Flags) return;
1722
1723 // Scan each flag, and track the flags and requirements.
1724 DenseMap<const MDString*, const MDNode*> SeenIDs;
1725 SmallVector<const MDNode*, 16> Requirements;
1726 uint64_t PAuthABIPlatform = -1;
1727 uint64_t PAuthABIVersion = -1;
1728 for (const MDNode *MDN : Flags->operands()) {
1729 visitModuleFlag(MDN, SeenIDs, Requirements);
1730 if (MDN->getNumOperands() != 3)
1731 continue;
1732 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1733 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1734 if (const auto *PAP =
1735 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1736 PAuthABIPlatform = PAP->getZExtValue();
1737 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1738 if (const auto *PAV =
1739 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1740 PAuthABIVersion = PAV->getZExtValue();
1741 }
1742 }
1743 }
1744
1745 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1746 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1747 "'aarch64-elf-pauthabi-version' module flags must be present");
1748
1749 // Validate that the requirements in the module are valid.
1750 for (const MDNode *Requirement : Requirements) {
1751 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1752 const Metadata *ReqValue = Requirement->getOperand(1);
1753
1754 const MDNode *Op = SeenIDs.lookup(Flag);
1755 if (!Op) {
1756 CheckFailed("invalid requirement on flag, flag is not present in module",
1757 Flag);
1758 continue;
1759 }
1760
1761 if (Op->getOperand(2) != ReqValue) {
1762 CheckFailed(("invalid requirement on flag, "
1763 "flag does not have the required value"),
1764 Flag);
1765 continue;
1766 }
1767 }
1768}
1769
1770void
1771Verifier::visitModuleFlag(const MDNode *Op,
1772 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1773 SmallVectorImpl<const MDNode *> &Requirements) {
1774 // Each module flag should have three arguments, the merge behavior (a
1775 // constant int), the flag ID (an MDString), and the value.
1776 Check(Op->getNumOperands() == 3,
1777 "incorrect number of operands in module flag", Op);
1778 Module::ModFlagBehavior MFB;
1779 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1780 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1781 "invalid behavior operand in module flag (expected constant integer)",
1782 Op->getOperand(0));
1783 Check(false,
1784 "invalid behavior operand in module flag (unexpected constant)",
1785 Op->getOperand(0));
1786 }
1787 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1788 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1789 Op->getOperand(1));
1790
1791 // Check the values for behaviors with additional requirements.
1792 switch (MFB) {
1793 case Module::Error:
1794 case Module::Warning:
1795 case Module::Override:
1796 // These behavior types accept any value.
1797 break;
1798
1799 case Module::Min: {
1800 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1801 Check(V && V->getValue().isNonNegative(),
1802 "invalid value for 'min' module flag (expected constant non-negative "
1803 "integer)",
1804 Op->getOperand(2));
1805 break;
1806 }
1807
1808 case Module::Max: {
1809 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1810 "invalid value for 'max' module flag (expected constant integer)",
1811 Op->getOperand(2));
1812 break;
1813 }
1814
1815 case Module::Require: {
1816 // The value should itself be an MDNode with two operands, a flag ID (an
1817 // MDString), and a value.
1818 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1819 Check(Value && Value->getNumOperands() == 2,
1820 "invalid value for 'require' module flag (expected metadata pair)",
1821 Op->getOperand(2));
1822 Check(isa<MDString>(Value->getOperand(0)),
1823 ("invalid value for 'require' module flag "
1824 "(first value operand should be a string)"),
1825 Value->getOperand(0));
1826
1827 // Append it to the list of requirements, to check once all module flags are
1828 // scanned.
1829 Requirements.push_back(Value);
1830 break;
1831 }
1832
1833 case Module::Append:
1834 case Module::AppendUnique: {
1835 // These behavior types require the operand be an MDNode.
1836 Check(isa<MDNode>(Op->getOperand(2)),
1837 "invalid value for 'append'-type module flag "
1838 "(expected a metadata node)",
1839 Op->getOperand(2));
1840 break;
1841 }
1842 }
1843
1844 // Unless this is a "requires" flag, check the ID is unique.
1845 if (MFB != Module::Require) {
1846 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1847 Check(Inserted,
1848 "module flag identifiers must be unique (or of 'require' type)", ID);
1849 }
1850
1851 if (ID->getString() == "wchar_size") {
1852 ConstantInt *Value
1853 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1854 Check(Value, "wchar_size metadata requires constant integer argument");
1855 }
1856
1857 if (ID->getString() == "Linker Options") {
1858 // If the llvm.linker.options named metadata exists, we assume that the
1859 // bitcode reader has upgraded the module flag. Otherwise the flag might
1860 // have been created by a client directly.
1861 Check(M.getNamedMetadata("llvm.linker.options"),
1862 "'Linker Options' named metadata no longer supported");
1863 }
1864
1865 if (ID->getString() == "SemanticInterposition") {
1866 ConstantInt *Value =
1867 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1868 Check(Value,
1869 "SemanticInterposition metadata requires constant integer argument");
1870 }
1871
1872 if (ID->getString() == "CG Profile") {
1873 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1874 visitModuleFlagCGProfileEntry(MDO);
1875 }
1876}
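// Illustrative sketch (hypothetical IR) of module flag entries as the checks
// above expect them: behavior constant, MDString ID, value. The 'require'
// behavior (3) wraps a flag/value pair that must match another flag:
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 3, !"Some Feature", !2}
//   !2 = !{!"wchar_size", i32 4}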
1877
1878void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1879 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1880 if (!FuncMDO)
1881 return;
1882 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1883 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1884 "expected a Function or null", FuncMDO);
1885 };
1886 auto Node = dyn_cast_or_null<MDNode>(MDO);
1887 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1888 CheckFunction(Node->getOperand(0));
1889 CheckFunction(Node->getOperand(1));
1890 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1891 Check(Count && Count->getType()->isIntegerTy(),
1892 "expected an integer constant", Node->getOperand(2));
1893}
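// Illustrative example (hypothetical IR): each "CG Profile" entry is an MDNode
// triple of caller, callee (a Function or null), and an integer call count:
//   !0 = !{ptr @caller, ptr @callee, i64 42}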
1894
1895void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1896 for (Attribute A : Attrs) {
1897
1898 if (A.isStringAttribute()) {
1899#define GET_ATTR_NAMES
1900#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1901#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1902 if (A.getKindAsString() == #DISPLAY_NAME) { \
1903 auto V = A.getValueAsString(); \
1904 if (!(V.empty() || V == "true" || V == "false")) \
1905 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1906 ""); \
1907 }
1908
1909#include "llvm/IR/Attributes.inc"
1910 continue;
1911 }
1912
1913 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1914 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1915 V);
1916 return;
1917 }
1918 }
1919}
1920
1921// VerifyParameterAttrs - Check the given attributes for an argument or return
1922// value of the specified type. The value V is printed in error messages.
1923void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1924 const Value *V) {
1925 if (!Attrs.hasAttributes())
1926 return;
1927
1928 verifyAttributeTypes(Attrs, V);
1929
1930 for (Attribute Attr : Attrs)
1931 Check(Attr.isStringAttribute() ||
1932 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1933 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1934 V);
1935
1936 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1937 Check(Attrs.getNumAttributes() == 1,
1938 "Attribute 'immarg' is incompatible with other attributes", V);
1939 }
1940
1941 // Check for mutually incompatible attributes. Only inreg is compatible with
1942 // sret.
1943 unsigned AttrCount = 0;
1944 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1945 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1946 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1947 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1948 Attrs.hasAttribute(Attribute::InReg);
1949 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1950 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1951 Check(AttrCount <= 1,
1952 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1953 "'byref', and 'sret' are incompatible!",
1954 V);
1955
1956 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1957 Attrs.hasAttribute(Attribute::ReadOnly)),
1958 "Attributes "
1959 "'inalloca and readonly' are incompatible!",
1960 V);
1961
1962 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1963 Attrs.hasAttribute(Attribute::Returned)),
1964 "Attributes "
1965 "'sret and returned' are incompatible!",
1966 V);
1967
1968 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1969 Attrs.hasAttribute(Attribute::SExt)),
1970 "Attributes "
1971 "'zeroext and signext' are incompatible!",
1972 V);
1973
1974 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1975 Attrs.hasAttribute(Attribute::ReadOnly)),
1976 "Attributes "
1977 "'readnone and readonly' are incompatible!",
1978 V);
1979
1980 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1981 Attrs.hasAttribute(Attribute::WriteOnly)),
1982 "Attributes "
1983 "'readnone and writeonly' are incompatible!",
1984 V);
1985
1986 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1987 Attrs.hasAttribute(Attribute::WriteOnly)),
1988 "Attributes "
1989 "'readonly and writeonly' are incompatible!",
1990 V);
1991
1992 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
1993 Attrs.hasAttribute(Attribute::AlwaysInline)),
1994 "Attributes "
1995 "'noinline and alwaysinline' are incompatible!",
1996 V);
1997
1998 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
1999 Attrs.hasAttribute(Attribute::ReadNone)),
2000 "Attributes writable and readnone are incompatible!", V);
2001
2002 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2003 Attrs.hasAttribute(Attribute::ReadOnly)),
2004 "Attributes writable and readonly are incompatible!", V);
2005
2006 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2007 for (Attribute Attr : Attrs) {
2008 if (!Attr.isStringAttribute() &&
2009 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2010 CheckFailed("Attribute '" + Attr.getAsString() +
2011 "' applied to incompatible type!", V);
2012 return;
2013 }
2014 }
2015
2016 if (isa<PointerType>(Ty)) {
2017 if (Attrs.hasAttribute(Attribute::Alignment)) {
2018 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2019 Check(AttrAlign.value() <= Value::MaximumAlignment,
2020 "huge alignment values are unsupported", V);
2021 }
2022 if (Attrs.hasAttribute(Attribute::ByVal)) {
2023 SmallPtrSet<Type *, 4> Visited;
2024 Check(Attrs.getByValType()->isSized(&Visited),
2025 "Attribute 'byval' does not support unsized types!", V);
2026 Check(DL.getTypeAllocSize(Attrs.getByValType()).getKnownMinValue() <
2027 (1ULL << 32),
2028 "huge 'byval' arguments are unsupported", V);
2029 }
2030 if (Attrs.hasAttribute(Attribute::ByRef)) {
2031 SmallPtrSet<Type *, 4> Visited;
2032 Check(Attrs.getByRefType()->isSized(&Visited),
2033 "Attribute 'byref' does not support unsized types!", V);
2034 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2035 (1ULL << 32),
2036 "huge 'byref' arguments are unsupported", V);
2037 }
2038 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2039 SmallPtrSet<Type *, 4> Visited;
2040 Check(Attrs.getInAllocaType()->isSized(&Visited),
2041 "Attribute 'inalloca' does not support unsized types!", V);
2042 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2043 (1ULL << 32),
2044 "huge 'inalloca' arguments are unsupported", V);
2045 }
2046 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2047 SmallPtrSet<Type *, 4> Visited;
2048 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2049 "Attribute 'preallocated' does not support unsized types!", V);
2050 Check(
2051 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2052 (1ULL << 32),
2053 "huge 'preallocated' arguments are unsupported", V);
2054 }
2055 }
2056
2057 if (Attrs.hasAttribute(Attribute::Initializes)) {
2058 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2059 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2060 V);
2062 "Attribute 'initializes' does not support unordered ranges", V);
2063 }
2064
2065 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2066 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2067 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2068 V);
2069 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2070 "Invalid value for 'nofpclass' test mask", V);
2071 }
2072 if (Attrs.hasAttribute(Attribute::Range)) {
2073 const ConstantRange &CR =
2074 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2076 "Range bit width must match type bit width!", V);
2077 }
2078}
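// Illustrative (hypothetical) declarations that the checks above reject:
//   declare void @f(ptr byval(i32) inalloca(i32) %p)  ; two ABI-modifying attrs
//   declare void @g(ptr readnone writeonly %p)        ; contradictory effects
//   declare void @h(ptr zeroext %p)                   ; zeroext on a pointer type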
2079
2080void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2081 const Value *V) {
2082 if (Attrs.hasFnAttr(Attr)) {
2083 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2084 unsigned N;
2085 if (S.getAsInteger(10, N))
2086 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2087 }
2088}
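// Illustrative example: string attributes routed through this helper must parse
// as base-10 unsigned integers, e.g. "warn-stack-size"="512" is accepted while
// "warn-stack-size"="huge" is reported.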
2089
2090// Check parameter attributes against a function type.
2091// The value V is printed in error messages.
2092void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2093 const Value *V, bool IsIntrinsic,
2094 bool IsInlineAsm) {
2095 if (Attrs.isEmpty())
2096 return;
2097
2098 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2099 Check(Attrs.hasParentContext(Context),
2100 "Attribute list does not match Module context!", &Attrs, V);
2101 for (const auto &AttrSet : Attrs) {
2102 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2103 "Attribute set does not match Module context!", &AttrSet, V);
2104 for (const auto &A : AttrSet) {
2105 Check(A.hasParentContext(Context),
2106 "Attribute does not match Module context!", &A, V);
2107 }
2108 }
2109 }
2110
2111 bool SawNest = false;
2112 bool SawReturned = false;
2113 bool SawSRet = false;
2114 bool SawSwiftSelf = false;
2115 bool SawSwiftAsync = false;
2116 bool SawSwiftError = false;
2117
2118 // Verify return value attributes.
2119 AttributeSet RetAttrs = Attrs.getRetAttrs();
2120 for (Attribute RetAttr : RetAttrs)
2121 Check(RetAttr.isStringAttribute() ||
2122 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2123 "Attribute '" + RetAttr.getAsString() +
2124 "' does not apply to function return values",
2125 V);
2126
2127 unsigned MaxParameterWidth = 0;
2128 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2129 if (Ty->isVectorTy()) {
2130 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2131 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2132 if (Size > MaxParameterWidth)
2133 MaxParameterWidth = Size;
2134 }
2135 }
2136 };
2137 GetMaxParameterWidth(FT->getReturnType());
2138 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2139
2140 // Verify parameter attributes.
2141 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2142 Type *Ty = FT->getParamType(i);
2143 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2144
2145 if (!IsIntrinsic) {
2146 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2147 "immarg attribute only applies to intrinsics", V);
2148 if (!IsInlineAsm)
2149 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2150 "Attribute 'elementtype' can only be applied to intrinsics"
2151 " and inline asm.",
2152 V);
2153 }
2154
2155 verifyParameterAttrs(ArgAttrs, Ty, V);
2156 GetMaxParameterWidth(Ty);
2157
2158 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2159 Check(!SawNest, "More than one parameter has attribute nest!", V);
2160 SawNest = true;
2161 }
2162
2163 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2164 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2165 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2166 "Incompatible argument and return types for 'returned' attribute",
2167 V);
2168 SawReturned = true;
2169 }
2170
2171 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2172 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2173 Check(i == 0 || i == 1,
2174 "Attribute 'sret' is not on first or second parameter!", V);
2175 SawSRet = true;
2176 }
2177
2178 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2179 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2180 SawSwiftSelf = true;
2181 }
2182
2183 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2184 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2185 SawSwiftAsync = true;
2186 }
2187
2188 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2189 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2190 SawSwiftError = true;
2191 }
2192
2193 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2194 Check(i == FT->getNumParams() - 1,
2195 "inalloca isn't on the last parameter!", V);
2196 }
2197 }
2198
2199 if (!Attrs.hasFnAttrs())
2200 return;
2201
2202 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2203 for (Attribute FnAttr : Attrs.getFnAttrs())
2204 Check(FnAttr.isStringAttribute() ||
2205 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2206 "Attribute '" + FnAttr.getAsString() +
2207 "' does not apply to functions!",
2208 V);
2209
2210 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2211 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2212 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2213
2214 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2215 Check(Attrs.hasFnAttr(Attribute::NoInline),
2216 "Attribute 'optnone' requires 'noinline'!", V);
2217
2218 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2219 "Attributes 'optsize and optnone' are incompatible!", V);
2220
2221 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2222 "Attributes 'minsize and optnone' are incompatible!", V);
2223
2224 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2225 "Attributes 'optdebug and optnone' are incompatible!", V);
2226 }
2227
2228 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2229 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2230 "Attributes 'optsize and optdebug' are incompatible!", V);
2231
2232 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2233 "Attributes 'minsize and optdebug' are incompatible!", V);
2234 }
2235
2236 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2237 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2238 "Attribute writable and memory without argmem: write are incompatible!",
2239 V);
2240
2241 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2242 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2243 "Attributes 'aarch64_pstate_sm_enabled and "
2244 "aarch64_pstate_sm_compatible' are incompatible!",
2245 V);
2246 }
2247
2248 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2249 Attrs.hasFnAttr("aarch64_inout_za") +
2250 Attrs.hasFnAttr("aarch64_out_za") +
2251 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2252 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2253 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2254 V);
2255
2256 Check(
2257 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2258 Attrs.hasFnAttr("aarch64_inout_zt0") +
2259 Attrs.hasFnAttr("aarch64_out_zt0") +
2260 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2261 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2262 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2263 V);
2264
2265 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2266 const GlobalValue *GV = cast<GlobalValue>(V);
2268 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2269 }
2270
2271 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2272 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2273 if (ParamNo >= FT->getNumParams()) {
2274 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2275 return false;
2276 }
2277
2278 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2279 CheckFailed("'allocsize' " + Name +
2280 " argument must refer to an integer parameter",
2281 V);
2282 return false;
2283 }
2284
2285 return true;
2286 };
2287
2288 if (!CheckParam("element size", Args->first))
2289 return;
2290
2291 if (Args->second && !CheckParam("number of elements", *Args->second))
2292 return;
2293 }
2294
2295 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2296 AllocFnKind K = Attrs.getAllocKind();
2297 AllocFnKind Type =
2298 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2299 if (!is_contained(
2300 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2301 Type))
2302 CheckFailed(
2303 "'allockind()' requires exactly one of alloc, realloc, and free");
2304 if ((Type == AllocFnKind::Free) &&
2305 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2306 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2307 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2308 "or aligned modifiers.");
2309 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2310 if ((K & ZeroedUninit) == ZeroedUninit)
2311 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2312 }
2313
2314 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2315 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2316 if (VScaleMin == 0)
2317 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2318 else if (!isPowerOf2_32(VScaleMin))
2319 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2320 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2321 if (VScaleMax && VScaleMin > VScaleMax)
2322 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2323 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2324 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2325 }
2326
2327 if (Attrs.hasFnAttr("frame-pointer")) {
2328 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2329 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2330 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2331 }
2332
2333 // Check EVEX512 feature.
2334 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2335 TT.isX86()) {
2336 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2337 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2338 "512-bit vector arguments require 'evex512' for AVX512", V);
2339 }
2340
2341 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2342 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2343 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2344
2345 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2346 StringRef S = A.getValueAsString();
2347 if (S != "none" && S != "all" && S != "non-leaf")
2348 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2349 }
2350
2351 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2352 StringRef S = A.getValueAsString();
2353 if (S != "a_key" && S != "b_key")
2354 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2355 V);
2356 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2357 CheckFailed(
2358 "'sign-return-address-key' present without `sign-return-address`");
2359 }
2360 }
2361
2362 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2363 StringRef S = A.getValueAsString();
2364 if (S != "" && S != "true" && S != "false")
2365 CheckFailed(
2366 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2367 }
2368
2369 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2370 StringRef S = A.getValueAsString();
2371 if (S != "" && S != "true" && S != "false")
2372 CheckFailed(
2373 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2374 }
2375
2376 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2377 StringRef S = A.getValueAsString();
2378 if (S != "" && S != "true" && S != "false")
2379 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2380 V);
2381 }
2382
2383 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2384 StringRef S = A.getValueAsString();
2385 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2386 if (!Info)
2387 CheckFailed("invalid name for a VFABI variant: " + S, V);
2388 }
2389}
2390
2391void Verifier::verifyFunctionMetadata(
2392 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2393 for (const auto &Pair : MDs) {
2394 if (Pair.first == LLVMContext::MD_prof) {
2395 MDNode *MD = Pair.second;
2396 Check(MD->getNumOperands() >= 2,
2397 "!prof annotations should have no less than 2 operands", MD);
2398
2399 // Check first operand.
2400 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2401 MD);
2402 Check(isa<MDString>(MD->getOperand(0)),
2403 "expected string with name of the !prof annotation", MD);
2404 MDString *MDS = cast<MDString>(MD->getOperand(0));
2405 StringRef ProfName = MDS->getString();
2406 Check(ProfName == "function_entry_count" ||
2407 ProfName == "synthetic_function_entry_count",
2408 "first operand should be 'function_entry_count'"
2409 " or 'synthetic_function_entry_count'",
2410 MD);
2411
2412 // Check second operand.
2413 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2414 MD);
2415 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2416 "expected integer argument to function_entry_count", MD);
2417 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2418 MDNode *MD = Pair.second;
2419 Check(MD->getNumOperands() == 1,
2420 "!kcfi_type must have exactly one operand", MD);
2421 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2422 MD);
2423 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2424 "expected a constant operand for !kcfi_type", MD);
2425 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2426 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2427 "expected a constant integer operand for !kcfi_type", MD);
2428 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2429 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2430 }
2431 }
2432}
2433
2434void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2435 if (!ConstantExprVisited.insert(EntryC).second)
2436 return;
2437
2438 SmallVector<const Constant *, 16> Stack;
2439 Stack.push_back(EntryC);
2440
2441 while (!Stack.empty()) {
2442 const Constant *C = Stack.pop_back_val();
2443
2444 // Check this constant expression.
2445 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2446 visitConstantExpr(CE);
2447
2448 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2449 visitConstantPtrAuth(CPA);
2450
2451 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2452 // Global Values get visited separately, but we do need to make sure
2453 // that the global value is in the correct module
2454 Check(GV->getParent() == &M, "Referencing global in another module!",
2455 EntryC, &M, GV, GV->getParent());
2456 continue;
2457 }
2458
2459 // Visit all sub-expressions.
2460 for (const Use &U : C->operands()) {
2461 const auto *OpC = dyn_cast<Constant>(U);
2462 if (!OpC)
2463 continue;
2464 if (!ConstantExprVisited.insert(OpC).second)
2465 continue;
2466 Stack.push_back(OpC);
2467 }
2468 }
2469}
2470
2471void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2472 if (CE->getOpcode() == Instruction::BitCast)
2473 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2474 CE->getType()),
2475 "Invalid bitcast", CE);
2476}
2477
2478void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2479 Check(CPA->getPointer()->getType()->isPointerTy(),
2480 "signed ptrauth constant base pointer must have pointer type");
2481
2482 Check(CPA->getType() == CPA->getPointer()->getType(),
2483 "signed ptrauth constant must have same type as its base pointer");
2484
2485 Check(CPA->getKey()->getBitWidth() == 32,
2486 "signed ptrauth constant key must be i32 constant integer");
2487
2489 "signed ptrauth constant address discriminator must be a pointer");
2490
2491 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2492 "signed ptrauth constant discriminator must be i64 constant integer");
2493}
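// Illustrative sketch (hypothetical IR) of a ptrauth constant satisfying the
// checks above; the operand order assumed here is base pointer, i32 key,
// i64 discriminator, pointer address discriminator:
//   @fptr = global ptr ptrauth (ptr @callee, i32 0, i64 1234, ptr null)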
2494
2495bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2496 // There shouldn't be more attribute sets than there are parameters plus the
2497 // function and return value.
2498 return Attrs.getNumAttrSets() <= Params + 2;
2499}
2500
2501void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2502 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2503 unsigned ArgNo = 0;
2504 unsigned LabelNo = 0;
2505 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2506 if (CI.Type == InlineAsm::isLabel) {
2507 ++LabelNo;
2508 continue;
2509 }
2510
2511 // Only deal with constraints that correspond to call arguments.
2512 if (!CI.hasArg())
2513 continue;
2514
2515 if (CI.isIndirect) {
2516 const Value *Arg = Call.getArgOperand(ArgNo);
2517 Check(Arg->getType()->isPointerTy(),
2518 "Operand for indirect constraint must have pointer type", &Call);
2519
2520 Check(Call.getParamElementType(ArgNo),
2521 "Operand for indirect constraint must have elementtype attribute",
2522 &Call);
2523 } else {
2524 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2525 "Elementtype attribute can only be applied for indirect "
2526 "constraints",
2527 &Call);
2528 }
2529
2530 ArgNo++;
2531 }
2532
2533 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2534 Check(LabelNo == CallBr->getNumIndirectDests(),
2535 "Number of label constraints does not match number of callbr dests",
2536 &Call);
2537 } else {
2538 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2539 &Call);
2540 }
2541}
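// Illustrative example (hypothetical IR): an indirect ("*m") asm operand must
// be a pointer carrying an elementtype attribute:
//   call void asm "movl $$1, $0", "=*m"(ptr elementtype(i32) %slot)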
2542
2543/// Verify that statepoint intrinsic is well formed.
2544void Verifier::verifyStatepoint(const CallBase &Call) {
2545 assert(Call.getCalledFunction() &&
2546 Call.getCalledFunction()->getIntrinsicID() ==
2547 Intrinsic::experimental_gc_statepoint);
2548
2549 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2550 !Call.onlyAccessesArgMemory(),
2551 "gc.statepoint must read and write all memory to preserve "
2552 "reordering restrictions required by safepoint semantics",
2553 Call);
2554
2555 const int64_t NumPatchBytes =
2556 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2557 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2558 Check(NumPatchBytes >= 0,
2559 "gc.statepoint number of patchable bytes must be "
2560 "positive",
2561 Call);
2562
2563 Type *TargetElemType = Call.getParamElementType(2);
2564 Check(TargetElemType,
2565 "gc.statepoint callee argument must have elementtype attribute", Call);
2566 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2567 Check(TargetFuncType,
2568 "gc.statepoint callee elementtype must be function type", Call);
2569
2570 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2571 Check(NumCallArgs >= 0,
2572 "gc.statepoint number of arguments to underlying call "
2573 "must be positive",
2574 Call);
2575 const int NumParams = (int)TargetFuncType->getNumParams();
2576 if (TargetFuncType->isVarArg()) {
2577 Check(NumCallArgs >= NumParams,
2578 "gc.statepoint mismatch in number of vararg call args", Call);
2579
2580 // TODO: Remove this limitation
2581 Check(TargetFuncType->getReturnType()->isVoidTy(),
2582 "gc.statepoint doesn't support wrapping non-void "
2583 "vararg functions yet",
2584 Call);
2585 } else
2586 Check(NumCallArgs == NumParams,
2587 "gc.statepoint mismatch in number of call args", Call);
2588
2589 const uint64_t Flags
2590 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2591 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2592 "unknown flag used in gc.statepoint flags argument", Call);
2593
2594 // Verify that the types of the call parameter arguments match
2595 // the type of the wrapped callee.
2596 AttributeList Attrs = Call.getAttributes();
2597 for (int i = 0; i < NumParams; i++) {
2598 Type *ParamType = TargetFuncType->getParamType(i);
2599 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2600 Check(ArgType == ParamType,
2601 "gc.statepoint call argument does not match wrapped "
2602 "function type",
2603 Call);
2604
2605 if (TargetFuncType->isVarArg()) {
2606 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2607 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2608 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2609 }
2610 }
2611
2612 const int EndCallArgsInx = 4 + NumCallArgs;
2613
2614 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2615 Check(isa<ConstantInt>(NumTransitionArgsV),
2616 "gc.statepoint number of transition arguments "
2617 "must be constant integer",
2618 Call);
2619 const int NumTransitionArgs =
2620 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2621 Check(NumTransitionArgs == 0,
2622 "gc.statepoint w/inline transition bundle is deprecated", Call);
2623 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2624
2625 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2626 Check(isa<ConstantInt>(NumDeoptArgsV),
2627 "gc.statepoint number of deoptimization arguments "
2628 "must be constant integer",
2629 Call);
2630 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2631 Check(NumDeoptArgs == 0,
2632 "gc.statepoint w/inline deopt operands is deprecated", Call);
2633
2634 const int ExpectedNumArgs = 7 + NumCallArgs;
2635 Check(ExpectedNumArgs == (int)Call.arg_size(),
2636 "gc.statepoint too many arguments", Call);
2637
2638 // Check that the only uses of this gc.statepoint are gc.result or
2639 // gc.relocate calls which are tied to this statepoint and thus part
2640 // of the same statepoint sequence
2641 for (const User *U : Call.users()) {
2642 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2643 Check(UserCall, "illegal use of statepoint token", Call, U);
2644 if (!UserCall)
2645 continue;
2646 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2647 "gc.result or gc.relocate are the only value uses "
2648 "of a gc.statepoint",
2649 Call, U);
2650 if (isa<GCResultInst>(UserCall)) {
2651 Check(UserCall->getArgOperand(0) == &Call,
2652 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2653 } else if (isa<GCRelocateInst>(Call)) {
2654 Check(UserCall->getArgOperand(0) == &Call,
2655 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2656 }
2657 }
2658
2659 // Note: It is legal for a single derived pointer to be listed multiple
2660 // times. It's non-optimal, but it is legal. It can also happen after
2661 // insertion if we strip a bitcast away.
2662 // Note: It is really tempting to check that each base is relocated and
2663 // that a derived pointer is never reused as a base pointer. This turns
2664 // out to be problematic since optimizations run after safepoint insertion
2665 // can recognize equality properties that the insertion logic doesn't know
2666 // about. See example statepoint.ll in the verifier subdirectory
2667}
2668
2669void Verifier::verifyFrameRecoverIndices() {
2670 for (auto &Counts : FrameEscapeInfo) {
2671 Function *F = Counts.first;
2672 unsigned EscapedObjectCount = Counts.second.first;
2673 unsigned MaxRecoveredIndex = Counts.second.second;
2674 Check(MaxRecoveredIndex <= EscapedObjectCount,
2675 "all indices passed to llvm.localrecover must be less than the "
2676 "number of arguments passed to llvm.localescape in the parent "
2677 "function",
2678 F);
2679 }
2680}
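// Illustrative example (hypothetical IR): if the parent function contains
//   call void (...) @llvm.localescape(ptr %buf0, ptr %buf1)
// then llvm.localrecover calls referring to it may only use indices 0 and 1.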
2681
2682static Instruction *getSuccPad(Instruction *Terminator) {
2683 BasicBlock *UnwindDest;
2684 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2685 UnwindDest = II->getUnwindDest();
2686 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2687 UnwindDest = CSI->getUnwindDest();
2688 else
2689 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2690 return UnwindDest->getFirstNonPHI();
2691}
2692
2693void Verifier::verifySiblingFuncletUnwinds() {
2694 SmallPtrSet<Instruction *, 8> Visited;
2695 SmallPtrSet<Instruction *, 8> Active;
2696 for (const auto &Pair : SiblingFuncletInfo) {
2697 Instruction *PredPad = Pair.first;
2698 if (Visited.count(PredPad))
2699 continue;
2700 Active.insert(PredPad);
2701 Instruction *Terminator = Pair.second;
2702 do {
2703 Instruction *SuccPad = getSuccPad(Terminator);
2704 if (Active.count(SuccPad)) {
2705 // Found a cycle; report error
2706 Instruction *CyclePad = SuccPad;
2707 SmallVector<Instruction *, 8> CycleNodes;
2708 do {
2709 CycleNodes.push_back(CyclePad);
2710 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2711 if (CycleTerminator != CyclePad)
2712 CycleNodes.push_back(CycleTerminator);
2713 CyclePad = getSuccPad(CycleTerminator);
2714 } while (CyclePad != SuccPad);
2715 Check(false, "EH pads can't handle each other's exceptions",
2716 ArrayRef<Instruction *>(CycleNodes));
2717 }
2718 // Don't re-walk a node we've already checked
2719 if (!Visited.insert(SuccPad).second)
2720 break;
2721 // Walk to this successor if it has a map entry.
2722 PredPad = SuccPad;
2723 auto TermI = SiblingFuncletInfo.find(PredPad);
2724 if (TermI == SiblingFuncletInfo.end())
2725 break;
2726 Terminator = TermI->second;
2727 Active.insert(PredPad);
2728 } while (true);
2729 // Each node only has one successor, so we've walked all the active
2730 // nodes' successors.
2731 Active.clear();
2732 }
2733}
2734
2735// visitFunction - Verify that a function is ok.
2736//
2737void Verifier::visitFunction(const Function &F) {
2738 visitGlobalValue(F);
2739
2740 // Check function arguments.
2741 FunctionType *FT = F.getFunctionType();
2742 unsigned NumArgs = F.arg_size();
2743
2744 Check(&Context == &F.getContext(),
2745 "Function context does not match Module context!", &F);
2746
2747 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2748 Check(FT->getNumParams() == NumArgs,
2749 "# formal arguments must match # of arguments for function type!", &F,
2750 FT);
2751 Check(F.getReturnType()->isFirstClassType() ||
2752 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2753 "Functions cannot return aggregate values!", &F);
2754
2755 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2756 "Invalid struct return type!", &F);
2757
2758 AttributeList Attrs = F.getAttributes();
2759
2760 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2761 "Attribute after last parameter!", &F);
2762
2763 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2764 "Function debug format should match parent module", &F,
2765 F.IsNewDbgInfoFormat, F.getParent(),
2766 F.getParent()->IsNewDbgInfoFormat);
2767
2768 bool IsIntrinsic = F.isIntrinsic();
2769
2770 // Check function attributes.
2771 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2772
2773 // On function declarations/definitions, we do not support the builtin
2774 // attribute. We do not check this in VerifyFunctionAttrs since that is
2775 // checking for Attributes that can/cannot ever be on functions.
2776 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2777 "Attribute 'builtin' can only be applied to a callsite.", &F);
2778
2779 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2780 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2781
2782 // Check that this function meets the restrictions on this calling convention.
2783 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2784 // restrictions can be lifted.
2785 switch (F.getCallingConv()) {
2786 default:
2787 case CallingConv::C:
2788 break;
2789 case CallingConv::X86_INTR: {
2790 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2791 "Calling convention parameter requires byval", &F);
2792 break;
2793 }
2794 case CallingConv::AMDGPU_KERNEL:
2795 case CallingConv::SPIR_KERNEL:
2796 case CallingConv::AMDGPU_CS_Chain:
2797 case CallingConv::AMDGPU_CS_ChainPreserve:
2798 Check(F.getReturnType()->isVoidTy(),
2799 "Calling convention requires void return type", &F);
2800 [[fallthrough]];
2801 case CallingConv::AMDGPU_VS:
2802 case CallingConv::AMDGPU_HS:
2803 case CallingConv::AMDGPU_GS:
2804 case CallingConv::AMDGPU_PS:
2805 case CallingConv::AMDGPU_CS:
2806 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2807 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2808 const unsigned StackAS = DL.getAllocaAddrSpace();
2809 unsigned i = 0;
2810 for (const Argument &Arg : F.args()) {
2811 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2812 "Calling convention disallows byval", &F);
2813 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2814 "Calling convention disallows preallocated", &F);
2815 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2816 "Calling convention disallows inalloca", &F);
2817
2818 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2819 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2820 // value here.
2821 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2822 "Calling convention disallows stack byref", &F);
2823 }
2824
2825 ++i;
2826 }
2827 }
2828
2829 [[fallthrough]];
2830 case CallingConv::Fast:
2831 case CallingConv::Cold:
2832 case CallingConv::Intel_OCL_BI:
2833 case CallingConv::PTX_Kernel:
2834 case CallingConv::PTX_Device:
2835 Check(!F.isVarArg(),
2836 "Calling convention does not support varargs or "
2837 "perfect forwarding!",
2838 &F);
2839 break;
2840 }
2841
2842 // Check that the argument values match the function type for this function...
2843 unsigned i = 0;
2844 for (const Argument &Arg : F.args()) {
2845 Check(Arg.getType() == FT->getParamType(i),
2846 "Argument value does not match function argument type!", &Arg,
2847 FT->getParamType(i));
2848 Check(Arg.getType()->isFirstClassType(),
2849 "Function arguments must have first-class types!", &Arg);
2850 if (!IsIntrinsic) {
2851 Check(!Arg.getType()->isMetadataTy(),
2852 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2853 Check(!Arg.getType()->isTokenTy(),
2854 "Function takes token but isn't an intrinsic", &Arg, &F);
2855 Check(!Arg.getType()->isX86_AMXTy(),
2856 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2857 }
2858
2859 // Check that swifterror argument is only used by loads and stores.
2860 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2861 verifySwiftErrorValue(&Arg);
2862 }
2863 ++i;
2864 }
2865
2866 if (!IsIntrinsic) {
2867 Check(!F.getReturnType()->isTokenTy(),
2868 "Function returns a token but isn't an intrinsic", &F);
2869 Check(!F.getReturnType()->isX86_AMXTy(),
2870 "Function returns a x86_amx but isn't an intrinsic", &F);
2871 }
2872
2873 // Get the function metadata attachments.
2874 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2875 F.getAllMetadata(MDs);
2876 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2877 verifyFunctionMetadata(MDs);
2878
2879 // Check validity of the personality function
2880 if (F.hasPersonalityFn()) {
2881 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2882 if (Per)
2883 Check(Per->getParent() == F.getParent(),
2884 "Referencing personality function in another module!", &F,
2885 F.getParent(), Per, Per->getParent());
2886 }
2887
2888 // EH funclet coloring can be expensive, recompute on-demand
2889 BlockEHFuncletColors.clear();
2890
2891 if (F.isMaterializable()) {
2892 // Function has a body somewhere we can't see.
2893 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2894 MDs.empty() ? nullptr : MDs.front().second);
2895 } else if (F.isDeclaration()) {
2896 for (const auto &I : MDs) {
2897 // This is used for call site debug information.
2898 CheckDI(I.first != LLVMContext::MD_dbg ||
2899 !cast<DISubprogram>(I.second)->isDistinct(),
2900 "function declaration may only have a unique !dbg attachment",
2901 &F);
2902 Check(I.first != LLVMContext::MD_prof,
2903 "function declaration may not have a !prof attachment", &F);
2904
2905 // Verify the metadata itself.
2906 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2907 }
2908 Check(!F.hasPersonalityFn(),
2909 "Function declaration shouldn't have a personality routine", &F);
2910 } else {
2911 // Verify that this function (which has a body) is not named "llvm.*". It
2912 // is not legal to define intrinsics.
2913 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2914
2915 // Check the entry node
2916 const BasicBlock *Entry = &F.getEntryBlock();
2917 Check(pred_empty(Entry),
2918 "Entry block to function must not have predecessors!", Entry);
2919
2920 // The address of the entry block cannot be taken, unless it is dead.
2921 if (Entry->hasAddressTaken()) {
2922 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2923 "blockaddress may not be used with the entry block!", Entry);
2924 }
2925
2926 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2927 NumKCFIAttachments = 0;
2928 // Visit metadata attachments.
2929 for (const auto &I : MDs) {
2930 // Verify that the attachment is legal.
2931 auto AllowLocs = AreDebugLocsAllowed::No;
2932 switch (I.first) {
2933 default:
2934 break;
2935 case LLVMContext::MD_dbg: {
2936 ++NumDebugAttachments;
2937 CheckDI(NumDebugAttachments == 1,
2938 "function must have a single !dbg attachment", &F, I.second);
2939 CheckDI(isa<DISubprogram>(I.second),
2940 "function !dbg attachment must be a subprogram", &F, I.second);
2941 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2942 "function definition may only have a distinct !dbg attachment",
2943 &F);
2944
2945 auto *SP = cast<DISubprogram>(I.second);
2946 const Function *&AttachedTo = DISubprogramAttachments[SP];
2947 CheckDI(!AttachedTo || AttachedTo == &F,
2948 "DISubprogram attached to more than one function", SP, &F);
2949 AttachedTo = &F;
2950 AllowLocs = AreDebugLocsAllowed::Yes;
2951 break;
2952 }
2953 case LLVMContext::MD_prof:
2954 ++NumProfAttachments;
2955 Check(NumProfAttachments == 1,
2956 "function must have a single !prof attachment", &F, I.second);
2957 break;
2958 case LLVMContext::MD_kcfi_type:
2959 ++NumKCFIAttachments;
2960 Check(NumKCFIAttachments == 1,
2961 "function must have a single !kcfi_type attachment", &F,
2962 I.second);
2963 break;
2964 }
2965
2966 // Verify the metadata itself.
2967 visitMDNode(*I.second, AllowLocs);
2968 }
2969 }
2970
2971 // If this function is actually an intrinsic, verify that it is only used in
2972 // direct call/invokes, never having its "address taken".
2973 // Only do this if the module is materialized, otherwise we don't have all the
2974 // uses.
2975 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2976 const User *U;
2977 if (F.hasAddressTaken(&U, false, true, false,
2978 /*IgnoreARCAttachedCall=*/true))
2979 Check(false, "Invalid user of intrinsic instruction!", U);
2980 }
2981
2982 // Check intrinsics' signatures.
2983 switch (F.getIntrinsicID()) {
2984 case Intrinsic::experimental_gc_get_pointer_base: {
2985 FunctionType *FT = F.getFunctionType();
2986 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2987 Check(isa<PointerType>(F.getReturnType()),
2988 "gc.get.pointer.base must return a pointer", F);
2989 Check(FT->getParamType(0) == F.getReturnType(),
2990 "gc.get.pointer.base operand and result must be of the same type", F);
2991 break;
2992 }
2993 case Intrinsic::experimental_gc_get_pointer_offset: {
2994 FunctionType *FT = F.getFunctionType();
2995 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2996 Check(isa<PointerType>(FT->getParamType(0)),
2997 "gc.get.pointer.offset operand must be a pointer", F);
2998 Check(F.getReturnType()->isIntegerTy(),
2999 "gc.get.pointer.offset must return integer", F);
3000 break;
3001 }
3002 }
3003
3004 auto *N = F.getSubprogram();
3005 HasDebugInfo = (N != nullptr);
3006 if (!HasDebugInfo)
3007 return;
3008
3009 // Check that all !dbg attachments lead back to N.
3010 //
3011 // FIXME: Check this incrementally while visiting !dbg attachments.
3012 // FIXME: Only check when N is the canonical subprogram for F.
3013 SmallPtrSet<const MDNode *, 32> Seen;
3014 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3015 // Be careful about using DILocation here since we might be dealing with
3016 // broken code (this is the Verifier after all).
3017 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3018 if (!DL)
3019 return;
3020 if (!Seen.insert(DL).second)
3021 return;
3022
3023 Metadata *Parent = DL->getRawScope();
3024 CheckDI(Parent && isa<DILocalScope>(Parent),
3025 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3026
3027 DILocalScope *Scope = DL->getInlinedAtScope();
3028 Check(Scope, "Failed to find DILocalScope", DL);
3029
3030 if (!Seen.insert(Scope).second)
3031 return;
3032
3033 DISubprogram *SP = Scope->getSubprogram();
3034
3035 // Scope and SP could be the same MDNode and we don't want to skip
3036 // validation in that case
3037 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3038 return;
3039
3040 CheckDI(SP->describes(&F),
3041 "!dbg attachment points at wrong subprogram for function", N, &F,
3042 &I, DL, Scope, SP);
3043 };
3044 for (auto &BB : F)
3045 for (auto &I : BB) {
3046 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3047 // The llvm.loop annotations also contain two DILocations.
3048 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3049 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3050 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3051 if (BrokenDebugInfo)
3052 return;
3053 }
3054}
3055
3056// visitBasicBlock - Verify that a basic block is well formed...
3057//
3058void Verifier::visitBasicBlock(BasicBlock &BB) {
3059 InstsInThisBlock.clear();
3060 ConvergenceVerifyHelper.visit(BB);
3061
3062 // Ensure that basic blocks have terminators!
3063 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3064
3065 // Check constraints that this basic block imposes on all of the PHI nodes in
3066 // it.
3067 if (isa<PHINode>(BB.front())) {
3068 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3069 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3070 llvm::sort(Preds);
3071 for (const PHINode &PN : BB.phis()) {
3072 Check(PN.getNumIncomingValues() == Preds.size(),
3073 "PHINode should have one entry for each predecessor of its "
3074 "parent basic block!",
3075 &PN);
3076
3077 // Get and sort all incoming values in the PHI node...
3078 Values.clear();
3079 Values.reserve(PN.getNumIncomingValues());
3080 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3081 Values.push_back(
3082 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3083 llvm::sort(Values);
3084
3085 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3086 // Check to make sure that if there is more than one entry for a
3087 // particular basic block in this PHI node, that the incoming values are
3088 // all identical.
3089 //
3090 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3091 Values[i].second == Values[i - 1].second,
3092 "PHI node has multiple entries for the same basic block with "
3093 "different incoming values!",
3094 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3095
3096 // Check to make sure that the predecessors and PHI node entries are
3097 // matched up.
3098 Check(Values[i].first == Preds[i],
3099 "PHI node entries do not match predecessors!", &PN,
3100 Values[i].first, Preds[i]);
3101 }
3102 }
3103 }
3104
3105 // Check that all instructions have their parent pointers set up correctly.
3106 for (auto &I : BB)
3107 {
3108 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3109 }
3110
3111 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3112 "BB debug format should match parent function", &BB,
3113 BB.IsNewDbgInfoFormat, BB.getParent(),
3114 BB.getParent()->IsNewDbgInfoFormat);
3115
3116 // Confirm that no issues arise from the debug program.
3117 if (BB.IsNewDbgInfoFormat)
3118 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3119 &BB);
3120}
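The PHI constraints enforced above (one incoming entry per predecessor, and identical values for duplicate entries) pair with visitPHINode further down. The following standalone sketch is illustrative client code, not part of Verifier.cpp: it deliberately omits one incoming value and uses verifyFunction to surface the diagnostic. The module/function names are made up for the example.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("phi_example", Ctx);
  auto *FTy = FunctionType::get(Type::getInt32Ty(Ctx), {Type::getInt1Ty(Ctx)},
                                /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  BasicBlock *Then = BasicBlock::Create(Ctx, "then", F);
  BasicBlock *Merge = BasicBlock::Create(Ctx, "merge", F);

  IRBuilder<> B(Entry);
  B.CreateCondBr(F->getArg(0), Then, Merge);
  B.SetInsertPoint(Then);
  B.CreateBr(Merge);

  B.SetInsertPoint(Merge);
  PHINode *PN = B.CreatePHI(B.getInt32Ty(), /*NumReservedValues=*/2);
  PN->addIncoming(B.getInt32(1), Then);
  // The entry->merge edge is deliberately left without an incoming value, so
  // the verifier reports the "one entry for each predecessor" failure.
  B.CreateRet(PN);

  return verifyFunction(*F, &errs()) ? 1 : 0; // returns true when F is broken
}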
3121
3122void Verifier::visitTerminator(Instruction &I) {
3123 // Ensure that terminators only exist at the end of the basic block.
3124 Check(&I == I.getParent()->getTerminator(),
3125 "Terminator found in the middle of a basic block!", I.getParent());
3126 visitInstruction(I);
3127}
3128
3129void Verifier::visitBranchInst(BranchInst &BI) {
3130 if (BI.isConditional()) {
3132 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3133 }
3134 visitTerminator(BI);
3135}
3136
3137void Verifier::visitReturnInst(ReturnInst &RI) {
3138 Function *F = RI.getParent()->getParent();
3139 unsigned N = RI.getNumOperands();
3140 if (F->getReturnType()->isVoidTy())
3141 Check(N == 0,
3142 "Found return instr that returns non-void in Function of void "
3143 "return type!",
3144 &RI, F->getReturnType());
3145 else
3146 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3147 "Function return type does not match operand "
3148 "type of return inst!",
3149 &RI, F->getReturnType());
3150
3151 // Check to make sure that the return value has necessary properties for
3152 // terminators...
3153 visitTerminator(RI);
3154}
3155
3156void Verifier::visitSwitchInst(SwitchInst &SI) {
3157 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3158 // Check to make sure that all of the constants in the switch instruction
3159 // have the same type as the switched-on value.
3160 Type *SwitchTy = SI.getCondition()->getType();
3161 SmallPtrSet<ConstantInt *, 32> Constants;
3162 for (auto &Case : SI.cases()) {
3163 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3164 "Case value is not a constant integer.", &SI);
3165 Check(Case.getCaseValue()->getType() == SwitchTy,
3166 "Switch constants must all be same type as switch value!", &SI);
3167 Check(Constants.insert(Case.getCaseValue()).second,
3168 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3169 }
3170
3171 visitTerminator(SI);
3172}
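For example, a switch whose case values are distinct ConstantInts of the condition's type satisfies all three checks above; re-adding the same value is exactly what the "Duplicate integer as switch case" message flags. A small sketch using the public IRBuilder/SwitchInst API; the helper name and parameters are invented for illustration.

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Build a two-case switch over Cond. The case values share Cond's type and
// are distinct, so visitSwitchInst accepts it; adding 7 a second time would
// trigger the duplicate-case check.
static SwitchInst *buildSwitch(IRBuilder<> &B, Value *Cond,
                               BasicBlock *Default, BasicBlock *Target) {
  SwitchInst *SI = B.CreateSwitch(Cond, Default, /*NumCases=*/2);
  SI->addCase(cast<ConstantInt>(ConstantInt::get(Cond->getType(), 7)), Target);
  SI->addCase(cast<ConstantInt>(ConstantInt::get(Cond->getType(), 9)), Target);
  return SI;
}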
3173
3174void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3176 "Indirectbr operand must have pointer type!", &BI);
3177 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3179 "Indirectbr destinations must all have pointer type!", &BI);
3180
3181 visitTerminator(BI);
3182}
3183
3184void Verifier::visitCallBrInst(CallBrInst &CBI) {
3185 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3186 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3187 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3188
3189 verifyInlineAsmCall(CBI);
3190 visitTerminator(CBI);
3191}
3192
3193void Verifier::visitSelectInst(SelectInst &SI) {
3194 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3195 SI.getOperand(2)),
3196 "Invalid operands for select instruction!", &SI);
3197
3198 Check(SI.getTrueValue()->getType() == SI.getType(),
3199 "Select values must have same type as select instruction!", &SI);
3200 visitInstruction(SI);
3201}
3202
3203/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3204/// a pass; if any exist, it's an error.
3205///
3206void Verifier::visitUserOp1(Instruction &I) {
3207 Check(false, "User-defined operators should not live outside of a pass!", &I);
3208}
3209
3210void Verifier::visitTruncInst(TruncInst &I) {
3211 // Get the source and destination types
3212 Type *SrcTy = I.getOperand(0)->getType();
3213 Type *DestTy = I.getType();
3214
3215 // Get the size of the types in bits, we'll need this later
3216 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3217 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3218
3219 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3220 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3221 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3222 "trunc source and destination must both be a vector or neither", &I);
3223 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3224
3225 visitInstruction(I);
3226}
3227
3228void Verifier::visitZExtInst(ZExtInst &I) {
3229 // Get the source and destination types
3230 Type *SrcTy = I.getOperand(0)->getType();
3231 Type *DestTy = I.getType();
3232
3233 // Get the size of the types in bits, we'll need this later
3234 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3235 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3236 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3237 "zext source and destination must both be a vector or neither", &I);
3238 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3239 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3240
3241 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3242
3243 visitInstruction(I);
3244}
3245
3246void Verifier::visitSExtInst(SExtInst &I) {
3247 // Get the source and destination types
3248 Type *SrcTy = I.getOperand(0)->getType();
3249 Type *DestTy = I.getType();
3250
3251 // Get the size of the types in bits, we'll need this later
3252 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3253 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3254
3255 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3256 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3257 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3258 "sext source and destination must both be a vector or neither", &I);
3259 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3260
3261 visitInstruction(I);
3262}
3263
3264void Verifier::visitFPTruncInst(FPTruncInst &I) {
3265 // Get the source and destination types
3266 Type *SrcTy = I.getOperand(0)->getType();
3267 Type *DestTy = I.getType();
3268 // Get the size of the types in bits, we'll need this later
3269 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3270 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3271
3272 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3273 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3274 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3275 "fptrunc source and destination must both be a vector or neither", &I);
3276 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3277
3278 visitInstruction(I);
3279}
3280
3281void Verifier::visitFPExtInst(FPExtInst &I) {
3282 // Get the source and destination types
3283 Type *SrcTy = I.getOperand(0)->getType();
3284 Type *DestTy = I.getType();
3285
3286 // Get the size of the types in bits, we'll need this later
3287 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3288 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3289
3290 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3291 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3292 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3293 "fpext source and destination must both be a vector or neither", &I);
3294 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3295
3296 visitInstruction(I);
3297}
3298
3299void Verifier::visitUIToFPInst(UIToFPInst &I) {
3300 // Get the source and destination types
3301 Type *SrcTy = I.getOperand(0)->getType();
3302 Type *DestTy = I.getType();
3303
3304 bool SrcVec = SrcTy->isVectorTy();
3305 bool DstVec = DestTy->isVectorTy();
3306
3307 Check(SrcVec == DstVec,
3308 "UIToFP source and dest must both be vector or scalar", &I);
3309 Check(SrcTy->isIntOrIntVectorTy(),
3310 "UIToFP source must be integer or integer vector", &I);
3311 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3312 &I);
3313
3314 if (SrcVec && DstVec)
3315 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3316 cast<VectorType>(DestTy)->getElementCount(),
3317 "UIToFP source and dest vector length mismatch", &I);
3318
3319 visitInstruction(I);
3320}
3321
3322void Verifier::visitSIToFPInst(SIToFPInst &I) {
3323 // Get the source and destination types
3324 Type *SrcTy = I.getOperand(0)->getType();
3325 Type *DestTy = I.getType();
3326
3327 bool SrcVec = SrcTy->isVectorTy();
3328 bool DstVec = DestTy->isVectorTy();
3329
3330 Check(SrcVec == DstVec,
3331 "SIToFP source and dest must both be vector or scalar", &I);
3332 Check(SrcTy->isIntOrIntVectorTy(),
3333 "SIToFP source must be integer or integer vector", &I);
3334 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3335 &I);
3336
3337 if (SrcVec && DstVec)
3338 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3339 cast<VectorType>(DestTy)->getElementCount(),
3340 "SIToFP source and dest vector length mismatch", &I);
3341
3342 visitInstruction(I);
3343}
3344
3345void Verifier::visitFPToUIInst(FPToUIInst &I) {
3346 // Get the source and destination types
3347 Type *SrcTy = I.getOperand(0)->getType();
3348 Type *DestTy = I.getType();
3349
3350 bool SrcVec = SrcTy->isVectorTy();
3351 bool DstVec = DestTy->isVectorTy();
3352
3353 Check(SrcVec == DstVec,
3354 "FPToUI source and dest must both be vector or scalar", &I);
3355 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3356 Check(DestTy->isIntOrIntVectorTy(),
3357 "FPToUI result must be integer or integer vector", &I);
3358
3359 if (SrcVec && DstVec)
3360 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3361 cast<VectorType>(DestTy)->getElementCount(),
3362 "FPToUI source and dest vector length mismatch", &I);
3363
3364 visitInstruction(I);
3365}
3366
3367void Verifier::visitFPToSIInst(FPToSIInst &I) {
3368 // Get the source and destination types
3369 Type *SrcTy = I.getOperand(0)->getType();
3370 Type *DestTy = I.getType();
3371
3372 bool SrcVec = SrcTy->isVectorTy();
3373 bool DstVec = DestTy->isVectorTy();
3374
3375 Check(SrcVec == DstVec,
3376 "FPToSI source and dest must both be vector or scalar", &I);
3377 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3378 Check(DestTy->isIntOrIntVectorTy(),
3379 "FPToSI result must be integer or integer vector", &I);
3380
3381 if (SrcVec && DstVec)
3382 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3383 cast<VectorType>(DestTy)->getElementCount(),
3384 "FPToSI source and dest vector length mismatch", &I);
3385
3386 visitInstruction(I);
3387}
3388
3389void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3390 // Get the source and destination types
3391 Type *SrcTy = I.getOperand(0)->getType();
3392 Type *DestTy = I.getType();
3393
3394 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3395
3396 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3397 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3398 &I);
3399
3400 if (SrcTy->isVectorTy()) {
3401 auto *VSrc = cast<VectorType>(SrcTy);
3402 auto *VDest = cast<VectorType>(DestTy);
3403 Check(VSrc->getElementCount() == VDest->getElementCount(),
3404 "PtrToInt Vector width mismatch", &I);
3405 }
3406
3407 visitInstruction(I);
3408}
3409
3410void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3411 // Get the source and destination types
3412 Type *SrcTy = I.getOperand(0)->getType();
3413 Type *DestTy = I.getType();
3414
3415 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3416 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3417
3418 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3419 &I);
3420 if (SrcTy->isVectorTy()) {
3421 auto *VSrc = cast<VectorType>(SrcTy);
3422 auto *VDest = cast<VectorType>(DestTy);
3423 Check(VSrc->getElementCount() == VDest->getElementCount(),
3424 "IntToPtr Vector width mismatch", &I);
3425 }
3426 visitInstruction(I);
3427}
3428
3429void Verifier::visitBitCastInst(BitCastInst &I) {
3430 Check(
3431 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3432 "Invalid bitcast", &I);
3433 visitInstruction(I);
3434}
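Because this check simply defers to CastInst::castIsValid, a front end can ask the same question before it ever emits the instruction. A tiny sketch under that assumption; the wrapper name is made up here and is not part of this file.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// True if a bitcast from SrcTy to DstTy would pass the verifier's
// "Invalid bitcast" check (e.g. same bit width, no pointer/non-pointer mix).
static bool bitcastWouldVerify(Type *SrcTy, Type *DstTy) {
  return CastInst::castIsValid(Instruction::BitCast, SrcTy, DstTy);
}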
3435
3436void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3437 Type *SrcTy = I.getOperand(0)->getType();
3438 Type *DestTy = I.getType();
3439
3440 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3441 &I);
3442 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3443 &I);
3445 "AddrSpaceCast must be between different address spaces", &I);
3446 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3447 Check(SrcVTy->getElementCount() ==
3448 cast<VectorType>(DestTy)->getElementCount(),
3449 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3450 visitInstruction(I);
3451}
3452
3453/// visitPHINode - Ensure that a PHI node is well formed.
3454///
3455void Verifier::visitPHINode(PHINode &PN) {
3456 // Ensure that the PHI nodes are all grouped together at the top of the block.
3457 // This can be tested by checking whether the instruction before this is
3458 // either nonexistent (because this is begin()) or is a PHI node. If not,
3459 // then there is some other instruction before a PHI.
3460 Check(&PN == &PN.getParent()->front() ||
3461 isa<PHINode>(--BasicBlock::iterator(&PN)),
3462 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3463
3464 // Check that a PHI doesn't yield a Token.
3465 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3466
3467 // Check that all of the values of the PHI node have the same type as the
3468 // result.
3469 for (Value *IncValue : PN.incoming_values()) {
3470 Check(PN.getType() == IncValue->getType(),
3471 "PHI node operands are not the same type as the result!", &PN);
3472 }
3473
3474 // All other PHI node constraints are checked in the visitBasicBlock method.
3475
3476 visitInstruction(PN);
3477}
3478
3479void Verifier::visitCallBase(CallBase &Call) {
3480 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3481 "Called function must be a pointer!", Call);
3482 FunctionType *FTy = Call.getFunctionType();
3483
3484 // Verify that the correct number of arguments are being passed
3485 if (FTy->isVarArg())
3486 Check(Call.arg_size() >= FTy->getNumParams(),
3487 "Called function requires more parameters than were provided!", Call);
3488 else
3489 Check(Call.arg_size() == FTy->getNumParams(),
3490 "Incorrect number of arguments passed to called function!", Call);
3491
3492 // Verify that all arguments to the call match the function type.
3493 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3494 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3495 "Call parameter type does not match function signature!",
3496 Call.getArgOperand(i), FTy->getParamType(i), Call);
3497
3498 AttributeList Attrs = Call.getAttributes();
3499
3500 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3501 "Attribute after last parameter!", Call);
3502
3503 Function *Callee =
3504 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3505 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3506 if (IsIntrinsic)
3507 Check(Callee->getValueType() == FTy,
3508 "Intrinsic called with incompatible signature", Call);
3509
3510 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3511 // convention.
3512 auto CC = Call.getCallingConv();
3515 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3516 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3517 Call);
3518
3519 // Disallow passing/returning values with alignment higher than we can
3520 // represent.
3521 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3522 // necessary.
3523 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3524 if (!Ty->isSized())
3525 return;
3526 Align ABIAlign = DL.getABITypeAlign(Ty);
3527 Check(ABIAlign.value() <= Value::MaximumAlignment,
3528 "Incorrect alignment of " + Message + " to called function!", Call);
3529 };
3530
3531 if (!IsIntrinsic) {
3532 VerifyTypeAlign(FTy->getReturnType(), "return type");
3533 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3534 Type *Ty = FTy->getParamType(i);
3535 VerifyTypeAlign(Ty, "argument passed");
3536 }
3537 }
3538
3539 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3540 // Don't allow speculatable on call sites, unless the underlying function
3541 // declaration is also speculatable.
3542 Check(Callee && Callee->isSpeculatable(),
3543 "speculatable attribute may not apply to call sites", Call);
3544 }
3545
3546 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3547 Check(Call.getCalledFunction()->getIntrinsicID() ==
3548 Intrinsic::call_preallocated_arg,
3549 "preallocated as a call site attribute can only be on "
3550 "llvm.call.preallocated.arg");
3551 }
3552
3553 // Verify call attributes.
3554 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3555
3556 // Conservatively check the inalloca argument.
3557 // We have a bug if we can find that there is an underlying alloca without
3558 // inalloca.
3559 if (Call.hasInAllocaArgument()) {
3560 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3561 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3562 Check(AI->isUsedWithInAlloca(),
3563 "inalloca argument for call has mismatched alloca", AI, Call);
3564 }
3565
3566 // For each argument of the callsite, if it has the swifterror argument,
3567 // make sure the underlying alloca/parameter it comes from has a swifterror as
3568 // well.
3569 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3570 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3571 Value *SwiftErrorArg = Call.getArgOperand(i);
3572 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3573 Check(AI->isSwiftError(),
3574 "swifterror argument for call has mismatched alloca", AI, Call);
3575 continue;
3576 }
3577 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3578 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3579 SwiftErrorArg, Call);
3580 Check(ArgI->hasSwiftErrorAttr(),
3581 "swifterror argument for call has mismatched parameter", ArgI,
3582 Call);
3583 }
3584
3585 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3586 // Don't allow immarg on call sites, unless the underlying declaration
3587 // also has the matching immarg.
3588 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3589 "immarg may not apply only to call sites", Call.getArgOperand(i),
3590 Call);
3591 }
3592
3593 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3594 Value *ArgVal = Call.getArgOperand(i);
3595 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3596 "immarg operand has non-immediate parameter", ArgVal, Call);
3597 }
3598
3599 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3600 Value *ArgVal = Call.getArgOperand(i);
3601 bool hasOB =
3602 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3603 bool isMustTail = Call.isMustTailCall();
3604 Check(hasOB != isMustTail,
3605 "preallocated operand either requires a preallocated bundle or "
3606 "the call to be musttail (but not both)",
3607 ArgVal, Call);
3608 }
3609 }
3610
3611 if (FTy->isVarArg()) {
3612 // FIXME? is 'nest' even legal here?
3613 bool SawNest = false;
3614 bool SawReturned = false;
3615
3616 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3617 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3618 SawNest = true;
3619 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3620 SawReturned = true;
3621 }
3622
3623 // Check attributes on the varargs part.
3624 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3625 Type *Ty = Call.getArgOperand(Idx)->getType();
3626 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3627 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3628
3629 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3630 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3631 SawNest = true;
3632 }
3633
3634 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3635 Check(!SawReturned, "More than one parameter has attribute returned!",
3636 Call);
3637 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3638 "Incompatible argument and return types for 'returned' "
3639 "attribute",
3640 Call);
3641 SawReturned = true;
3642 }
3643
3644 // Statepoint intrinsic is vararg but the wrapped function may not be.
3645 // Allow sret here and check the wrapped function in verifyStatepoint.
3646 if (!Call.getCalledFunction() ||
3647 Call.getCalledFunction()->getIntrinsicID() !=
3648 Intrinsic::experimental_gc_statepoint)
3649 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3650 "Attribute 'sret' cannot be used for vararg call arguments!",
3651 Call);
3652
3653 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3654 Check(Idx == Call.arg_size() - 1,
3655 "inalloca isn't on the last argument!", Call);
3656 }
3657 }
3658
3659 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3660 if (!IsIntrinsic) {
3661 for (Type *ParamTy : FTy->params()) {
3662 Check(!ParamTy->isMetadataTy(),
3663 "Function has metadata parameter but isn't an intrinsic", Call);
3664 Check(!ParamTy->isTokenTy(),
3665 "Function has token parameter but isn't an intrinsic", Call);
3666 }
3667 }
3668
3669 // Verify that indirect calls don't return tokens.
3670 if (!Call.getCalledFunction()) {
3671 Check(!FTy->getReturnType()->isTokenTy(),
3672 "Return type cannot be token for indirect call!");
3673 Check(!FTy->getReturnType()->isX86_AMXTy(),
3674 "Return type cannot be x86_amx for indirect call!");
3675 }
3676
3677 if (Function *F = Call.getCalledFunction())
3678 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3679 visitIntrinsicCall(ID, Call);
3680
3681 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3682 // most one "gc-transition", at most one "cfguardtarget", at most one
3683 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3684 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3685 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3686 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3687 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3688 FoundAttachedCallBundle = false;
3689 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3690 OperandBundleUse BU = Call.getOperandBundleAt(i);
3691 uint32_t Tag = BU.getTagID();
3692 if (Tag == LLVMContext::OB_deopt) {
3693 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3694 FoundDeoptBundle = true;
3695 } else if (Tag == LLVMContext::OB_gc_transition) {
3696 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3697 Call);
3698 FoundGCTransitionBundle = true;
3699 } else if (Tag == LLVMContext::OB_funclet) {
3700 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3701 FoundFuncletBundle = true;
3702 Check(BU.Inputs.size() == 1,
3703 "Expected exactly one funclet bundle operand", Call);
3704 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3705 "Funclet bundle operands should correspond to a FuncletPadInst",
3706 Call);
3707 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3708 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3709 Call);
3710 FoundCFGuardTargetBundle = true;
3711 Check(BU.Inputs.size() == 1,
3712 "Expected exactly one cfguardtarget bundle operand", Call);
3713 } else if (Tag == LLVMContext::OB_ptrauth) {
3714 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3715 FoundPtrauthBundle = true;
3716 Check(BU.Inputs.size() == 2,
3717 "Expected exactly two ptrauth bundle operands", Call);
3718 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3719 BU.Inputs[0]->getType()->isIntegerTy(32),
3720 "Ptrauth bundle key operand must be an i32 constant", Call);
3721 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3722 "Ptrauth bundle discriminator operand must be an i64", Call);
3723 } else if (Tag == LLVMContext::OB_kcfi) {
3724 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3725 FoundKCFIBundle = true;
3726 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3727 Call);
3728 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3729 BU.Inputs[0]->getType()->isIntegerTy(32),
3730 "Kcfi bundle operand must be an i32 constant", Call);
3731 } else if (Tag == LLVMContext::OB_preallocated) {
3732 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3733 Call);
3734 FoundPreallocatedBundle = true;
3735 Check(BU.Inputs.size() == 1,
3736 "Expected exactly one preallocated bundle operand", Call);
3737 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3738 Check(Input &&
3739 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3740 "\"preallocated\" argument must be a token from "
3741 "llvm.call.preallocated.setup",
3742 Call);
3743 } else if (Tag == LLVMContext::OB_gc_live) {
3744 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3745 FoundGCLiveBundle = true;
3746 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3747 Check(!FoundAttachedCallBundle,
3748 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3749 FoundAttachedCallBundle = true;
3750 verifyAttachedCallBundle(Call, BU);
3751 }
3752 }
3753
3754 // Verify that callee and callsite agree on whether to use pointer auth.
3755 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3756 "Direct call cannot have a ptrauth bundle", Call);
3757
3758 // Verify that each inlinable callsite of a debug-info-bearing function in a
3759 // debug-info-bearing function has a debug location attached to it. Failure to
3760 // do so causes assertion failures when the inliner sets up inline scope info.
3761 // (Interposable functions are not inlinable, nor are functions without
3762 // definitions.)
3763 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3764 !Call.getCalledFunction()->isInterposable() &&
3765 !Call.getCalledFunction()->isDeclaration() &&
3766 Call.getCalledFunction()->getSubprogram())
3767 CheckDI(Call.getDebugLoc(),
3768 "inlinable function call in a function with "
3769 "debug info must have a !dbg location",
3770 Call);
3771
3772 if (Call.isInlineAsm())
3773 verifyInlineAsmCall(Call);
3774
3775 ConvergenceVerifyHelper.visit(Call);
3776
3777 visitInstruction(Call);
3778}
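Most of the operand-bundle rules above are "at most one bundle of each kind", so a single "deopt" bundle on a call is fine. The sketch below shows one way client code might attach such a bundle with IRBuilder; the callee name "g", the helper name, and the i32 deopt state are illustrative assumptions, not anything defined in this file.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Emit: call void @g() [ "deopt"(i32 42) ]
// One deopt bundle is accepted; a second one would hit the
// "Multiple deopt operand bundles" check.
static CallInst *emitCallWithDeoptState(IRBuilder<> &B, Module &M) {
  FunctionCallee G =
      M.getOrInsertFunction("g", Type::getVoidTy(M.getContext()));
  std::vector<Value *> DeoptState = {B.getInt32(42)};
  OperandBundleDef Deopt("deopt", DeoptState);
  return B.CreateCall(G, /*Args=*/{}, {Deopt});
}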
3779
3780void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3781 StringRef Context) {
3782 Check(!Attrs.contains(Attribute::InAlloca),
3783 Twine("inalloca attribute not allowed in ") + Context);
3784 Check(!Attrs.contains(Attribute::InReg),
3785 Twine("inreg attribute not allowed in ") + Context);
3786 Check(!Attrs.contains(Attribute::SwiftError),
3787 Twine("swifterror attribute not allowed in ") + Context);
3788 Check(!Attrs.contains(Attribute::Preallocated),
3789 Twine("preallocated attribute not allowed in ") + Context);
3790 Check(!Attrs.contains(Attribute::ByRef),
3791 Twine("byref attribute not allowed in ") + Context);
3792}
3793
3794/// Two types are "congruent" if they are identical, or if they are both pointer
3795/// types with different pointee types and the same address space.
3796static bool isTypeCongruent(Type *L, Type *R) {
3797 if (L == R)
3798 return true;
3799 PointerType *PL = dyn_cast<PointerType>(L);
3800 PointerType *PR = dyn_cast<PointerType>(R);
3801 if (!PL || !PR)
3802 return false;
3803 return PL->getAddressSpace() == PR->getAddressSpace();
3804}
3805
3806static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3807 static const Attribute::AttrKind ABIAttrs[] = {
3808 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3809 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3810 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3811 Attribute::ByRef};
3812 AttrBuilder Copy(C);
3813 for (auto AK : ABIAttrs) {
3814 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3815 if (Attr.isValid())
3816 Copy.addAttribute(Attr);
3817 }
3818
3819 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3820 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3821 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3822 Attrs.hasParamAttr(I, Attribute::ByRef)))
3823 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3824 return Copy;
3825}
3826
3827void Verifier::verifyMustTailCall(CallInst &CI) {
3828 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3829
3830 Function *F = CI.getParent()->getParent();
3831 FunctionType *CallerTy = F->getFunctionType();
3832 FunctionType *CalleeTy = CI.getFunctionType();
3833 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3834 "cannot guarantee tail call due to mismatched varargs", &CI);
3835 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3836 "cannot guarantee tail call due to mismatched return types", &CI);
3837
3838 // - The calling conventions of the caller and callee must match.
3839 Check(F->getCallingConv() == CI.getCallingConv(),
3840 "cannot guarantee tail call due to mismatched calling conv", &CI);
3841
3842 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3843 // or a pointer bitcast followed by a ret instruction.
3844 // - The ret instruction must return the (possibly bitcasted) value
3845 // produced by the call or void.
3846 Value *RetVal = &CI;
3847 Instruction *Next = CI.getNextNode();
3848
3849 // Handle the optional bitcast.
3850 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3851 Check(BI->getOperand(0) == RetVal,
3852 "bitcast following musttail call must use the call", BI);
3853 RetVal = BI;
3854 Next = BI->getNextNode();
3855 }
3856
3857 // Check the return.
3858 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3859 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3860 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3861 isa<UndefValue>(Ret->getReturnValue()),
3862 "musttail call result must be returned", Ret);
3863
3864 AttributeList CallerAttrs = F->getAttributes();
3865 AttributeList CalleeAttrs = CI.getAttributes();
3866 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3867 CI.getCallingConv() == CallingConv::Tail) {
3868 StringRef CCName =
3869 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3870
3871 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3872 // are allowed in swifttailcc call
3873 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3874 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3875 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3876 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3877 }
3878 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3879 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3880 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3881 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3882 }
3883 // - Varargs functions are not allowed
3884 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3885 " tail call for varargs function");
3886 return;
3887 }
3888
3889 // - The caller and callee prototypes must match. Pointer types of
3890 // parameters or return types may differ in pointee type, but not
3891 // address space.
3892 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3893 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3894 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3895 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3896 Check(
3897 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3898 "cannot guarantee tail call due to mismatched parameter types", &CI);
3899 }
3900 }
3901
3902 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3903 // returned, preallocated, and inalloca, must match.
3904 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3905 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3906 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3907 Check(CallerABIAttrs == CalleeABIAttrs,
3908 "cannot guarantee tail call due to mismatched ABI impacting "
3909 "function attributes",
3910 &CI, CI.getOperand(I));
3911 }
3912}
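A quick way to see the musttail rules above in action is to parse a small module and run the verifier over it. The harness below is illustrative client code (not something Verifier.cpp contains); it assumes only the public parseAssemblyString and verifyModule entry points, and the function names in the IR string are made up.

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  const char *IR = R"(
    declare i32 @callee(i32)

    define i32 @caller(i32 %x) {
      ; musttail: same calling convention, matching prototypes, and the call
      ; immediately precedes the ret that returns its result.
      %r = musttail call i32 @callee(i32 %x)
      ret i32 %r
    }
  )";

  LLVMContext Ctx;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(IR, Err, Ctx);
  if (!M) {
    Err.print("musttail-example", errs());
    return 1;
  }
  // verifyModule returns true if the module is broken; this one should pass.
  return verifyModule(*M, &errs()) ? 1 : 0;
}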
3913
3914void Verifier::visitCallInst(CallInst &CI) {
3915 visitCallBase(CI);
3916
3917 if (CI.isMustTailCall())
3918 verifyMustTailCall(CI);
3919}
3920
3921void Verifier::visitInvokeInst(InvokeInst &II) {
3922 visitCallBase(II);
3923
3924 // Verify that the first non-PHI instruction of the unwind destination is an
3925 // exception handling instruction.
3926 Check(
3927 II.getUnwindDest()->isEHPad(),
3928 "The unwind destination does not have an exception handling instruction!",
3929 &II);
3930
3931 visitTerminator(II);
3932}
3933
3934/// visitUnaryOperator - Check the argument to the unary operator.
3935///
3936void Verifier::visitUnaryOperator(UnaryOperator &U) {
3937 Check(U.getType() == U.getOperand(0)->getType(),
3938 "Unary operators must have same type for"
3939 "operands and result!",
3940 &U);
3941
3942 switch (U.getOpcode()) {
3943 // Check that floating-point arithmetic operators are only used with
3944 // floating-point operands.
3945 case Instruction::FNeg:
3946 Check(U.getType()->isFPOrFPVectorTy(),
3947 "FNeg operator only works with float types!", &U);
3948 break;
3949 default:
3950 llvm_unreachable("Unknown UnaryOperator opcode!");
3951 }
3952
3953 visitInstruction(U);
3954}
3955
3956/// visitBinaryOperator - Check that both arguments to the binary operator are
3957/// of the same type!
3958///
3959void Verifier::visitBinaryOperator(BinaryOperator &B) {
3960 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3961 "Both operands to a binary operator are not of the same type!", &B);
3962
3963 switch (B.getOpcode()) {
3964 // Check that integer arithmetic operators are only used with
3965 // integral operands.
3966 case Instruction::Add:
3967 case Instruction::Sub:
3968 case Instruction::Mul:
3969 case Instruction::SDiv:
3970 case Instruction::UDiv:
3971 case Instruction::SRem:
3972 case Instruction::URem:
3973 Check(B.getType()->isIntOrIntVectorTy(),
3974 "Integer arithmetic operators only work with integral types!", &B);
3975 Check(B.getType() == B.getOperand(0)->getType(),
3976 "Integer arithmetic operators must have same type "
3977 "for operands and result!",
3978 &B);
3979 break;
3980 // Check that floating-point arithmetic operators are only used with
3981 // floating-point operands.
3982 case Instruction::FAdd:
3983 case Instruction::FSub:
3984 case Instruction::FMul:
3985 case Instruction::FDiv:
3986 case Instruction::FRem:
3987 Check(B.getType()->isFPOrFPVectorTy(),
3988 "Floating-point arithmetic operators only work with "
3989 "floating-point types!",
3990 &B);
3991 Check(B.getType() == B.getOperand(0)->getType(),
3992 "Floating-point arithmetic operators must have same type "
3993 "for operands and result!",
3994 &B);
3995 break;
3996 // Check that logical operators are only used with integral operands.
3997 case Instruction::And:
3998 case Instruction::Or:
3999 case Instruction::Xor:
4000 Check(B.getType()->isIntOrIntVectorTy(),
4001 "Logical operators only work with integral types!", &B);
4002 Check(B.getType() == B.getOperand(0)->getType(),
4003 "Logical operators must have same type for operands and result!", &B);
4004 break;
4005 case Instruction::Shl:
4006 case Instruction::LShr:
4007 case Instruction::AShr:
4008 Check(B.getType()->isIntOrIntVectorTy(),
4009 "Shifts only work with integral types!", &B);
4010 Check(B.getType() == B.getOperand(0)->getType(),
4011 "Shift return type must be same as operands!", &B);
4012 break;
4013 default:
4014 llvm_unreachable("Unknown BinaryOperator opcode!");
4015 }
4016
4017 visitInstruction(B);
4018}
4019
4020void Verifier::visitICmpInst(ICmpInst &IC) {
4021 // Check that the operands are the same type
4022 Type *Op0Ty = IC.getOperand(0)->getType();
4023 Type *Op1Ty = IC.getOperand(1)->getType();
4024 Check(Op0Ty == Op1Ty,
4025 "Both operands to ICmp instruction are not of the same type!", &IC);
4026 // Check that the operands are the right type
4027 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4028 "Invalid operand types for ICmp instruction", &IC);
4029 // Check that the predicate is valid.
4030 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4031
4032 visitInstruction(IC);
4033}
4034
4035void Verifier::visitFCmpInst(FCmpInst &FC) {
4036 // Check that the operands are the same type
4037 Type *Op0Ty = FC.getOperand(0)->getType();
4038 Type *Op1Ty = FC.getOperand(1)->getType();
4039 Check(Op0Ty == Op1Ty,
4040 "Both operands to FCmp instruction are not of the same type!", &FC);
4041 // Check that the operands are the right type
4042 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4043 &FC);
4044 // Check that the predicate is valid.
4045 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4046
4047 visitInstruction(FC);
4048}
4049
4050void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4052 "Invalid extractelement operands!", &EI);
4053 visitInstruction(EI);
4054}
4055
4056void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4057 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4058 IE.getOperand(2)),
4059 "Invalid insertelement operands!", &IE);
4060 visitInstruction(IE);
4061}
4062
4063void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4064 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4065 SV.getShuffleMask()),
4066 "Invalid shufflevector operands!", &SV);
4067 visitInstruction(SV);
4068}
4069
4070void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4071 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4072
4073 Check(isa<PointerType>(TargetTy),
4074 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4075 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4076
4077 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4078 SmallPtrSet<Type *, 4> Visited;
4079 Check(!STy->containsScalableVectorType(&Visited),
4080 "getelementptr cannot target structure that contains scalable vector"
4081 "type",
4082 &GEP);
4083 }
4084
4085 SmallVector<Value *, 16> Idxs(GEP.indices());
4086 Check(
4087 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4088 "GEP indexes must be integers", &GEP);
4089 Type *ElTy =
4090 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4091 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4092
4093 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4094 GEP.getResultElementType() == ElTy,
4095 "GEP is not of right type for indices!", &GEP, ElTy);
4096
4097 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4098 // Additional checks for vector GEPs.
4099 ElementCount GEPWidth = GEPVTy->getElementCount();
4100 if (GEP.getPointerOperandType()->isVectorTy())
4101 Check(
4102 GEPWidth ==
4103 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4104 "Vector GEP result width doesn't match operand's", &GEP);
4105 for (Value *Idx : Idxs) {
4106 Type *IndexTy = Idx->getType();
4107 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4108 ElementCount IndexWidth = IndexVTy->getElementCount();
4109 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4110 }
4111 Check(IndexTy->isIntOrIntVectorTy(),
4112 "All GEP indices should be of integer type");
4113 }
4114 }
4115
4116 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4117 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4118 "GEP address space doesn't match type", &GEP);
4119 }
4120
4121 visitInstruction(GEP);
4122}
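The "Invalid indices for GEP pointer type!" check relies on GetElementPtrInst::getIndexedType returning null when the index list does not fit the source element type, so client code can use the same helper defensively before creating a GEP. A minimal sketch under that assumption; the wrapper name is invented for illustration.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Returns the element type a GEP over SrcElemTy with the given indices would
// address, or nullptr if the indices do not fit the type (the condition the
// verifier reports as "Invalid indices for GEP pointer type!").
static Type *gepResultElementType(Type *SrcElemTy, ArrayRef<Value *> Idxs) {
  return GetElementPtrInst::getIndexedType(SrcElemTy, Idxs);
}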
4123
4124static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4125 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4126}
4127
4128/// Verify !range and !absolute_symbol metadata. These have the same
4129/// restrictions, except !absolute_symbol allows the full set.
4130void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4131 Type *Ty, bool IsAbsoluteSymbol) {
4132 unsigned NumOperands = Range->getNumOperands();
4133 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4134 unsigned NumRanges = NumOperands / 2;
4135 Check(NumRanges >= 1, "It should have at least one range!", Range);
4136
4137 ConstantRange LastRange(1, true); // Dummy initial value
4138 for (unsigned i = 0; i < NumRanges; ++i) {
4139 ConstantInt *Low =
4140 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4141 Check(Low, "The lower limit must be an integer!", Low);
4142 ConstantInt *High =
4143 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4144 Check(High, "The upper limit must be an integer!", High);
4145 Check(High->getType() == Low->getType() &&
4146 High->getType() == Ty->getScalarType(),
4147 "Range types must match instruction type!", &I);
4148
4149 APInt HighV = High->getValue();
4150 APInt LowV = Low->getValue();
4151
4152 // ConstantRange asserts if the ranges are the same except for the min/max
4153 // value. Leave the cases it tolerates for the empty range error below.
4154 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4155 "The upper and lower limits cannot be the same value", &I);
4156
4157 ConstantRange CurRange(LowV, HighV);
4158 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4159 "Range must not be empty!", Range);
4160 if (i != 0) {
4161 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4162 "Intervals are overlapping", Range);
4163 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4164 Range);
4165 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4166 Range);
4167 }
4168 LastRange = ConstantRange(LowV, HighV);
4169 }
4170 if (NumRanges > 2) {
4171 APInt FirstLow =
4172 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4173 APInt FirstHigh =
4174 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4175 ConstantRange FirstRange(FirstLow, FirstHigh);
4176 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4177 "Intervals are overlapping", Range);
4178 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4179 Range);
4180 }
4181}
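In practice a well-formed single-interval !range attachment is easiest to produce with MDBuilder; the sketch below annotates an i8 load with the half-open range [0, 32). This is illustrative client code assuming the standard MDBuilder::createRange and Instruction::setMetadata APIs, not something this file does itself.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
using namespace llvm;

// Attach !range metadata stating that the loaded i8 lies in [0, 32).
// The bound type (i8) must match the loaded type, per the checks above.
static void annotateByteLoad(LoadInst *LI) {
  LLVMContext &Ctx = LI->getContext();
  MDBuilder MDB(Ctx);
  MDNode *Range = MDB.createRange(APInt(8, 0), APInt(8, 32));
  LI->setMetadata(LLVMContext::MD_range, Range);
}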
4182
4183void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4184 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4185 "precondition violation");
4186 verifyRangeMetadata(I, Range, Ty, false);
4187}
4188
4189void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4190 unsigned Size = DL.getTypeSizeInBits(Ty);
4191 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4192 Check(!(Size & (Size - 1)),
4193 "atomic memory access' operand must have a power-of-two size", Ty, I);
4194}
4195
4196void Verifier::visitLoadInst(LoadInst &LI) {
4197 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4198 Check(PTy, "Load operand must be a pointer.", &LI);
4199 Type *ElTy = LI.getType();
4200 if (MaybeAlign A = LI.getAlign()) {
4201 Check(A->value() <= Value::MaximumAlignment,
4202 "huge alignment values are unsupported", &LI);
4203 }
4204 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4205 if (LI.isAtomic()) {
4208 "Load cannot have Release ordering", &LI);
4209 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4210 "atomic load operand must have integer, pointer, or floating point "
4211 "type!",
4212 ElTy, &LI);
4213 checkAtomicMemAccessSize(ElTy, &LI);
4214 } else {
4216 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4217 }
4218
4219 visitInstruction(LI);
4220}
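The atomic-load constraints above (no release-style orderings, integer/pointer/FP element type, power-of-two byte size) are straightforward to satisfy from IRBuilder. A sketch of emitting an acquire load of an i32; the helper name and parameters are illustrative, not from this file.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

// Emit: %v = load atomic i32, ptr %Ptr acquire, align 4
// i32 is a legal atomic element type and 4 bytes is a power of two, so the
// checks above are satisfied; Release/AcquireRelease orderings would not be.
static Value *emitAcquireLoad(IRBuilder<> &B, Value *Ptr) {
  LoadInst *LI = B.CreateAlignedLoad(B.getInt32Ty(), Ptr, Align(4), "v");
  LI->setAtomic(AtomicOrdering::Acquire);
  return LI;
}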
4221
4222void Verifier::visitStoreInst(StoreInst &SI) {
4223 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4224 Check(PTy, "Store operand must be a pointer.", &SI);
4225 Type *ElTy = SI.getOperand(0)->getType();
4226 if (MaybeAlign A = SI.getAlign()) {
4227 Check(A->value() <= Value::MaximumAlignment,
4228 "huge alignment values are unsupported", &SI);
4229 }
4230 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4231 if (SI.isAtomic()) {
4232 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4233 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4234 "Store cannot have Acquire ordering", &SI);
4235 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4236 "atomic store operand must have integer, pointer, or floating point "
4237 "type!",
4238 ElTy, &SI);
4239 checkAtomicMemAccessSize(ElTy, &SI);
4240 } else {
4241 Check(SI.getSyncScopeID() == SyncScope::System,
4242 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4243 }
4244 visitInstruction(SI);
4245}
4246
4247/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4248void Verifier::verifySwiftErrorCall(CallBase &Call,
4249 const Value *SwiftErrorVal) {
4250 for (const auto &I : llvm::enumerate(Call.args())) {
4251 if (I.value() == SwiftErrorVal) {
4252 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4253 "swifterror value when used in a callsite should be marked "
4254 "with swifterror attribute",
4255 SwiftErrorVal, Call);
4256 }
4257 }
4258}
4259
4260void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4261 // Check that swifterror value is only used by loads, stores, or as
4262 // a swifterror argument.
4263 for (const User *U : SwiftErrorVal->users()) {
4264 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4265 isa<InvokeInst>(U),
4266 "swifterror value can only be loaded and stored from, or "
4267 "as a swifterror argument!",
4268 SwiftErrorVal, U);
4269 // If it is used by a store, check it is the second operand.
4270 if (auto StoreI = dyn_cast<StoreInst>(U))
4271 Check(StoreI->getOperand(1) == SwiftErrorVal,
4272 "swifterror value should be the second operand when used "
4273 "by stores",
4274 SwiftErrorVal, U);
4275 if (auto *Call = dyn_cast<CallBase>(U))
4276 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4277 }
4278}
4279
4280void Verifier::visitAllocaInst(AllocaInst &AI) {
4281 SmallPtrSet<Type*, 4> Visited;
4282 Check(AI.getAllocatedType()->isSized(&Visited),
4283 "Cannot allocate unsized type", &AI);
4285 "Alloca array size must have integer type", &AI);
4286 if (MaybeAlign A = AI.getAlign()) {
4287 Check(A->value() <= Value::MaximumAlignment,
4288 "huge alignment values are unsupported", &AI);
4289 }
4290
4291 if (AI.isSwiftError()) {
4293 "swifterror alloca must have pointer type", &AI);
4295 "swifterror alloca must not be array allocation", &AI);
4296 verifySwiftErrorValue(&AI);
4297 }
4298
4299 visitInstruction(AI);
4300}
4301
4302void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4303 Type *ElTy = CXI.getOperand(1)->getType();
4304 Check(ElTy->isIntOrPtrTy(),
4305 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4306 checkAtomicMemAccessSize(ElTy, &CXI);
4307 visitInstruction(CXI);
4308}
4309
4310void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4312 "atomicrmw instructions cannot be unordered.", &RMWI);
4313 auto Op = RMWI.getOperation();
4314 Type *ElTy = RMWI.getOperand(1)->getType();
4315 if (Op == AtomicRMWInst::Xchg) {
4316 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4317 ElTy->isPointerTy(),
4318 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4319 " operand must have integer or floating point type!",
4320 &RMWI, ElTy);
4321 } else if (AtomicRMWInst::isFPOperation(Op)) {
4322 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4323 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4324 " operand must have floating-point or fixed vector of floating-point "
4325 "type!",
4326 &RMWI, ElTy);
4327 } else {
4328 Check(ElTy->isIntegerTy(),
4329 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4330 " operand must have integer type!",
4331 &RMWI, ElTy);
4332 }
4333 checkAtomicMemAccessSize(ElTy, &RMWI);
4335 "Invalid binary operation!", &RMWI);
4336 visitInstruction(RMWI);
4337}
4338
4339void Verifier::visitFenceInst(FenceInst &FI) {
4340 const AtomicOrdering Ordering = FI.getOrdering();
4341 Check(Ordering == AtomicOrdering::Acquire ||
4342 Ordering == AtomicOrdering::Release ||
4343 Ordering == AtomicOrdering::AcquireRelease ||
4345 "fence instructions may only have acquire, release, acq_rel, or "
4346 "seq_cst ordering.",
4347 &FI);
4348 visitInstruction(FI);
4349}
4350
4351void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4352 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4353 EVI.getIndices()) == EVI.getType(),
4354 "Invalid ExtractValueInst operands!", &EVI);
4355
4356 visitInstruction(EVI);
4357}
4358
4359void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4360 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4361 IVI.getIndices()) ==
4362 IVI.getOperand(1)->getType(),
4363 "Invalid InsertValueInst operands!", &IVI);
4364
4365 visitInstruction(IVI);
4366}
4367
4368static Value *getParentPad(Value *EHPad) {
4369 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4370 return FPI->getParentPad();
4371
4372 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4373}
4374
4375void Verifier::visitEHPadPredecessors(Instruction &I) {
4376 assert(I.isEHPad());
4377
4378 BasicBlock *BB = I.getParent();
4379 Function *F = BB->getParent();
4380
4381 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4382
4383 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4384 // The landingpad instruction defines its parent as a landing pad block. The
4385 // landing pad block may be branched to only by the unwind edge of an
4386 // invoke.
4387 for (BasicBlock *PredBB : predecessors(BB)) {
4388 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4389 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4390 "Block containing LandingPadInst must be jumped to "
4391 "only by the unwind edge of an invoke.",
4392 LPI);
4393 }
4394 return;
4395 }
4396 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4397 if (!pred_empty(BB))
4398 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4399 "Block containg CatchPadInst must be jumped to "
4400 "only by its catchswitch.",
4401 CPI);
4402 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4403 "Catchswitch cannot unwind to one of its catchpads",
4404 CPI->getCatchSwitch(), CPI);
4405 return;
4406 }
4407
4408 // Verify that each pred has a legal terminator with a legal to/from EH
4409 // pad relationship.
4410 Instruction *ToPad = &I;
4411 Value *ToPadParent = getParentPad(ToPad);
4412 for (BasicBlock *PredBB : predecessors(BB)) {
4413 Instruction *TI = PredBB->getTerminator();
4414 Value *FromPad;
4415 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4416 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4417 "EH pad must be jumped to via an unwind edge", ToPad, II);
4418 auto *CalledFn =
4419 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4420 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4421 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4422 continue;
4423 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4424 FromPad = Bundle->Inputs[0];
4425 else
4426 FromPad = ConstantTokenNone::get(II->getContext());
4427 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4428 FromPad = CRI->getOperand(0);
4429 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4430 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4431 FromPad = CSI;
4432 } else {
4433 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4434 }
4435
4436 // The edge may exit from zero or more nested pads.
4437 SmallSet<Value *, 8> Seen;
4438 for (;; FromPad = getParentPad(FromPad)) {
4439 Check(FromPad != ToPad,
4440 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4441 if (FromPad == ToPadParent) {
4442 // This is a legal unwind edge.
4443 break;
4444 }
4445 Check(!isa<ConstantTokenNone>(FromPad),
4446 "A single unwind edge may only enter one EH pad", TI);
4447 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4448 FromPad);
4449
4450 // This will be diagnosed on the corresponding instruction already. We
4451 // need the extra check here to make sure getParentPad() works.
4452 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4453 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4454 }
4455 }
4456}
4457
4458void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4459 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4460 // isn't a cleanup.
4461 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4462 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4463
4464 visitEHPadPredecessors(LPI);
4465
4466 if (!LandingPadResultTy)
4467 LandingPadResultTy = LPI.getType();
4468 else
4469 Check(LandingPadResultTy == LPI.getType(),
4470 "The landingpad instruction should have a consistent result type "
4471 "inside a function.",
4472 &LPI);
4473
4474 Function *F = LPI.getParent()->getParent();
4475 Check(F->hasPersonalityFn(),
4476 "LandingPadInst needs to be in a function with a personality.", &LPI);
4477
4478 // The landingpad instruction must be the first non-PHI instruction in the
4479 // block.
4480 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4481 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4482
4483 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4484 Constant *Clause = LPI.getClause(i);
4485 if (LPI.isCatch(i)) {
4486 Check(isa<PointerType>(Clause->getType()),
4487 "Catch operand does not have pointer type!", &LPI);
4488 } else {
4489 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4490 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4491 "Filter operand is not an array of constants!", &LPI);
4492 }
4493 }
4494
4495 visitInstruction(LPI);
4496}
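Putting the landingpad rules above together: the pad block must be reached only through an invoke's unwind edge, the landingpad must be the first non-PHI instruction, it needs a clause or must be a cleanup, and the enclosing function needs a personality. A sketch of wiring that up with IRBuilder; F is assumed to return void, and MayThrow/Personality are caller-supplied placeholders rather than anything defined in this file.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void addInvokeWithCleanupPad(Function &F, FunctionCallee MayThrow,
                                    Constant *Personality) {
  LLVMContext &Ctx = F.getContext();
  F.setPersonalityFn(Personality);

  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", &F);
  BasicBlock *Normal = BasicBlock::Create(Ctx, "normal", &F);
  BasicBlock *Pad = BasicBlock::Create(Ctx, "lpad", &F);

  IRBuilder<> B(Entry);
  B.CreateInvoke(MayThrow, Normal, Pad);   // Pad is reached only via unwind

  B.SetInsertPoint(Normal);
  B.CreateRetVoid();

  B.SetInsertPoint(Pad);
  // {ptr, i32} is the usual landingpad result type for the Itanium C++ ABI.
  Type *LPadTy = StructType::get(PointerType::get(Ctx, 0), B.getInt32Ty());
  LandingPadInst *LP = B.CreateLandingPad(LPadTy, /*NumClauses=*/0);
  LP->setCleanup(true);                    // no clauses, so must be a cleanup
  B.CreateResume(LP);
}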
4497
4498void Verifier::visitResumeInst(ResumeInst &RI) {
4500 "ResumeInst needs to be in a function with a personality.", &RI);
4501
4502 if (!LandingPadResultTy)
4503 LandingPadResultTy = RI.getValue()->getType();
4504 else
4505 Check(LandingPadResultTy == RI.getValue()->getType(),
4506 "The resume instruction should have a consistent result type "
4507 "inside a function.",
4508 &RI);
4509
4510 visitTerminator(RI);
4511}
4512
4513void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4514 BasicBlock *BB = CPI.getParent();
4515
4516 Function *F = BB->getParent();
4517 Check(F->hasPersonalityFn(),
4518 "CatchPadInst needs to be in a function with a personality.", &CPI);
4519
4520 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4521 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4522 CPI.getParentPad());
4523
4524 // The catchpad instruction must be the first non-PHI instruction in the
4525 // block.
4526 Check(BB->getFirstNonPHI() == &CPI,
4527 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4528
4529 visitEHPadPredecessors(CPI);
4530 visitFuncletPadInst(CPI);
4531}
4532
4533void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4534 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4535 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4536 CatchReturn.getOperand(0));
4537
4538 visitTerminator(CatchReturn);
4539}
4540
4541void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4542 BasicBlock *BB = CPI.getParent();
4543
4544 Function *F = BB->getParent();
4545 Check(F->hasPersonalityFn(),
4546 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4547
4548 // The cleanuppad instruction must be the first non-PHI instruction in the
4549 // block.
4550 Check(BB->getFirstNonPHI() == &CPI,
4551 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4552
4553 auto *ParentPad = CPI.getParentPad();
4554 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4555 "CleanupPadInst has an invalid parent.", &CPI);
4556
4557 visitEHPadPredecessors(CPI);
4558 visitFuncletPadInst(CPI);
4559}
4560
4561void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4562 User *FirstUser = nullptr;
4563 Value *FirstUnwindPad = nullptr;
4564 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4565 SmallSet<FuncletPadInst *, 8> Seen;
4566
4567 while (!Worklist.empty()) {
4568 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4569 Check(Seen.insert(CurrentPad).second,
4570 "FuncletPadInst must not be nested within itself", CurrentPad);
4571 Value *UnresolvedAncestorPad = nullptr;
4572 for (User *U : CurrentPad->users()) {
4573 BasicBlock *UnwindDest;
4574 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4575 UnwindDest = CRI->getUnwindDest();
4576 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4577 // We allow catchswitch unwind to caller to nest
4578 // within an outer pad that unwinds somewhere else,
4579 // because catchswitch doesn't have a nounwind variant.
4580 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4581 if (CSI->unwindsToCaller())
4582 continue;
4583 UnwindDest = CSI->getUnwindDest();
4584 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4585 UnwindDest = II->getUnwindDest();
4586 } else if (isa<CallInst>(U)) {
4587 // Calls which don't unwind may be found inside funclet
4588 // pads that unwind somewhere else. We don't *require*
4589 // such calls to be annotated nounwind.
4590 continue;
4591 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4592 // The unwind dest for a cleanup can only be found by
4593 // recursive search. Add it to the worklist, and we'll
4594 // search for its first use that determines where it unwinds.
4595 Worklist.push_back(CPI);
4596 continue;
4597 } else {
4598 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4599 continue;
4600 }
4601
4602 Value *UnwindPad;
4603 bool ExitsFPI;
4604 if (UnwindDest) {
4605 UnwindPad = UnwindDest->getFirstNonPHI();
4606 if (!cast<Instruction>(UnwindPad)->isEHPad())
4607 continue;
4608 Value *UnwindParent = getParentPad(UnwindPad);
4609 // Ignore unwind edges that don't exit CurrentPad.
4610 if (UnwindParent == CurrentPad)
4611 continue;
4612 // Determine whether the original funclet pad is exited,
4613 // and if we are scanning nested pads determine how many
4614 // of them are exited so we can stop searching their
4615 // children.
4616 Value *ExitedPad = CurrentPad;
4617 ExitsFPI = false;
4618 do {
4619 if (ExitedPad == &FPI) {
4620 ExitsFPI = true;
4621 // Now we can resolve any ancestors of CurrentPad up to
4622 // FPI, but not including FPI since we need to make sure
4623 // to check all direct users of FPI for consistency.
4624 UnresolvedAncestorPad = &FPI;
4625 break;
4626 }
4627 Value *ExitedParent = getParentPad(ExitedPad);
4628 if (ExitedParent == UnwindParent) {
4629 // ExitedPad is the ancestor-most pad which this unwind
4630 // edge exits, so we can resolve up to it, meaning that
4631 // ExitedParent is the first ancestor still unresolved.
4632 UnresolvedAncestorPad = ExitedParent;
4633 break;
4634 }
4635 ExitedPad = ExitedParent;
4636 } while (!isa<ConstantTokenNone>(ExitedPad));
4637 } else {
4638 // Unwinding to caller exits all pads.
4639 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4640 ExitsFPI = true;
4641 UnresolvedAncestorPad = &FPI;
4642 }
4643
4644 if (ExitsFPI) {
4645 // This unwind edge exits FPI. Make sure it agrees with other
4646 // such edges.
4647 if (FirstUser) {
4648 Check(UnwindPad == FirstUnwindPad,
4649 "Unwind edges out of a funclet "
4650 "pad must have the same unwind "
4651 "dest",
4652 &FPI, U, FirstUser);
4653 } else {
4654 FirstUser = U;
4655 FirstUnwindPad = UnwindPad;
4656 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4657 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4658 getParentPad(UnwindPad) == getParentPad(&FPI))
4659 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4660 }
4661 }
4662 // Make sure we visit all uses of FPI, but for nested pads stop as
4663 // soon as we know where they unwind to.
4664 if (CurrentPad != &FPI)
4665 break;
4666 }
4667 if (UnresolvedAncestorPad) {
4668 if (CurrentPad == UnresolvedAncestorPad) {
4669 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4670 // we've found an unwind edge that exits it, because we need to verify
4671 // all direct uses of FPI.
4672 assert(CurrentPad == &FPI);
4673 continue;
4674 }
4675 // Pop off the worklist any nested pads that we've found an unwind
4676 // destination for. The pads on the worklist are the uncles,
4677 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4678 // for all ancestors of CurrentPad up to but not including
4679 // UnresolvedAncestorPad.
4680 Value *ResolvedPad = CurrentPad;
4681 while (!Worklist.empty()) {
4682 Value *UnclePad = Worklist.back();
4683 Value *AncestorPad = getParentPad(UnclePad);
4684 // Walk ResolvedPad up the ancestor list until we either find the
4685 // uncle's parent or the last resolved ancestor.
4686 while (ResolvedPad != AncestorPad) {
4687 Value *ResolvedParent = getParentPad(ResolvedPad);
4688 if (ResolvedParent == UnresolvedAncestorPad) {
4689 break;
4690 }
4691 ResolvedPad = ResolvedParent;
4692 }
4693 // If the resolved ancestor search didn't find the uncle's parent,
4694 // then the uncle is not yet resolved.
4695 if (ResolvedPad != AncestorPad)
4696 break;
4697 // This uncle is resolved, so pop it from the worklist.
4698 Worklist.pop_back();
4699 }
4700 }
4701 }
4702
4703 if (FirstUnwindPad) {
4704 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4705 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4706 Value *SwitchUnwindPad;
4707 if (SwitchUnwindDest)
4708 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4709 else
4710 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4711 Check(SwitchUnwindPad == FirstUnwindPad,
4712 "Unwind edges out of a catch must have the same unwind dest as "
4713 "the parent catchswitch",
4714 &FPI, FirstUser, CatchSwitch);
4715 }
4716 }
4717
4718 visitInstruction(FPI);
4719}
4720
4721void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4722 BasicBlock *BB = CatchSwitch.getParent();
4723
4724 Function *F = BB->getParent();
4725 Check(F->hasPersonalityFn(),
4726 "CatchSwitchInst needs to be in a function with a personality.",
4727 &CatchSwitch);
4728
4729 // The catchswitch instruction must be the first non-PHI instruction in the
4730 // block.
4731 Check(BB->getFirstNonPHI() == &CatchSwitch,
4732 "CatchSwitchInst not the first non-PHI instruction in the block.",
4733 &CatchSwitch);
4734
4735 auto *ParentPad = CatchSwitch.getParentPad();
4736 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4737 "CatchSwitchInst has an invalid parent.", ParentPad);
4738
4739 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4740 Instruction *I = UnwindDest->getFirstNonPHI();
4741 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4742 "CatchSwitchInst must unwind to an EH block which is not a "
4743 "landingpad.",
4744 &CatchSwitch);
4745
4746 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4747 if (getParentPad(I) == ParentPad)
4748 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4749 }
4750
4751 Check(CatchSwitch.getNumHandlers() != 0,
4752 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4753
4754 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4755 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4756 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4757 }
4758
4759 visitEHPadPredecessors(CatchSwitch);
4760 visitTerminator(CatchSwitch);
4761}
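// Illustrative IR (editorial example, not part of this file): a catchswitch
// that satisfies the checks above -- a non-empty handler list, and each
// handler block starting with a catchpad:
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ptr null, i32 64, ptr null]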
4762
4763void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4764 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4765 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4766 CRI.getOperand(0));
4767
4768 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4769 Instruction *I = UnwindDest->getFirstNonPHI();
4770 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4771 "CleanupReturnInst must unwind to an EH block which is not a "
4772 "landingpad.",
4773 &CRI);
4774 }
4775
4776 visitTerminator(CRI);
4777}
4778
4779void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4780 Instruction *Op = cast<Instruction>(I.getOperand(i));
4781 // If we have an invalid invoke, don't try to compute the dominance.
4782 // We already reject it in the invoke specific checks and the dominance
4783 // computation doesn't handle multiple edges.
4784 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4785 if (II->getNormalDest() == II->getUnwindDest())
4786 return;
4787 }
4788
4789 // Quick check whether the def has already been encountered in the same block.
4790 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4791 // uses are defined to happen on the incoming edge, not at the instruction.
4792 //
4793 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4794 // wrapping an SSA value, assert that we've already encountered it. See
4795 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4796 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4797 return;
4798
4799 const Use &U = I.getOperandUse(i);
4800 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4801}
4802
4803void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4804 Check(I.getType()->isPointerTy(),
4805 "dereferenceable, dereferenceable_or_null "
4806 "apply only to pointer types",
4807 &I);
4808 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4809 "dereferenceable, dereferenceable_or_null apply only to load"
4810 " and inttoptr instructions, use attributes for calls or invokes",
4811 &I);
4812 Check(MD->getNumOperands() == 1,
4813 "dereferenceable, dereferenceable_or_null "
4814 "take one operand!",
4815 &I);
4816 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4817 Check(CI && CI->getType()->isIntegerTy(64),
4818 "dereferenceable, "
4819 "dereferenceable_or_null metadata value must be an i64!",
4820 &I);
4821}
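// Illustrative IR (editorial example, not part of this file): !dereferenceable
// on a load carries a single i64 byte count, as verified above:
//   %p = load ptr, ptr %pp, !dereferenceable !0
//   !0 = !{i64 8}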
4822
4823void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4824 Check(MD->getNumOperands() >= 2,
4825 "!prof annotations should have no less than 2 operands", MD);
4826
4827 // Check first operand.
4828 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4829 Check(isa<MDString>(MD->getOperand(0)),
4830 "expected string with name of the !prof annotation", MD);
4831 MDString *MDS = cast<MDString>(MD->getOperand(0));
4832 StringRef ProfName = MDS->getString();
4833
4834 // Check consistency of !prof branch_weights metadata.
4835 if (ProfName == "branch_weights") {
4836 unsigned NumBranchWeights = getNumBranchWeights(*MD);
4837 if (isa<InvokeInst>(&I)) {
4838 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
4839 "Wrong number of InvokeInst branch_weights operands", MD);
4840 } else {
4841 unsigned ExpectedNumOperands = 0;
4842 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4843 ExpectedNumOperands = BI->getNumSuccessors();
4844 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4845 ExpectedNumOperands = SI->getNumSuccessors();
4846 else if (isa<CallInst>(&I))
4847 ExpectedNumOperands = 1;
4848 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4849 ExpectedNumOperands = IBI->getNumDestinations();
4850 else if (isa<SelectInst>(&I))
4851 ExpectedNumOperands = 2;
4852 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4853 ExpectedNumOperands = CI->getNumSuccessors();
4854 else
4855 CheckFailed("!prof branch_weights are not allowed for this instruction",
4856 MD);
4857
4858 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
4859 MD);
4860 }
4861 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
4862 ++i) {
4863 auto &MDO = MD->getOperand(i);
4864 Check(MDO, "second operand should not be null", MD);
4865 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4866 "!prof brunch_weights operand is not a const int");
4867 }
4868 }
4869}
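// Illustrative IR (editorial example, not part of this file): branch_weights
// !prof metadata on a conditional branch carries one weight per successor:
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 10}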
4870
4871void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4872 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4873 bool ExpectedInstTy =
4874 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4875 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4876 I, MD);
4877 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4878 // only be found as DbgAssignIntrinsic operands.
4879 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4880 for (auto *User : AsValue->users()) {
4881 CheckDI(isa<DbgAssignIntrinsic>(User),
4882 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4883 MD, User);
4884 // All of the dbg.assign intrinsics should be in the same function as I.
4885 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4886 CheckDI(DAI->getFunction() == I.getFunction(),
4887 "dbg.assign not in same function as inst", DAI, &I);
4888 }
4889 }
4890 for (DbgVariableRecord *DVR :
4891 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4892 CheckDI(DVR->isDbgAssign(),
4893 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4894 CheckDI(DVR->getFunction() == I.getFunction(),
4895 "DVRAssign not in same function as inst", DVR, &I);
4896 }
4897}
4898
4899void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4901 "!mmra metadata attached to unexpected instruction kind", I, MD);
4902
4903 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4904 // list of tags such as !2 in the following example:
4905 // !0 = !{!"a", !"b"}
4906 // !1 = !{!"c", !"d"}
4907 // !2 = !{!0, !1}
4908 if (MMRAMetadata::isTagMD(MD))
4909 return;
4910
4911 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4912 for (const MDOperand &MDOp : MD->operands())
4913 Check(MMRAMetadata::isTagMD(MDOp.get()),
4914 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4915}
4916
4917void Verifier::visitCallStackMetadata(MDNode *MD) {
4918 // Call stack metadata should consist of a list of at least 1 constant int
4919 // (representing a hash of the location).
4920 Check(MD->getNumOperands() >= 1,
4921 "call stack metadata should have at least 1 operand", MD);
4922
4923 for (const auto &Op : MD->operands())
4924 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4925 "call stack metadata operand should be constant integer", Op);
4926}
4927
4928void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4929 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4930 Check(MD->getNumOperands() >= 1,
4931 "!memprof annotations should have at least 1 metadata operand "
4932 "(MemInfoBlock)",
4933 MD);
4934
4935 // Check each MIB
4936 for (auto &MIBOp : MD->operands()) {
4937 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4938 // The first operand of an MIB should be the call stack metadata.
4939 // The rest of the operands should be MDString tags, and there should be
4940 // at least one.
4941 Check(MIB->getNumOperands() >= 2,
4942 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4943
4944 // Check call stack metadata (first operand).
4945 Check(MIB->getOperand(0) != nullptr,
4946 "!memprof MemInfoBlock first operand should not be null", MIB);
4947 Check(isa<MDNode>(MIB->getOperand(0)),
4948 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4949 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4950 visitCallStackMetadata(StackMD);
4951
4952 // Check that remaining operands, except possibly the last, are MDString.
4953 Check(llvm::all_of(MIB->operands().drop_front().drop_back(),
4954 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4955 "Not all !memprof MemInfoBlock operands 1 to N-1 are MDString", MIB);
4956 // The last operand might be the total profiled size so can be an integer.
4957 auto &LastOperand = MIB->operands().back();
4958 Check(isa<MDString>(LastOperand) || mdconst::hasa<ConstantInt>(LastOperand),
4959 "Last !memprof MemInfoBlock operand not MDString or int", MIB);
4960 }
4961}
4962
4963void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4964 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4965 // Verify the partial callstack annotated from memprof profiles. This callsite
4966 // is a part of a profiled allocation callstack.
4967 visitCallStackMetadata(MD);
4968}
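// Illustrative IR (editorial example, not part of this file): a !memprof
// attachment whose single MemInfoBlock pairs a call stack with an allocation
// type tag, plus the matching !callsite partial call stack:
//   %call = call ptr @malloc(i64 8), !memprof !0, !callsite !3
//   !0 = !{!1}
//   !1 = !{!2, !"notcold"}
//   !2 = !{i64 123, i64 456}
//   !3 = !{i64 123}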
4969
4970void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4971 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4972 Check(Annotation->getNumOperands() >= 1,
4973 "annotation must have at least one operand");
4974 for (const MDOperand &Op : Annotation->operands()) {
4975 bool TupleOfStrings =
4976 isa<MDTuple>(Op.get()) &&
4977 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4978 return isa<MDString>(Annotation.get());
4979 });
4980 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4981 "operands must be a string or a tuple of strings");
4982 }
4983}
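// Illustrative IR (editorial example, not part of this file): !annotation
// operands are strings or tuples of strings:
//   store i32 0, ptr %p, !annotation !0
//   !0 = !{!"auto-init"}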
4984
4985void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4986 unsigned NumOps = MD->getNumOperands();
4987 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4988 MD);
4989 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4990 "first scope operand must be self-referential or string", MD);
4991 if (NumOps == 3)
4992 Check(isa<MDString>(MD->getOperand(2)),
4993 "third scope operand must be string (if used)", MD);
4994
4995 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4996 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4997
4998 unsigned NumDomainOps = Domain->getNumOperands();
4999 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5000 "domain must have one or two operands", Domain);
5001 Check(Domain->getOperand(0).get() == Domain ||
5002 isa<MDString>(Domain->getOperand(0)),
5003 "first domain operand must be self-referential or string", Domain);
5004 if (NumDomainOps == 2)
5005 Check(isa<MDString>(Domain->getOperand(1)),
5006 "second domain operand must be string (if used)", Domain);
5007}
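// Illustrative IR (editorial example, not part of this file): a scope list,
// a self-referential scope with its domain and optional name, and a
// self-referential domain, matching the layout verified above:
//   !0 = !{!1}
//   !1 = !{!1, !2, !"scope"}
//   !2 = !{!2, !"domain"}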
5008
5009void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5010 for (const MDOperand &Op : MD->operands()) {
5011 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5012 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5013 visitAliasScopeMetadata(OpMD);
5014 }
5015}
5016
5017void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5018 auto IsValidAccessScope = [](const MDNode *MD) {
5019 return MD->getNumOperands() == 0 && MD->isDistinct();
5020 };
5021
5022 // It must be either an access scope itself...
5023 if (IsValidAccessScope(MD))
5024 return;
5025
5026 // ...or a list of access scopes.
5027 for (const MDOperand &Op : MD->operands()) {
5028 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5029 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5030 Check(IsValidAccessScope(OpMD),
5031 "Access scope list contains invalid access scope", MD);
5032 }
5033}
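// Illustrative IR (editorial example, not part of this file): an access group
// is a distinct, empty MDNode; a list may group several of them:
//   !0 = distinct !{}
//   !1 = distinct !{}
//   !2 = !{!0, !1}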
5034
5035/// verifyInstruction - Verify that an instruction is well formed.
5036///
5037void Verifier::visitInstruction(Instruction &I) {
5038 BasicBlock *BB = I.getParent();
5039 Check(BB, "Instruction not embedded in basic block!", &I);
5040
5041 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5042 for (User *U : I.users()) {
5043 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5044 "Only PHI nodes may reference their own value!", &I);
5045 }
5046 }
5047
5048 // Check that void typed values don't have names
5049 Check(!I.getType()->isVoidTy() || !I.hasName(),
5050 "Instruction has a name, but provides a void value!", &I);
5051
5052 // Check that the return value of the instruction is either void or a legal
5053 // value type.
5054 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5055 "Instruction returns a non-scalar type!", &I);
5056
5057 // Check that the instruction doesn't produce metadata. Calls are already
5058 // checked against the callee type.
5059 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5060 "Invalid use of metadata!", &I);
5061
5062 // Check that all uses of the instruction, if they are instructions
5063 // themselves, actually have parent basic blocks. If the use is not an
5064 // instruction, it is an error!
5065 for (Use &U : I.uses()) {
5066 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5067 Check(Used->getParent() != nullptr,
5068 "Instruction referencing"
5069 " instruction not embedded in a basic block!",
5070 &I, Used);
5071 else {
5072 CheckFailed("Use of instruction is not an instruction!", U);
5073 return;
5074 }
5075 }
5076
5077 // Get a pointer to the call base of the instruction if it is some form of
5078 // call.
5079 const CallBase *CBI = dyn_cast<CallBase>(&I);
5080
5081 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5082 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5083
5084 // Check to make sure that only first-class-values are operands to
5085 // instructions.
5086 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5087 Check(false, "Instruction operands must be first-class values!", &I);
5088 }
5089
5090 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5091 // This code checks whether the function is used as the operand of a
5092 // clang_arc_attachedcall operand bundle.
5093 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5094 int Idx) {
5095 return CBI && CBI->isOperandBundleOfType(
5096 LLVMContext::OB_clang_arc_attachedcall, Idx);
5097 };
5098
5099 // Check to make sure that the "address of" an intrinsic function is never
5100 // taken. Ignore cases where the address of the intrinsic function is used
5101 // as the argument of operand bundle "clang.arc.attachedcall" as those
5102 // cases are handled in verifyAttachedCallBundle.
5103 Check((!F->isIntrinsic() ||
5104 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5105 IsAttachedCallOperand(F, CBI, i)),
5106 "Cannot take the address of an intrinsic!", &I);
5107 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5108 F->getIntrinsicID() == Intrinsic::donothing ||
5109 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5110 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5111 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5112 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5113 F->getIntrinsicID() == Intrinsic::coro_resume ||
5114 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5115 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5116 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5117 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5118 F->getIntrinsicID() ==
5119 Intrinsic::experimental_patchpoint_void ||
5120 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5121 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5122 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5123 IsAttachedCallOperand(F, CBI, i),
5124 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5125 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5126 &I);
5127 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5128 &M, F, F->getParent());
5129 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5130 Check(OpBB->getParent() == BB->getParent(),
5131 "Referring to a basic block in another function!", &I);
5132 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5133 Check(OpArg->getParent() == BB->getParent(),
5134 "Referring to an argument in another function!", &I);
5135 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5136 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5137 &M, GV, GV->getParent());
5138 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5139 Check(OpInst->getFunction() == BB->getParent(),
5140 "Referring to an instruction in another function!", &I);
5141 verifyDominatesUse(I, i);
5142 } else if (isa<InlineAsm>(I.getOperand(i))) {
5143 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5144 "Cannot take the address of an inline asm!", &I);
5145 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5146 visitConstantExprsRecursively(CPA);
5147 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5148 if (CE->getType()->isPtrOrPtrVectorTy()) {
5149 // If we have a ConstantExpr pointer, we need to see if it came from an
5150 // illegal bitcast.
5151 visitConstantExprsRecursively(CE);
5152 }
5153 }
5154 }
5155
5156 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5157 Check(I.getType()->isFPOrFPVectorTy(),
5158 "fpmath requires a floating point result!", &I);
5159 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5160 if (ConstantFP *CFP0 =
5161 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5162 const APFloat &Accuracy = CFP0->getValueAPF();
5163 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5164 "fpmath accuracy must have float type", &I);
5165 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5166 "fpmath accuracy not a positive number!", &I);
5167 } else {
5168 Check(false, "invalid fpmath accuracy!", &I);
5169 }
5170 }
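// Illustrative IR (editorial example, not part of this file): !fpmath carries
// a single positive float accuracy operand:
//   %q = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.5}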
5171
5172 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5173 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5174 "Ranges are only for loads, calls and invokes!", &I);
5175 visitRangeMetadata(I, Range, I.getType());
5176 }
5177
5178 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5179 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5180 "invariant.group metadata is only for loads and stores", &I);
5181 }
5182
5183 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5184 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5185 &I);
5186 Check(isa<LoadInst>(I),
5187 "nonnull applies only to load instructions, use attributes"
5188 " for calls or invokes",
5189 &I);
5190 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5191 }
5192
5193 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5194 visitDereferenceableMetadata(I, MD);
5195
5196 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5197 visitDereferenceableMetadata(I, MD);
5198
5199 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5200 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5201
5202 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5203 visitAliasScopeListMetadata(MD);
5204 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5205 visitAliasScopeListMetadata(MD);
5206
5207 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5208 visitAccessGroupMetadata(MD);
5209
5210 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5211 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5212 &I);
5213 Check(isa<LoadInst>(I),
5214 "align applies only to load instructions, "
5215 "use attributes for calls or invokes",
5216 &I);
5217 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5218 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5219 Check(CI && CI->getType()->isIntegerTy(64),
5220 "align metadata value must be an i64!", &I);
5221 uint64_t Align = CI->getZExtValue();
5222 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5223 &I);
5225 "alignment is larger that implementation defined limit", &I);
5226 }
5227
5228 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5229 visitProfMetadata(I, MD);
5230
5231 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5232 visitMemProfMetadata(I, MD);
5233
5234 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5235 visitCallsiteMetadata(I, MD);
5236
5237 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5238 visitDIAssignIDMetadata(I, MD);
5239
5240 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5241 visitMMRAMetadata(I, MMRA);
5242
5243 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5244 visitAnnotationMetadata(Annotation);
5245
5246 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5247 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5248 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5249 }
5250
5251 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5252 verifyFragmentExpression(*DII);
5253 verifyNotEntryValue(*DII);
5254 }
5255
5256 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5257 I.getAllMetadata(MDs);
5258 for (auto Attachment : MDs) {
5259 unsigned Kind = Attachment.first;
5260 auto AllowLocs =
5261 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5262 ? AreDebugLocsAllowed::Yes
5263 : AreDebugLocsAllowed::No;
5264 visitMDNode(*Attachment.second, AllowLocs);
5265 }
5266
5267 InstsInThisBlock.insert(&I);
5268}
5269
5270/// Allow intrinsics to be verified in different ways.
5271void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5272 Function *IF = Call.getCalledFunction();
5273 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5274 IF);
5275
5276 // Verify that the intrinsic prototype lines up with what the .td files
5277 // describe.
5278 FunctionType *IFTy = IF->getFunctionType();
5279 bool IsVarArg = IFTy->isVarArg();
5280
5281 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5282 Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
5283 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5284
5285 // Walk the descriptors to extract overloaded types.
5286 SmallVector<Type *, 4> ArgTys;
5287 Intrinsic::MatchIntrinsicTypesResult Res =
5288 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5289 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5290 "Intrinsic has incorrect return type!", IF);
5291 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5292 "Intrinsic has incorrect argument type!", IF);
5293
5294 // Verify if the intrinsic call matches the vararg property.
5295 if (IsVarArg)
5296 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5297 "Intrinsic was not defined with variable arguments!", IF);
5298 else
5299 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5300 "Callsite was not defined with variable arguments!", IF);
5301
5302 // All descriptors should be absorbed by now.
5303 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5304
5305 // Now that we have the intrinsic ID and the actual argument types (and we
5306 // know they are legal for the intrinsic!) get the intrinsic name through the
5307 // usual means. This allows us to verify the mangling of argument types into
5308 // the name.
5309 const std::string ExpectedName =
5310 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5311 Check(ExpectedName == IF->getName(),
5312 "Intrinsic name not mangled correctly for type arguments! "
5313 "Should be: " +
5314 ExpectedName,
5315 IF);
5316
5317 // If the intrinsic takes MDNode arguments, verify that they are either global
5318 // or are local to *this* function.
5319 for (Value *V : Call.args()) {
5320 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5321 visitMetadataAsValue(*MD, Call.getCaller());
5322 if (auto *Const = dyn_cast<Constant>(V))
5323 Check(!Const->getType()->isX86_AMXTy(),
5324 "const x86_amx is not allowed in argument!");
5325 }
5326
5327 switch (ID) {
5328 default:
5329 break;
5330 case Intrinsic::assume: {
5331 for (auto &Elem : Call.bundle_op_infos()) {
5332 unsigned ArgCount = Elem.End - Elem.Begin;
5333 // Separate storage assumptions are special insofar as they're the only
5334 // operand bundles allowed on assumes that aren't parameter attributes.
5335 if (Elem.Tag->getKey() == "separate_storage") {
5336 Check(ArgCount == 2,
5337 "separate_storage assumptions should have 2 arguments", Call);
5338 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5339 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5340 "arguments to separate_storage assumptions should be pointers",
5341 Call);
5342 return;
5343 }
5344 Check(Elem.Tag->getKey() == "ignore" ||
5345 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5346 "tags must be valid attribute names", Call);
5347 Attribute::AttrKind Kind =
5348 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5349 if (Kind == Attribute::Alignment) {
5350 Check(ArgCount <= 3 && ArgCount >= 2,
5351 "alignment assumptions should have 2 or 3 arguments", Call);
5352 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5353 "first argument should be a pointer", Call);
5354 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5355 "second argument should be an integer", Call);
5356 if (ArgCount == 3)
5357 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5358 "third argument should be an integer if present", Call);
5359 return;
5360 }
5361 Check(ArgCount <= 2, "too many arguments", Call);
5362 if (Kind == Attribute::None)
5363 break;
5364 if (Attribute::isIntAttrKind(Kind)) {
5365 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5366 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5367 "the second argument should be a constant integral value", Call);
5368 } else if (Attribute::canUseAsParamAttr(Kind)) {
5369 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5370 } else if (Attribute::canUseAsFnAttr(Kind)) {
5371 Check((ArgCount) == 0, "this attribute has no argument", Call);
5372 }
5373 }
5374 break;
5375 }
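// Illustrative IR (editorial examples, not part of this file): assume operand
// bundles of the forms checked above:
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16)]
//   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]
//   call void @llvm.assume(i1 true) ["nonnull"(ptr %p)]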
5376 case Intrinsic::ucmp:
5377 case Intrinsic::scmp: {
5378 Type *SrcTy = Call.getOperand(0)->getType();
5379 Type *DestTy = Call.getType();
5380
5381 Check(DestTy->getScalarSizeInBits() >= 2,
5382 "result type must be at least 2 bits wide", Call);
5383
5384 bool IsDestTypeVector = DestTy->isVectorTy();
5385 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5386 "ucmp/scmp argument and result types must both be either vector or "
5387 "scalar types",
5388 Call);
5389 if (IsDestTypeVector) {
5390 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5391 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5392 Check(SrcVecLen == DestVecLen,
5393 "return type and arguments must have the same number of "
5394 "elements",
5395 Call);
5396 }
5397 break;
5398 }
5399 case Intrinsic::coro_id: {
5400 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5401 if (isa<ConstantPointerNull>(InfoArg))
5402 break;
5403 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5404 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5405 "info argument of llvm.coro.id must refer to an initialized "
5406 "constant");
5407 Constant *Init = GV->getInitializer();
5408 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5409 "info argument of llvm.coro.id must refer to either a struct or "
5410 "an array");
5411 break;
5412 }
5413 case Intrinsic::is_fpclass: {
5414 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5415 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5416 "unsupported bits for llvm.is.fpclass test mask");
5417 break;
5418 }
5419 case Intrinsic::fptrunc_round: {
5420 // Check the rounding mode
5421 Metadata *MD = nullptr;
5422 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5423 if (MAV)
5424 MD = MAV->getMetadata();
5425
5426 Check(MD != nullptr, "missing rounding mode argument", Call);
5427
5428 Check(isa<MDString>(MD),
5429 ("invalid value for llvm.fptrunc.round metadata operand"
5430 " (the operand should be a string)"),
5431 MD);
5432
5433 std::optional<RoundingMode> RoundMode =
5434 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5435 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5436 "unsupported rounding mode argument", Call);
5437 break;
5438 }
5439#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5440#include "llvm/IR/VPIntrinsics.def"
5441#undef BEGIN_REGISTER_VP_INTRINSIC
5442 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5443 break;
5444#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5445 case Intrinsic::INTRINSIC:
5446#include "llvm/IR/ConstrainedOps.def"
5447#undef INSTRUCTION
5448 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5449 break;
5450 case Intrinsic::dbg_declare: // llvm.dbg.declare
5451 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5452 "invalid llvm.dbg.declare intrinsic call 1", Call);
5453 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5454 break;
5455 case Intrinsic::dbg_value: // llvm.dbg.value
5456 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5457 break;
5458 case Intrinsic::dbg_assign: // llvm.dbg.assign
5459 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5460 break;
5461 case Intrinsic::dbg_label: // llvm.dbg.label
5462 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5463 break;
5464 case Intrinsic::memcpy:
5465 case Intrinsic::memcpy_inline:
5466 case Intrinsic::memmove:
5467 case Intrinsic::memset:
5468 case Intrinsic::memset_inline: {
5469 break;
5470 }
5471 case Intrinsic::memcpy_element_unordered_atomic:
5472 case Intrinsic::memmove_element_unordered_atomic:
5473 case Intrinsic::memset_element_unordered_atomic: {
5474 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5475
5476 ConstantInt *ElementSizeCI =
5477 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5478 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5479 Check(ElementSizeVal.isPowerOf2(),
5480 "element size of the element-wise atomic memory intrinsic "
5481 "must be a power of 2",
5482 Call);
5483
5484 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5485 return Alignment && ElementSizeVal.ule(Alignment->value());
5486 };
5487 Check(IsValidAlignment(AMI->getDestAlign()),
5488 "incorrect alignment of the destination argument", Call);
5489 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5490 Check(IsValidAlignment(AMT->getSourceAlign()),
5491 "incorrect alignment of the source argument", Call);
5492 }
5493 break;
5494 }
5495 case Intrinsic::call_preallocated_setup: {
5496 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5497 Check(NumArgs != nullptr,
5498 "llvm.call.preallocated.setup argument must be a constant");
5499 bool FoundCall = false;
5500 for (User *U : Call.users()) {
5501 auto *UseCall = dyn_cast<CallBase>(U);
5502 Check(UseCall != nullptr,
5503 "Uses of llvm.call.preallocated.setup must be calls");
5504 const Function *Fn = UseCall->getCalledFunction();
5505 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5506 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5507 Check(AllocArgIndex != nullptr,
5508 "llvm.call.preallocated.alloc arg index must be a constant");
5509 auto AllocArgIndexInt = AllocArgIndex->getValue();
5510 Check(AllocArgIndexInt.sge(0) &&
5511 AllocArgIndexInt.slt(NumArgs->getValue()),
5512 "llvm.call.preallocated.alloc arg index must be between 0 and "
5513 "corresponding "
5514 "llvm.call.preallocated.setup's argument count");
5515 } else if (Fn && Fn->getIntrinsicID() ==
5516 Intrinsic::call_preallocated_teardown) {
5517 // nothing to do
5518 } else {
5519 Check(!FoundCall, "Can have at most one call corresponding to a "
5520 "llvm.call.preallocated.setup");
5521 FoundCall = true;
5522 size_t NumPreallocatedArgs = 0;
5523 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5524 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5525 ++NumPreallocatedArgs;
5526 }
5527 }
5528 Check(NumPreallocatedArgs != 0,
5529 "cannot use preallocated intrinsics on a call without "
5530 "preallocated arguments");
5531 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5532 "llvm.call.preallocated.setup arg size must be equal to number "
5533 "of preallocated arguments "
5534 "at call site",
5535 Call, *UseCall);
5536 // getOperandBundle() cannot be called if more than one such operand
5537 // bundle exists. There is already a check elsewhere for this, so skip
5538 // here if we see more than one.
5539 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5540 1) {
5541 return;
5542 }
5543 auto PreallocatedBundle =
5544 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5545 Check(PreallocatedBundle,
5546 "Use of llvm.call.preallocated.setup outside intrinsics "
5547 "must be in \"preallocated\" operand bundle");
5548 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5549 "preallocated bundle must have token from corresponding "
5550 "llvm.call.preallocated.setup");
5551 }
5552 }
5553 break;
5554 }
5555 case Intrinsic::call_preallocated_arg: {
5556 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5557 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5558 Intrinsic::call_preallocated_setup,
5559 "llvm.call.preallocated.arg token argument must be a "
5560 "llvm.call.preallocated.setup");
5561 Check(Call.hasFnAttr(Attribute::Preallocated),
5562 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5563 "call site attribute");
5564 break;
5565 }
5566 case Intrinsic::call_preallocated_teardown: {
5567 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5568 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5569 Intrinsic::call_preallocated_setup,
5570 "llvm.call.preallocated.teardown token argument must be a "
5571 "llvm.call.preallocated.setup");
5572 break;
5573 }
5574 case Intrinsic::gcroot:
5575 case Intrinsic::gcwrite:
5576 case Intrinsic::gcread:
5577 if (ID == Intrinsic::gcroot) {
5578 AllocaInst *AI =
5579 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5580 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5581 Check(isa<Constant>(Call.getArgOperand(1)),
5582 "llvm.gcroot parameter #2 must be a constant.", Call);
5583 if (!AI->getAllocatedType()->isPointerTy()) {
5584 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5585 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5586 "or argument #2 must be a non-null constant.",
5587 Call);
5588 }
5589 }
5590
5591 Check(Call.getParent()->getParent()->hasGC(),
5592 "Enclosing function does not use GC.", Call);
5593 break;
5594 case Intrinsic::init_trampoline:
5595 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5596 "llvm.init_trampoline parameter #2 must resolve to a function.",
5597 Call);
5598 break;
5599 case Intrinsic::prefetch:
5600 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5601 "rw argument to llvm.prefetch must be 0-1", Call);
5602 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5603 "locality argument to llvm.prefetch must be 0-3", Call);
5604 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5605 "cache type argument to llvm.prefetch must be 0-1", Call);
5606 break;
5607 case Intrinsic::stackprotector:
5608 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5609 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5610 break;
5611 case Intrinsic::localescape: {
5612 BasicBlock *BB = Call.getParent();
5613 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5614 Call);
5615 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5616 Call);
5617 for (Value *Arg : Call.args()) {
5618 if (isa<ConstantPointerNull>(Arg))
5619 continue; // Null values are allowed as placeholders.
5620 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5621 Check(AI && AI->isStaticAlloca(),
5622 "llvm.localescape only accepts static allocas", Call);
5623 }
5624 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5625 SawFrameEscape = true;
5626 break;
5627 }
5628 case Intrinsic::localrecover: {
5629 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5630 Function *Fn = dyn_cast<Function>(FnArg);
5631 Check(Fn && !Fn->isDeclaration(),
5632 "llvm.localrecover first "
5633 "argument must be function defined in this module",
5634 Call);
5635 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5636 auto &Entry = FrameEscapeInfo[Fn];
5637 Entry.second = unsigned(
5638 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5639 break;
5640 }
5641
5642 case Intrinsic::experimental_gc_statepoint:
5643 if (auto *CI = dyn_cast<CallInst>(&Call))
5644 Check(!CI->isInlineAsm(),
5645 "gc.statepoint support for inline assembly unimplemented", CI);
5646 Check(Call.getParent()->getParent()->hasGC(),
5647 "Enclosing function does not use GC.", Call);
5648
5649 verifyStatepoint(Call);
5650 break;
5651 case Intrinsic::experimental_gc_result: {
5652 Check(Call.getParent()->getParent()->hasGC(),
5653 "Enclosing function does not use GC.", Call);
5654
5655 auto *Statepoint = Call.getArgOperand(0);
5656 if (isa<UndefValue>(Statepoint))
5657 break;
5658
5659 // Are we tied to a statepoint properly?
5660 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5661 const Function *StatepointFn =
5662 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5663 Check(StatepointFn && StatepointFn->isDeclaration() &&
5664 StatepointFn->getIntrinsicID() ==
5665 Intrinsic::experimental_gc_statepoint,
5666 "gc.result operand #1 must be from a statepoint", Call,
5667 Call.getArgOperand(0));
5668
5669 // Check that result type matches wrapped callee.
5670 auto *TargetFuncType =
5671 cast<FunctionType>(StatepointCall->getParamElementType(2));
5672 Check(Call.getType() == TargetFuncType->getReturnType(),
5673 "gc.result result type does not match wrapped callee", Call);
5674 break;
5675 }
5676 case Intrinsic::experimental_gc_relocate: {
5677 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5678
5679 Check(isa<PointerType>(Call.getType()->getScalarType()),
5680 "gc.relocate must return a pointer or a vector of pointers", Call);
5681
5682 // Check that this relocate is correctly tied to the statepoint
5683
5684 // This is the case for a relocate on the unwinding path of an invoke statepoint
5685 if (LandingPadInst *LandingPad =
5686 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5687
5688 const BasicBlock *InvokeBB =
5689 LandingPad->getParent()->getUniquePredecessor();
5690
5691 // Landingpad relocates should have only one predecessor with invoke
5692 // statepoint terminator
5693 Check(InvokeBB, "safepoints should have unique landingpads",
5694 LandingPad->getParent());
5695 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5696 InvokeBB);
5697 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5698 "gc relocate should be linked to a statepoint", InvokeBB);
5699 } else {
5700 // In all other cases relocate should be tied to the statepoint directly.
5701 // This covers relocates on a normal return path of invoke statepoint and
5702 // relocates of a call statepoint.
5703 auto *Token = Call.getArgOperand(0);
5704 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5705 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5706 }
5707
5708 // Verify rest of the relocate arguments.
5709 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5710
5711 // Both the base and derived must be piped through the safepoint.
5712 Value *Base = Call.getArgOperand(1);
5713 Check(isa<ConstantInt>(Base),
5714 "gc.relocate operand #2 must be integer offset", Call);
5715
5716 Value *Derived = Call.getArgOperand(2);
5717 Check(isa<ConstantInt>(Derived),
5718 "gc.relocate operand #3 must be integer offset", Call);
5719
5720 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5721 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5722
5723 // Check the bounds
5724 if (isa<UndefValue>(StatepointCall))
5725 break;
5726 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5727 .getOperandBundle(LLVMContext::OB_gc_live)) {
5728 Check(BaseIndex < Opt->Inputs.size(),
5729 "gc.relocate: statepoint base index out of bounds", Call);
5730 Check(DerivedIndex < Opt->Inputs.size(),
5731 "gc.relocate: statepoint derived index out of bounds", Call);
5732 }
5733
5734 // Relocated value must be either a pointer type or vector-of-pointer type,
5735 // but gc_relocate does not need to return the same pointer type as the
5736 // relocated pointer. It can be cast to the correct type later if it's
5737 // desired. However, they must have the same address space and 'vectorness'
5738 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5739 auto *ResultType = Call.getType();
5740 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5741 auto *BaseType = Relocate.getBasePtr()->getType();
5742
5743 Check(BaseType->isPtrOrPtrVectorTy(),
5744 "gc.relocate: relocated value must be a pointer", Call);
5745 Check(DerivedType->isPtrOrPtrVectorTy(),
5746 "gc.relocate: relocated value must be a pointer", Call);
5747
5748 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5749 "gc.relocate: vector relocates to vector and pointer to pointer",
5750 Call);
5751 Check(
5752 ResultType->getPointerAddressSpace() ==
5753 DerivedType->getPointerAddressSpace(),
5754 "gc.relocate: relocating a pointer shouldn't change its address space",
5755 Call);
5756
5757 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5758 Check(GC, "gc.relocate: calling function must have GCStrategy",
5759 Call.getFunction());
5760 if (GC) {
5761 auto isGCPtr = [&GC](Type *PTy) {
5762 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5763 };
5764 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5765 Check(isGCPtr(BaseType),
5766 "gc.relocate: relocated value must be a gc pointer", Call);
5767 Check(isGCPtr(DerivedType),
5768 "gc.relocate: relocated value must be a gc pointer", Call);
5769 }
5770 break;
5771 }
5772 case Intrinsic::experimental_patchpoint: {
5773 if (Call.getCallingConv() == CallingConv::AnyReg) {
5774 Check(Call.getType()->isSingleValueType(),
5775 "patchpoint: invalid return type used with anyregcc", Call);
5776 }
5777 break;
5778 }
5779 case Intrinsic::eh_exceptioncode:
5780 case Intrinsic::eh_exceptionpointer: {
5781 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5782 "eh.exceptionpointer argument must be a catchpad", Call);
5783 break;
5784 }
5785 case Intrinsic::get_active_lane_mask: {
5786 Check(Call.getType()->isVectorTy(),
5787 "get_active_lane_mask: must return a "
5788 "vector",
5789 Call);
5790 auto *ElemTy = Call.getType()->getScalarType();
5791 Check(ElemTy->isIntegerTy(1),
5792 "get_active_lane_mask: element type is not "
5793 "i1",
5794 Call);
5795 break;
5796 }
5797 case Intrinsic::experimental_get_vector_length: {
5798 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5799 Check(!VF->isNegative() && !VF->isZero(),
5800 "get_vector_length: VF must be positive", Call);
5801 break;
5802 }
5803 case Intrinsic::masked_load: {
5804 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5805 Call);
5806
5807 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5808 Value *Mask = Call.getArgOperand(2);
5809 Value *PassThru = Call.getArgOperand(3);
5810 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5811 Call);
5812 Check(Alignment->getValue().isPowerOf2(),
5813 "masked_load: alignment must be a power of 2", Call);
5814 Check(PassThru->getType() == Call.getType(),
5815 "masked_load: pass through and return type must match", Call);
5816 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5817 cast<VectorType>(Call.getType())->getElementCount(),
5818 "masked_load: vector mask must be same length as return", Call);
5819 break;
5820 }
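// Illustrative IR (editorial example, not part of this file): a masked load
// with power-of-2 alignment, a mask matching the result length, and a
// passthru of the result type:
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//                                                  <4 x i1> %m, <4 x i32> %pt)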
5821 case Intrinsic::masked_store: {
5822 Value *Val = Call.getArgOperand(0);
5823 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5824 Value *Mask = Call.getArgOperand(3);
5825 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5826 Call);
5827 Check(Alignment->getValue().isPowerOf2(),
5828 "masked_store: alignment must be a power of 2", Call);
5829 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5830 cast<VectorType>(Val->getType())->getElementCount(),
5831 "masked_store: vector mask must be same length as value", Call);
5832 break;
5833 }
5834
5835 case Intrinsic::masked_gather: {
5836 const APInt &Alignment =
5837 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5838 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5839 "masked_gather: alignment must be 0 or a power of 2", Call);
5840 break;
5841 }
5842 case Intrinsic::masked_scatter: {
5843 const APInt &Alignment =
5844 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5845 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5846 "masked_scatter: alignment must be 0 or a power of 2", Call);
5847 break;
5848 }
5849
5850 case Intrinsic::experimental_guard: {
5851 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5852 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5853 "experimental_guard must have exactly one "
5854 "\"deopt\" operand bundle");
5855 break;
5856 }
5857
5858 case Intrinsic::experimental_deoptimize: {
5859 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5860 Call);
5861 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5862 "experimental_deoptimize must have exactly one "
5863 "\"deopt\" operand bundle");
5864 Check(Call.getType() == Call.getFunction()->getReturnType(),
5865 "experimental_deoptimize return type must match caller return type");
5866
5867 if (isa<CallInst>(Call)) {
5868 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5869 Check(RI,
5870 "calls to experimental_deoptimize must be followed by a return");
5871
5872 if (!Call.getType()->isVoidTy() && RI)
5873 Check(RI->getReturnValue() == &Call,
5874 "calls to experimental_deoptimize must be followed by a return "
5875 "of the value computed by experimental_deoptimize");
5876 }
5877
5878 break;
5879 }
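// Illustrative IR (editorial example, not part of this file): a call to
// llvm.experimental.deoptimize must carry exactly one "deopt" bundle and be
// followed by a return of its value:
//   %v = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1) ["deopt"(i32 0)]
//   ret i32 %v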
5880 case Intrinsic::vastart: {
5881 Check(Call.getFunction()->isVarArg(),
5882 "va_start called in a non-varargs function");
5883 break;
5884 }
5885 case Intrinsic::vector_reduce_and:
5886 case Intrinsic::vector_reduce_or:
5887 case Intrinsic::vector_reduce_xor:
5888 case Intrinsic::vector_reduce_add:
5889 case Intrinsic::vector_reduce_mul:
5890 case Intrinsic::vector_reduce_smax:
5891 case Intrinsic::vector_reduce_smin:
5892 case Intrinsic::vector_reduce_umax:
5893 case Intrinsic::vector_reduce_umin: {
5894 Type *ArgTy = Call.getArgOperand(0)->getType();
5895 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5896 "Intrinsic has incorrect argument type!");
5897 break;
5898 }
5899 case Intrinsic::vector_reduce_fmax:
5900 case Intrinsic::vector_reduce_fmin: {
5901 Type *ArgTy = Call.getArgOperand(0)->getType();
5902 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5903 "Intrinsic has incorrect argument type!");
5904 break;
5905 }
5906 case Intrinsic::vector_reduce_fadd:
5907 case Intrinsic::vector_reduce_fmul: {
5908 // Unlike the other reductions, the first argument is a start value. The
5909 // second argument is the vector to be reduced.
5910 Type *ArgTy = Call.getArgOperand(1)->getType();
5911 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5912 "Intrinsic has incorrect argument type!");
5913 break;
5914 }
5915 case Intrinsic::smul_fix:
5916 case Intrinsic::smul_fix_sat:
5917 case Intrinsic::umul_fix:
5918 case Intrinsic::umul_fix_sat:
5919 case Intrinsic::sdiv_fix:
5920 case Intrinsic::sdiv_fix_sat:
5921 case Intrinsic::udiv_fix:
5922 case Intrinsic::udiv_fix_sat: {
5923 Value *Op1 = Call.getArgOperand(0);
5924 Value *Op2 = Call.getArgOperand(1);
5926 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5927 "vector of ints");
5929 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5930 "vector of ints");
5931
5932 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5933 Check(Op3->getType()->isIntegerTy(),
5934 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5935 Check(Op3->getBitWidth() <= 32,
5936 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5937
5938 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5939 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5940 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5941 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5942 "the operands");
5943 } else {
5944 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5945 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5946 "to the width of the operands");
5947 }
5948 break;
5949 }
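// Illustrative IR (editorial example, not part of this file): a signed
// fixed-point multiply whose scale (16) is less than the operand width (32):
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 16)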
5950 case Intrinsic::lrint:
5951 case Intrinsic::llrint: {
5952 Type *ValTy = Call.getArgOperand(0)->getType();
5953 Type *ResultTy = Call.getType();
5954 Check(
5955 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5956 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5957 "of floating-points, and result must be integer or vector of integers",
5958 &Call);
5959 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5960 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5961 &Call);
5962 if (ValTy->isVectorTy()) {
5963 Check(cast<VectorType>(ValTy)->getElementCount() ==
5964 cast<VectorType>(ResultTy)->getElementCount(),
5965 "llvm.lrint, llvm.llrint: argument must be same length as result",
5966 &Call);
5967 }
5968 break;
5969 }
5970 case Intrinsic::lround:
5971 case Intrinsic::llround: {
5972 Type *ValTy = Call.getArgOperand(0)->getType();
5973 Type *ResultTy = Call.getType();
5974 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5975 "Intrinsic does not support vectors", &Call);
5976 break;
5977 }
5978 case Intrinsic::bswap: {
5979 Type *Ty = Call.getType();
5980 unsigned Size = Ty->getScalarSizeInBits();
5981 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5982 break;
5983 }
5984 case Intrinsic::invariant_start: {
5985 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5986 Check(InvariantSize &&
5987 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5988 "invariant_start parameter must be -1, 0 or a positive number",
5989 &Call);
5990 break;
5991 }
5992 case Intrinsic::matrix_multiply:
5993 case Intrinsic::matrix_transpose:
5994 case Intrinsic::matrix_column_major_load:
5995 case Intrinsic::matrix_column_major_store: {
5996 Function *IF = Call.getCalledFunction();
5997 ConstantInt *Stride = nullptr;
5998 ConstantInt *NumRows;
5999 ConstantInt *NumColumns;
6000 VectorType *ResultTy;
6001 Type *Op0ElemTy = nullptr;
6002 Type *Op1ElemTy = nullptr;
6003 switch (ID) {
6004 case Intrinsic::matrix_multiply: {
6005 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6006 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6007 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6008 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6009 ->getNumElements() ==
6010 NumRows->getZExtValue() * N->getZExtValue(),
6011 "First argument of a matrix operation does not match specified "
6012 "shape!");
6013 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6014 ->getNumElements() ==
6015 N->getZExtValue() * NumColumns->getZExtValue(),
6016 "Second argument of a matrix operation does not match specified "
6017 "shape!");
6018
6019 ResultTy = cast<VectorType>(Call.getType());
6020 Op0ElemTy =
6021 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6022 Op1ElemTy =
6023 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6024 break;
6025 }
6026 case Intrinsic::matrix_transpose:
6027 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6028 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6029 ResultTy = cast<VectorType>(Call.getType());
6030 Op0ElemTy =
6031 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6032 break;
6033 case Intrinsic::matrix_column_major_load: {
6034 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6035 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6036 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6037 ResultTy = cast<VectorType>(Call.getType());
6038 break;
6039 }
6040 case Intrinsic::matrix_column_major_store: {
6041 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6042 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6043 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6044 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6045 Op0ElemTy =
6046 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6047 break;
6048 }
6049 default:
6050 llvm_unreachable("unexpected intrinsic");
6051 }
6052
6053 Check(ResultTy->getElementType()->isIntegerTy() ||
6054 ResultTy->getElementType()->isFloatingPointTy(),
6055 "Result type must be an integer or floating-point type!", IF);
6056
6057 if (Op0ElemTy)
6058 Check(ResultTy->getElementType() == Op0ElemTy,
6059 "Vector element type mismatch of the result and first operand "
6060 "vector!",
6061 IF);
6062
6063 if (Op1ElemTy)
6064 Check(ResultTy->getElementType() == Op1ElemTy,
6065 "Vector element type mismatch of the result and second operand "
6066 "vector!",
6067 IF);
6068
6069 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6070 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6071 "Result of a matrix operation does not fit in the returned vector!");
6072
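// For the column-major load/store intrinsics the stride is measured in
// elements and must cover at least one full column, i.e. it may not be
// smaller than the number of rows.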
6073 if (Stride)
6074 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6075 "Stride must be greater or equal than the number of rows!", IF);
6076
6077 break;
6078 }
6079 case Intrinsic::vector_splice: {
6080 VectorType *VecTy = cast<VectorType>(Call.getType());
6081 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6082 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6083 if (Call.getParent() && Call.getParent()->getParent()) {
6084 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6085 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6086 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6087 }
6088 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6089 (Idx >= 0 && Idx < KnownMinNumElements),
6090 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6091 "known minimum number of elements in the vector. For scalable "
6092 "vectors the minimum number of elements is determined from "
6093 "vscale_range.",
6094 &Call);
6095 break;
6096 }
6097 case Intrinsic::experimental_stepvector: {
6098 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6099 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6100 VecTy->getScalarSizeInBits() >= 8,
6101 "experimental_stepvector only supported for vectors of integers "
6102 "with a bitwidth of at least 8.",
6103 &Call);
6104 break;
6105 }
6106 case Intrinsic::vector_insert: {
6107 Value *Vec = Call.getArgOperand(0);
6108 Value *SubVec = Call.getArgOperand(1);
6109 Value *Idx = Call.getArgOperand(2);
6110 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6111
6112 VectorType *VecTy = cast<VectorType>(Vec->getType());
6113 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6114
6115 ElementCount VecEC = VecTy->getElementCount();
6116 ElementCount SubVecEC = SubVecTy->getElementCount();
6117 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6118 "vector_insert parameters must have the same element "
6119 "type.",
6120 &Call);
6121 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6122 "vector_insert index must be a constant multiple of "
6123 "the subvector's known minimum vector length.");
6124
6125 // If this insertion is not the 'mixed' case where a fixed vector is
6126 // inserted into a scalable vector, ensure that the insertion of the
6127 // subvector does not overrun the parent vector.
6128 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6129 Check(IdxN < VecEC.getKnownMinValue() &&
6130 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6131 "subvector operand of vector_insert would overrun the "
6132 "vector being inserted into.");
6133 }
6134 break;
6135 }
6136 case Intrinsic::vector_extract: {
6137 Value *Vec = Call.getArgOperand(0);
6138 Value *Idx = Call.getArgOperand(1);
6139 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6140
6141 VectorType *ResultTy = cast<VectorType>(Call.getType());
6142 VectorType *VecTy = cast<VectorType>(Vec->getType());
6143
6144 ElementCount VecEC = VecTy->getElementCount();
6145 ElementCount ResultEC = ResultTy->getElementCount();
6146
6147 Check(ResultTy->getElementType() == VecTy->getElementType(),
6148 "vector_extract result must have the same element "
6149 "type as the input vector.",
6150 &Call);
6151 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6152 "vector_extract index must be a constant multiple of "
6153 "the result type's known minimum vector length.");
6154
6155 // If this extraction is not the 'mixed' case where a fixed vector is
6156 // extracted from a scalable vector, ensure that the extraction does not
6157 // overrun the parent vector.
6158 if (VecEC.isScalable() == ResultEC.isScalable()) {
6159 Check(IdxN < VecEC.getKnownMinValue() &&
6160 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6161 "vector_extract would overrun.");
6162 }
6163 break;
6164 }
6165 case Intrinsic::experimental_vector_partial_reduce_add: {
6166 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6167 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6168
6169 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6170 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6171
6172 Check((VecWidth % AccWidth) == 0,
6173 "Invalid vector widths for partial "
6174 "reduction. The width of the input vector "
6175 "must be a positive integer multiple of "
6176 "the width of the accumulator vector.");
6177 break;
6178 }
6179 case Intrinsic::experimental_noalias_scope_decl: {
6180 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6181 break;
6182 }
6183 case Intrinsic::preserve_array_access_index:
6184 case Intrinsic::preserve_struct_access_index:
6185 case Intrinsic::aarch64_ldaxr:
6186 case Intrinsic::aarch64_ldxr:
6187 case Intrinsic::arm_ldaex:
6188 case Intrinsic::arm_ldrex: {
6189 Type *ElemTy = Call.getParamElementType(0);
6190 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6191 &Call);
6192 break;
6193 }
6194 case Intrinsic::aarch64_stlxr:
6195 case Intrinsic::aarch64_stxr:
6196 case Intrinsic::arm_stlex:
6197 case Intrinsic::arm_strex: {
6198 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6199 Check(ElemTy,
6200 "Intrinsic requires elementtype attribute on second argument.",
6201 &Call);
6202 break;
6203 }
6204 case Intrinsic::aarch64_prefetch: {
6205 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6206 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6207 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6208 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6209 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6210 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6211 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6212 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6213 break;
6214 }
6215 case Intrinsic::callbr_landingpad: {
6216 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6217 Check(CBR, "intrinsic requires callbr operand", &Call);
6218 if (!CBR)
6219 break;
6220
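// llvm.callbr.landingpad must be the first instruction of a block that has a
// unique callbr-terminated predecessor and that is listed as an indirect
// destination of the callbr passed as its operand; the checks below enforce
// each of these properties in turn.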
6221 const BasicBlock *LandingPadBB = Call.getParent();
6222 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6223 if (!PredBB) {
6224 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6225 break;
6226 }
6227 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6228 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6229 &Call);
6230 break;
6231 }
6232 Check(llvm::any_of(CBR->getIndirectDests(),
6233 [LandingPadBB](const BasicBlock *IndDest) {
6234 return IndDest == LandingPadBB;
6235 }),
6236 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6237 "block in indirect destination list",
6238 &Call);
6239 const Instruction &First = *LandingPadBB->begin();
6240 Check(&First == &Call, "No other instructions may precede intrinsic",
6241 &Call);
6242 break;
6243 }
6244 case Intrinsic::amdgcn_cs_chain: {
6245 auto CallerCC = Call.getCaller()->getCallingConv();
6246 switch (CallerCC) {
6247 case CallingConv::AMDGPU_CS:
6248 case CallingConv::AMDGPU_CS_Chain:
6249 case CallingConv::AMDGPU_CS_ChainPreserve:
6250 break;
6251 default:
6252 CheckFailed("Intrinsic can only be used from functions with the "
6253 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6254 "calling conventions",
6255 &Call);
6256 break;
6257 }
6258
6259 Check(Call.paramHasAttr(2, Attribute::InReg),
6260 "SGPR arguments must have the `inreg` attribute", &Call);
6261 Check(!Call.paramHasAttr(3, Attribute::InReg),
6262 "VGPR arguments must not have the `inreg` attribute", &Call);
6263 break;
6264 }
6265 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6266 auto CallerCC = Call.getCaller()->getCallingConv();
6267 switch (CallerCC) {
6268 case CallingConv::AMDGPU_CS_Chain:
6269 case CallingConv::AMDGPU_CS_ChainPreserve:
6270 break;
6271 default:
6272 CheckFailed("Intrinsic can only be used from functions with the "
6273 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6274 "calling conventions",
6275 &Call);
6276 break;
6277 }
6278
6279 unsigned InactiveIdx = 1;
6280 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6281 "Value for inactive lanes must not have the `inreg` attribute",
6282 &Call);
6283 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6284 "Value for inactive lanes must be a function argument", &Call);
6285 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6286 "Value for inactive lanes must be a VGPR function argument", &Call);
6287 break;
6288 }
6289 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6290 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6291 Value *V = Call.getArgOperand(0);
6292 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6293 Check(RegCount % 8 == 0,
6294 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6295 Check((RegCount >= 24 && RegCount <= 256),
6296 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6297 break;
6298 }
6299 case Intrinsic::experimental_convergence_entry:
6300 case Intrinsic::experimental_convergence_anchor:
6301 break;
6302 case Intrinsic::experimental_convergence_loop:
6303 break;
6304 case Intrinsic::ptrmask: {
6305 Type *Ty0 = Call.getArgOperand(0)->getType();
6306 Type *Ty1 = Call.getArgOperand(1)->getType();
6308 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6309 "of pointers",
6310 &Call);
6311 Check(
6312 Ty0->isVectorTy() == Ty1->isVectorTy(),
6313 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6314 &Call);
6315 if (Ty0->isVectorTy())
6316 Check(cast<VectorType>(Ty0)->getElementCount() ==
6317 cast<VectorType>(Ty1)->getElementCount(),
6318 "llvm.ptrmask intrinsic arguments must have the same number of "
6319 "elements",
6320 &Call);
6321 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6322 "llvm.ptrmask intrinsic second argument bitwidth must match "
6323 "pointer index type size of first argument",
6324 &Call);
6325 break;
6326 }
6327 case Intrinsic::threadlocal_address: {
6328 const Value &Arg0 = *Call.getArgOperand(0);
6329 Check(isa<GlobalValue>(Arg0),
6330 "llvm.threadlocal.address first argument must be a GlobalValue");
6331 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6332 "llvm.threadlocal.address operand isThreadLocal() must be true");
6333 break;
6334 }
6335 };
6336
6337 // Verify that there aren't any unmediated control transfers between funclets.
6338 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6339 Function *F = Call.getParent()->getParent();
6340 if (F->hasPersonalityFn() &&
6341 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6342 // Run EH funclet coloring on-demand and cache results for other intrinsic
6343 // calls in this function
6344 if (BlockEHFuncletColors.empty())
6345 BlockEHFuncletColors = colorEHFunclets(*F);
6346
6347 // Check for catch-/cleanup-pad in first funclet block
6348 bool InEHFunclet = false;
6349 BasicBlock *CallBB = Call.getParent();
6350 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6351 assert(CV.size() > 0 && "Uncolored block");
6352 for (BasicBlock *ColorFirstBB : CV)
6353 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6354 InEHFunclet = true;
6355
6356 // Check for funclet operand bundle
6357 bool HasToken = false;
6358 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6359 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6360 HasToken = true;
6361
6362 // This would cause silent code truncation in WinEHPrepare
6363 if (InEHFunclet)
6364 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6365 }
6366 }
6367}
6368
6369/// Carefully grab the subprogram from a local scope.
6370///
6371/// This carefully grabs the subprogram from a local scope, avoiding the
6372/// built-in assertions that would typically fire.
6373static DISubprogram *getSubprogram(Metadata *LocalScope) {
6374 if (!LocalScope)
6375 return nullptr;
6376
6377 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6378 return SP;
6379
6380 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6381 return getSubprogram(LB->getRawScope());
6382
6383 // Just return null; broken scope chains are checked elsewhere.
6384 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6385 return nullptr;
6386}
6387
6388void Verifier::visit(DbgLabelRecord &DLR) {
6389 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6390 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6391
6392 // Ignore broken !dbg attachments; they're checked elsewhere.
6393 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6394 if (!isa<DILocation>(N))
6395 return;
6396
6397 BasicBlock *BB = DLR.getParent();
6398 Function *F = BB ? BB->getParent() : nullptr;
6399
6400 // The scopes for variables and !dbg attachments must agree.
6401 DILabel *Label = DLR.getLabel();
6402 DILocation *Loc = DLR.getDebugLoc();
6403 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6404
6405 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6406 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6407 if (!LabelSP || !LocSP)
6408 return;
6409
6410 CheckDI(LabelSP == LocSP,
6411 "mismatched subprogram between #dbg_label label and !dbg attachment",
6412 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6413 Loc->getScope()->getSubprogram());
6414}
6415
6416void Verifier::visit(DbgVariableRecord &DVR) {
6417 BasicBlock *BB = DVR.getParent();
6418 Function *F = BB->getParent();
6419
6423 "invalid #dbg record type", &DVR, DVR.getType());
6424
6425 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6426 // DIArgList, or an empty MDNode (which is a legacy representation for an
6427 // "undef" location).
6428 auto *MD = DVR.getRawLocation();
6429 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6430 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6431 "invalid #dbg record address/value", &DVR, MD);
6432 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6433 visitValueAsMetadata(*VAM, F);
6434 else if (auto *AL = dyn_cast<DIArgList>(MD))
6435 visitDIArgList(*AL, F);
6436
6437 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6438 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6439 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6440
6441 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6442 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6443 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6444
6445 if (DVR.isDbgAssign()) {
6446 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6447 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6448 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6449 AreDebugLocsAllowed::No);
6450
6451 const auto *RawAddr = DVR.getRawAddress();
6452 // Similarly to the location above, the address for an assign
6453 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6454 // represents an undef address.
6455 CheckDI(
6456 isa<ValueAsMetadata>(RawAddr) ||
6457 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6458 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6459 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6460 visitValueAsMetadata(*VAM, F);
6461
6462 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6463 "invalid #dbg_assign address expression", &DVR,
6464 DVR.getRawAddressExpression());
6465 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6466
6467 // All of the linked instructions should be in the same function as DVR.
6468 for (Instruction *I : at::getAssignmentInsts(&DVR))
6469 CheckDI(DVR.getFunction() == I->getFunction(),
6470 "inst not in same function as #dbg_assign", I, &DVR);
6471 }
6472
6473 // This check is redundant with one in visitLocalVariable().
6474 DILocalVariable *Var = DVR.getVariable();
6475 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6476 Var->getRawType());
6477
6478 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6479 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6480 &DVR, DLNode);
6481 DILocation *Loc = DVR.getDebugLoc();
6482
6483 // The scopes for variables and !dbg attachments must agree.
6484 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6485 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6486 if (!VarSP || !LocSP)
6487 return; // Broken scope chains are checked elsewhere.
6488
6489 CheckDI(VarSP == LocSP,
6490 "mismatched subprogram between #dbg record variable and DILocation",
6491 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6492 Loc->getScope()->getSubprogram());
6493
6494 verifyFnArgs(DVR);
6495}
6496
6497void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6498 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6499 auto *RetTy = cast<VectorType>(VPCast->getType());
6500 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6501 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6502 "VP cast intrinsic first argument and result vector lengths must be "
6503 "equal",
6504 *VPCast);
6505
6506 switch (VPCast->getIntrinsicID()) {
6507 default:
6508 llvm_unreachable("Unknown VP cast intrinsic");
6509 case Intrinsic::vp_trunc:
6510 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6511 "llvm.vp.trunc intrinsic first argument and result element type "
6512 "must be integer",
6513 *VPCast);
6514 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6515 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6516 "larger than the bit size of the return type",
6517 *VPCast);
6518 break;
6519 case Intrinsic::vp_zext:
6520 case Intrinsic::vp_sext:
6521 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6522 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6523 "element type must be integer",
6524 *VPCast);
6525 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6526 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6527 "argument must be smaller than the bit size of the return type",
6528 *VPCast);
6529 break;
6530 case Intrinsic::vp_fptoui:
6531 case Intrinsic::vp_fptosi:
6532 case Intrinsic::vp_lrint:
6533 case Intrinsic::vp_llrint:
6534 Check(
6535 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6536 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6537 "type must be floating-point and result element type must be integer",
6538 *VPCast);
6539 break;
6540 case Intrinsic::vp_uitofp:
6541 case Intrinsic::vp_sitofp:
6542 Check(
6543 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6544 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6545 "type must be integer and result element type must be floating-point",
6546 *VPCast);
6547 break;
6548 case Intrinsic::vp_fptrunc:
6549 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6550 "llvm.vp.fptrunc intrinsic first argument and result element type "
6551 "must be floating-point",
6552 *VPCast);
6553 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6554 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6555 "larger than the bit size of the return type",
6556 *VPCast);
6557 break;
6558 case Intrinsic::vp_fpext:
6559 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6560 "llvm.vp.fpext intrinsic first argument and result element type "
6561 "must be floating-point",
6562 *VPCast);
6563 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6564 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6565 "smaller than the bit size of the return type",
6566 *VPCast);
6567 break;
6568 case Intrinsic::vp_ptrtoint:
6569 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6570 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6571 "pointer and result element type must be integer",
6572 *VPCast);
6573 break;
6574 case Intrinsic::vp_inttoptr:
6575 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6576 "llvm.vp.inttoptr intrinsic first argument element type must be "
6577 "integer and result element type must be pointer",
6578 *VPCast);
6579 break;
6580 }
6581 }
6582 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6583 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6585 "invalid predicate for VP FP comparison intrinsic", &VPI);
6586 }
6587 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6588 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6590 "invalid predicate for VP integer comparison intrinsic", &VPI);
6591 }
6592 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6593 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6594 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6595 "unsupported bits for llvm.vp.is.fpclass test mask");
6596 }
6597}
6598
6599void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6600 unsigned NumOperands = FPI.getNonMetadataArgCount();
6601 bool HasRoundingMD =
6602 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
6603
6604 // Add the expected number of metadata operands.
6605 NumOperands += (1 + HasRoundingMD);
6606
6607 // Compare intrinsics carry an extra predicate metadata operand.
6608 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6609 NumOperands += 1;
6610 Check((FPI.arg_size() == NumOperands),
6611 "invalid arguments for constrained FP intrinsic", &FPI);
6612
6613 switch (FPI.getIntrinsicID()) {
6614 case Intrinsic::experimental_constrained_lrint:
6615 case Intrinsic::experimental_constrained_llrint: {
6616 Type *ValTy = FPI.getArgOperand(0)->getType();
6617 Type *ResultTy = FPI.getType();
6618 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6619 "Intrinsic does not support vectors", &FPI);
6620 break;
6621 }
6622
6623 case Intrinsic::experimental_constrained_lround:
6624 case Intrinsic::experimental_constrained_llround: {
6625 Type *ValTy = FPI.getArgOperand(0)->getType();
6626 Type *ResultTy = FPI.getType();
6627 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6628 "Intrinsic does not support vectors", &FPI);
6629 break;
6630 }
6631
6632 case Intrinsic::experimental_constrained_fcmp:
6633 case Intrinsic::experimental_constrained_fcmps: {
6634 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6636 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6637 break;
6638 }
6639
6640 case Intrinsic::experimental_constrained_fptosi:
6641 case Intrinsic::experimental_constrained_fptoui: {
6642 Value *Operand = FPI.getArgOperand(0);
6643 ElementCount SrcEC;
6644 Check(Operand->getType()->isFPOrFPVectorTy(),
6645 "Intrinsic first argument must be floating point", &FPI);
6646 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6647 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6648 }
6649
6650 Operand = &FPI;
6651 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6652 "Intrinsic first argument and result disagree on vector use", &FPI);
6653 Check(Operand->getType()->isIntOrIntVectorTy(),
6654 "Intrinsic result must be an integer", &FPI);
6655 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6656 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6657 "Intrinsic first argument and result vector lengths must be equal",
6658 &FPI);
6659 }
6660 break;
6661 }
6662
6663 case Intrinsic::experimental_constrained_sitofp:
6664 case Intrinsic::experimental_constrained_uitofp: {
6665 Value *Operand = FPI.getArgOperand(0);
6666 ElementCount SrcEC;
6667 Check(Operand->getType()->isIntOrIntVectorTy(),
6668 "Intrinsic first argument must be integer", &FPI);
6669 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6670 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6671 }
6672
6673 Operand = &FPI;
6674 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6675 "Intrinsic first argument and result disagree on vector use", &FPI);
6676 Check(Operand->getType()->isFPOrFPVectorTy(),
6677 "Intrinsic result must be a floating point", &FPI);
6678 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6679 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6680 "Intrinsic first argument and result vector lengths must be equal",
6681 &FPI);
6682 }
6683 break;
6684 }
6685
6686 case Intrinsic::experimental_constrained_fptrunc:
6687 case Intrinsic::experimental_constrained_fpext: {
6688 Value *Operand = FPI.getArgOperand(0);
6689 Type *OperandTy = Operand->getType();
6690 Value *Result = &FPI;
6691 Type *ResultTy = Result->getType();
6692 Check(OperandTy->isFPOrFPVectorTy(),
6693 "Intrinsic first argument must be FP or FP vector", &FPI);
6694 Check(ResultTy->isFPOrFPVectorTy(),
6695 "Intrinsic result must be FP or FP vector", &FPI);
6696 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6697 "Intrinsic first argument and result disagree on vector use", &FPI);
6698 if (OperandTy->isVectorTy()) {
6699 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6700 cast<VectorType>(ResultTy)->getElementCount(),
6701 "Intrinsic first argument and result vector lengths must be equal",
6702 &FPI);
6703 }
6704 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6705 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6706 "Intrinsic first argument's type must be larger than result type",
6707 &FPI);
6708 } else {
6709 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6710 "Intrinsic first argument's type must be smaller than result type",
6711 &FPI);
6712 }
6713 break;
6714 }
6715
6716 default:
6717 break;
6718 }
6719
6720 // If a non-metadata argument is passed in a metadata slot then the
6721 // error will be caught earlier when the incorrect argument doesn't
6722 // match the specification in the intrinsic call table. Thus, no
6723 // argument type check is needed here.
6724
6725 Check(FPI.getExceptionBehavior().has_value(),
6726 "invalid exception behavior argument", &FPI);
6727 if (HasRoundingMD) {
6728 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6729 &FPI);
6730 }
6731}
6732
6733void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6734 auto *MD = DII.getRawLocation();
6735 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6736 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6737 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6738 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6739 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6740 DII.getRawVariable());
6741 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6742 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6743 DII.getRawExpression());
6744
6745 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6746 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6747 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6748 DAI->getRawAssignID());
6749 const auto *RawAddr = DAI->getRawAddress();
6750 CheckDI(
6751 isa<ValueAsMetadata>(RawAddr) ||
6752 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6753 "invalid llvm.dbg.assign intrinsic address", &DII,
6754 DAI->getRawAddress());
6755 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6756 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6757 DAI->getRawAddressExpression());
6758 // All of the linked instructions should be in the same function as DII.
6759 for (Instruction *I : at::getAssignmentInsts(DAI))
6760 CheckDI(DAI->getFunction() == I->getFunction(),
6761 "inst not in same function as dbg.assign", I, DAI);
6762 }
6763
6764 // Ignore broken !dbg attachments; they're checked elsewhere.
6765 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6766 if (!isa<DILocation>(N))
6767 return;
6768
6769 BasicBlock *BB = DII.getParent();
6770 Function *F = BB ? BB->getParent() : nullptr;
6771
6772 // The scopes for variables and !dbg attachments must agree.
6773 DILocalVariable *Var = DII.getVariable();
6774 DILocation *Loc = DII.getDebugLoc();
6775 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6776 &DII, BB, F);
6777
6778 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6779 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6780 if (!VarSP || !LocSP)
6781 return; // Broken scope chains are checked elsewhere.
6782
6783 CheckDI(VarSP == LocSP,
6784 "mismatched subprogram between llvm.dbg." + Kind +
6785 " variable and !dbg attachment",
6786 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6787 Loc->getScope()->getSubprogram());
6788
6789 // This check is redundant with one in visitLocalVariable().
6790 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6791 Var->getRawType());
6792 verifyFnArgs(DII);
6793}
6794
6795void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6796 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6797 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6798 DLI.getRawLabel());
6799
6800 // Ignore broken !dbg attachments; they're checked elsewhere.
6801 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6802 if (!isa<DILocation>(N))
6803 return;
6804
6805 BasicBlock *BB = DLI.getParent();
6806 Function *F = BB ? BB->getParent() : nullptr;
6807
6808 // The scopes for variables and !dbg attachments must agree.
6809 DILabel *Label = DLI.getLabel();
6810 DILocation *Loc = DLI.getDebugLoc();
6811 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6812 BB, F);
6813
6814 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6815 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6816 if (!LabelSP || !LocSP)
6817 return;
6818
6819 CheckDI(LabelSP == LocSP,
6820 "mismatched subprogram between llvm.dbg." + Kind +
6821 " label and !dbg attachment",
6822 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6823 Loc->getScope()->getSubprogram());
6824}
6825
6826void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6827 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6828 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6829
6830 // We don't know whether this intrinsic verified correctly.
6831 if (!V || !E || !E->isValid())
6832 return;
6833
6834 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6835 auto Fragment = E->getFragmentInfo();
6836 if (!Fragment)
6837 return;
6838
6839 // The frontend helps out GDB by emitting the members of local anonymous
6840 // unions as artificial local variables with shared storage. When SROA splits
6841 // the storage for artificial local variables that are smaller than the entire
6842 // union, the overhang piece will be outside of the allotted space for the
6843 // variable and this check fails.
6844 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6845 if (V->isArtificial())
6846 return;
6847
6848 verifyFragmentExpression(*V, *Fragment, &I);
6849}
6850void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6851 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6852 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6853
6854 // We don't know whether this intrinsic verified correctly.
6855 if (!V || !E || !E->isValid())
6856 return;
6857
6858 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6859 auto Fragment = E->getFragmentInfo();
6860 if (!Fragment)
6861 return;
6862
6863 // The frontend helps out GDB by emitting the members of local anonymous
6864 // unions as artificial local variables with shared storage. When SROA splits
6865 // the storage for artificial local variables that are smaller than the entire
6866 // union, the overhang piece will be outside of the allotted space for the
6867 // variable and this check fails.
6868 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6869 if (V->isArtificial())
6870 return;
6871
6872 verifyFragmentExpression(*V, *Fragment, &DVR);
6873}
6874
6875template <typename ValueOrMetadata>
6876void Verifier::verifyFragmentExpression(const DIVariable &V,
6877 DIExpression::FragmentInfo Fragment,
6878 ValueOrMetadata *Desc) {
6879 // If there's no size, the type is broken, but that should be checked
6880 // elsewhere.
6881 auto VarSize = V.getSizeInBits();
6882 if (!VarSize)
6883 return;
6884
6885 unsigned FragSize = Fragment.SizeInBits;
6886 unsigned FragOffset = Fragment.OffsetInBits;
6887 CheckDI(FragSize + FragOffset <= *VarSize,
6888 "fragment is larger than or outside of variable", Desc, &V);
6889 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6890}
6891
6892void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6893 // This function does not take the scope of noninlined function arguments into
6894 // account. Don't run it if current function is nodebug, because it may
6895 // contain inlined debug intrinsics.
6896 if (!HasDebugInfo)
6897 return;
6898
6899 // For performance reasons only check non-inlined ones.
6900 if (I.getDebugLoc()->getInlinedAt())
6901 return;
6902
6903 DILocalVariable *Var = I.getVariable();
6904 CheckDI(Var, "dbg intrinsic without variable");
6905
6906 unsigned ArgNo = Var->getArg();
6907 if (!ArgNo)
6908 return;
6909
6910 // Verify there are no duplicate function argument debug info entries.
6911 // These will cause hard-to-debug assertions in the DWARF backend.
6912 if (DebugFnArgs.size() < ArgNo)
6913 DebugFnArgs.resize(ArgNo, nullptr);
6914
6915 auto *Prev = DebugFnArgs[ArgNo - 1];
6916 DebugFnArgs[ArgNo - 1] = Var;
6917 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6918 Prev, Var);
6919}
6920void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6921 // This function does not take the scope of noninlined function arguments into
6922 // account. Don't run it if current function is nodebug, because it may
6923 // contain inlined debug intrinsics.
6924 if (!HasDebugInfo)
6925 return;
6926
6927 // For performance reasons only check non-inlined ones.
6928 if (DVR.getDebugLoc()->getInlinedAt())
6929 return;
6930
6931 DILocalVariable *Var = DVR.getVariable();
6932 CheckDI(Var, "#dbg record without variable");
6933
6934 unsigned ArgNo = Var->getArg();
6935 if (!ArgNo)
6936 return;
6937
6938 // Verify there are no duplicate function argument debug info entries.
6939 // These will cause hard-to-debug assertions in the DWARF backend.
6940 if (DebugFnArgs.size() < ArgNo)
6941 DebugFnArgs.resize(ArgNo, nullptr);
6942
6943 auto *Prev = DebugFnArgs[ArgNo - 1];
6944 DebugFnArgs[ArgNo - 1] = Var;
6945 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6946 Prev, Var);
6947}
6948
6949void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6950 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6951
6952 // We don't know whether this intrinsic verified correctly.
6953 if (!E || !E->isValid())
6954 return;
6955
6956 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6957 Value *VarValue = I.getVariableLocationOp(0);
6958 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6959 return;
6960 // We allow EntryValues for swift async arguments, as they have an
6961 // ABI-guarantee to be turned into a specific register.
6962 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6963 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6964 return;
6965 }
6966
6967 CheckDI(!E->isEntryValue(),
6968 "Entry values are only allowed in MIR unless they target a "
6969 "swiftasync Argument",
6970 &I);
6971}
6972void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6973 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6974
6975 // We don't know whether this intrinsic verified correctly.
6976 if (!E || !E->isValid())
6977 return;
6978
6979 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
6980 Value *VarValue = DVR.getVariableLocationOp(0);
6981 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6982 return;
6983 // We allow EntryValues for swift async arguments, as they have an
6984 // ABI-guarantee to be turned into a specific register.
6985 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6986 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6987 return;
6988 }
6989
6990 CheckDI(!E->isEntryValue(),
6991 "Entry values are only allowed in MIR unless they target a "
6992 "swiftasync Argument",
6993 &DVR);
6994}
6995
6996void Verifier::verifyCompileUnits() {
6997 // When more than one Module is imported into the same context, such as during
6998 // an LTO build before linking the modules, ODR type uniquing may cause types
6999 // to point to a different CU. This check does not make sense in this case.
7000 if (M.getContext().isODRUniquingDebugTypes())
7001 return;
7002 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7003 SmallPtrSet<const Metadata *, 2> Listed;
7004 if (CUs)
7005 Listed.insert(CUs->op_begin(), CUs->op_end());
7006 for (const auto *CU : CUVisited)
7007 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7008 CUVisited.clear();
7009}
7010
7011void Verifier::verifyDeoptimizeCallingConvs() {
7012 if (DeoptimizeDeclarations.empty())
7013 return;
7014
7015 const Function *First = DeoptimizeDeclarations[0];
7016 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7017 Check(First->getCallingConv() == F->getCallingConv(),
7018 "All llvm.experimental.deoptimize declarations must have the same "
7019 "calling convention",
7020 First, F);
7021 }
7022}
7023
7024void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7025 const OperandBundleUse &BU) {
7026 FunctionType *FTy = Call.getFunctionType();
7027
7028 Check((FTy->getReturnType()->isPointerTy() ||
7029 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7030 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7031 "function returning a pointer or a non-returning function that has a "
7032 "void return type",
7033 Call);
7034
7035 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7036 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7037 "an argument",
7038 Call);
7039
7040 auto *Fn = cast<Function>(BU.Inputs.front());
7041 Intrinsic::ID IID = Fn->getIntrinsicID();
7042
7043 if (IID) {
7044 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7045 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7046 "invalid function argument", Call);
7047 } else {
7048 StringRef FnName = Fn->getName();
7049 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7050 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7051 "invalid function argument", Call);
7052 }
7053}
7054
7055void Verifier::verifyNoAliasScopeDecl() {
7056 if (NoAliasScopeDecls.empty())
7057 return;
7058
7059 // only a single scope must be declared at a time.
7060 for (auto *II : NoAliasScopeDecls) {
7061 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7062 "Not a llvm.experimental.noalias.scope.decl ?");
7063 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7064 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7065 Check(ScopeListMV != nullptr,
7066 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7067 "argument",
7068 II);
7069
7070 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7071 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7072 Check(ScopeListMD->getNumOperands() == 1,
7073 "!id.scope.list must point to a list with a single scope", II);
7074 visitAliasScopeListMetadata(ScopeListMD);
7075 }
7076
7077 // Only check the domination rule when requested. Once all passes have been
7078 // adapted this option can go away.
7079 if (!VerifyNoAliasScopeDomination)
7080 return;
7081
7082 // Now sort the intrinsics based on the scope MDNode so that declarations of
7083 // the same scopes are next to each other.
7084 auto GetScope = [](IntrinsicInst *II) {
7085 const auto *ScopeListMV = cast<MetadataAsValue>(
7086 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7087 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7088 };
7089
7090 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7091 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7092 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7093 return GetScope(Lhs) < GetScope(Rhs);
7094 };
7095
7096 llvm::sort(NoAliasScopeDecls, Compare);
7097
7098 // Go over the intrinsics and check that for the same scope, they are not
7099 // dominating each other.
7100 auto ItCurrent = NoAliasScopeDecls.begin();
7101 while (ItCurrent != NoAliasScopeDecls.end()) {
7102 auto CurScope = GetScope(*ItCurrent);
7103 auto ItNext = ItCurrent;
7104 do {
7105 ++ItNext;
7106 } while (ItNext != NoAliasScopeDecls.end() &&
7107 GetScope(*ItNext) == CurScope);
7108
7109 // [ItCurrent, ItNext) represents the declarations for the same scope.
7110 // Ensure they are not dominating each other, but only if it is not too
7111 // expensive.
7112 if (ItNext - ItCurrent < 32)
7113 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7114 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7115 if (I != J)
7116 Check(!DT.dominates(I, J),
7117 "llvm.experimental.noalias.scope.decl dominates another one "
7118 "with the same scope",
7119 I);
7120 ItCurrent = ItNext;
7121 }
7122}
7123
7124//===----------------------------------------------------------------------===//
7125// Implement the public interfaces to this file...
7126//===----------------------------------------------------------------------===//
7127
7128bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7129 Function &F = const_cast<Function &>(f);
7130
7131 // Don't use a raw_null_ostream. Printing IR is expensive.
7132 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7133
7134 // Note that this function's return value is inverted from what you would
7135 // expect of a function called "verify".
7136 return !V.verify(F);
7137}
7138
7139bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7140 bool *BrokenDebugInfo) {
7141 // Don't use a raw_null_ostream. Printing IR is expensive.
7142 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7143
7144 bool Broken = false;
7145 for (const Function &F : M)
7146 Broken |= !V.verify(F);
7147
7148 Broken |= !V.verify();
7149 if (BrokenDebugInfo)
7150 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7151 // Note that this function's return value is inverted from what you would
7152 // expect of a function called "verify".
7153 return Broken;
7154}
7155
7156namespace {
7157
7158struct VerifierLegacyPass : public FunctionPass {
7159 static char ID;
7160
7161 std::unique_ptr<Verifier> V;
7162 bool FatalErrors = true;
7163
7164 VerifierLegacyPass() : FunctionPass(ID) {
7165 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7166 }
7167 explicit VerifierLegacyPass(bool FatalErrors)
7168 : FunctionPass(ID),
7169 FatalErrors(FatalErrors) {
7170 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7171 }
7172
7173 bool doInitialization(Module &M) override {
7174 V = std::make_unique<Verifier>(
7175 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7176 return false;
7177 }
7178
7179 bool runOnFunction(Function &F) override {
7180 if (!V->verify(F) && FatalErrors) {
7181 errs() << "in function " << F.getName() << '\n';
7182 report_fatal_error("Broken function found, compilation aborted!");
7183 }
7184 return false;
7185 }
7186
7187 bool doFinalization(Module &M) override {
7188 bool HasErrors = false;
7189 for (Function &F : M)
7190 if (F.isDeclaration())
7191 HasErrors |= !V->verify(F);
7192
7193 HasErrors |= !V->verify();
7194 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7195 report_fatal_error("Broken module found, compilation aborted!");
7196 return false;
7197 }
7198
7199 void getAnalysisUsage(AnalysisUsage &AU) const override {
7200 AU.setPreservesAll();
7201 }
7202};
7203
7204} // end anonymous namespace
7205
7206/// Helper to issue failure from the TBAA verification
7207template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7208 if (Diagnostic)
7209 return Diagnostic->CheckFailed(Args...);
7210}
7211
7212#define CheckTBAA(C, ...) \
7213 do { \
7214 if (!(C)) { \
7215 CheckFailed(__VA_ARGS__); \
7216 return false; \
7217 } \
7218 } while (false)
7219
7220/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7221/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7222/// struct-type node describing an aggregate data structure (like a struct).
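// For example (illustrative, old struct-path format), a struct-type base node
// may look like !{!"agg", !scalar_a, i64 0, !scalar_b, i64 8}, pairing each
// field's scalar type node with that field's offset in bytes.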
7223TBAAVerifier::TBAABaseNodeSummary
7224TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7225 bool IsNewFormat) {
7226 if (BaseNode->getNumOperands() < 2) {
7227 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7228 return {true, ~0u};
7229 }
7230
7231 auto Itr = TBAABaseNodes.find(BaseNode);
7232 if (Itr != TBAABaseNodes.end())
7233 return Itr->second;
7234
7235 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7236 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7237 (void)InsertResult;
7238 assert(InsertResult.second && "We just checked!");
7239 return Result;
7240}
7241
7242TBAAVerifier::TBAABaseNodeSummary
7243TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7244 bool IsNewFormat) {
7245 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7246
7247 if (BaseNode->getNumOperands() == 2) {
7248 // Scalar nodes can only be accessed at offset 0.
7249 return isValidScalarTBAANode(BaseNode)
7250 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7251 : InvalidNode;
7252 }
7253
7254 if (IsNewFormat) {
7255 if (BaseNode->getNumOperands() % 3 != 0) {
7256 CheckFailed("Access tag nodes must have the number of operands that is a "
7257 "multiple of 3!", BaseNode);
7258 return InvalidNode;
7259 }
7260 } else {
7261 if (BaseNode->getNumOperands() % 2 != 1) {
7262 CheckFailed("Struct tag nodes must have an odd number of operands!",
7263 BaseNode);
7264 return InvalidNode;
7265 }
7266 }
7267
7268 // Check the type size field.
7269 if (IsNewFormat) {
7270 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7271 BaseNode->getOperand(1));
7272 if (!TypeSizeNode) {
7273 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7274 return InvalidNode;
7275 }
7276 }
7277
7278 // Check the type name field. In the new format it can be anything.
7279 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7280 CheckFailed("Struct tag nodes have a string as their first operand",
7281 BaseNode);
7282 return InvalidNode;
7283 }
7284
7285 bool Failed = false;
7286
7287 std::optional<APInt> PrevOffset;
7288 unsigned BitWidth = ~0u;
7289
7290 // We've already checked that BaseNode is not a degenerate root node with one
7291 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7292 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7293 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7294 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7295 Idx += NumOpsPerField) {
7296 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7297 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7298 if (!isa<MDNode>(FieldTy)) {
7299 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7300 Failed = true;
7301 continue;
7302 }
7303
7304 auto *OffsetEntryCI =
7305 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7306 if (!OffsetEntryCI) {
7307 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7308 Failed = true;
7309 continue;
7310 }
7311
7312 if (BitWidth == ~0u)
7313 BitWidth = OffsetEntryCI->getBitWidth();
7314
7315 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7316 CheckFailed(
7317 "Bitwidth between the offsets and struct type entries must match", &I,
7318 BaseNode);
7319 Failed = true;
7320 continue;
7321 }
7322
7323 // NB! As far as I can tell, we generate a non-strictly increasing offset
7324 // sequence only from structs that have zero size bit fields. When
7325 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7326 // pick the field lexically the latest in struct type metadata node. This
7327 // mirrors the actual behavior of the alias analysis implementation.
7328 bool IsAscending =
7329 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7330
7331 if (!IsAscending) {
7332 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7333 Failed = true;
7334 }
7335
7336 PrevOffset = OffsetEntryCI->getValue();
7337
7338 if (IsNewFormat) {
7339 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7340 BaseNode->getOperand(Idx + 2));
7341 if (!MemberSizeNode) {
7342 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7343 Failed = true;
7344 continue;
7345 }
7346 }
7347 }
7348
7349 return Failed ? InvalidNode
7350 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7351}
7352
7353static bool IsRootTBAANode(const MDNode *MD) {
7354 return MD->getNumOperands() < 2;
7355}
7356
7357static bool IsScalarTBAANodeImpl(const MDNode *MD,
7358 SmallPtrSetImpl<const MDNode *> &Visited) {
7359 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7360 return false;
7361
7362 if (!isa<MDString>(MD->getOperand(0)))
7363 return false;
7364
7365 if (MD->getNumOperands() == 3) {
7366 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7367 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7368 return false;
7369 }
7370
7371 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7372 return Parent && Visited.insert(Parent).second &&
7373 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7374}
7375
7376bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7377 auto ResultIt = TBAAScalarNodes.find(MD);
7378 if (ResultIt != TBAAScalarNodes.end())
7379 return ResultIt->second;
7380
7381 SmallPtrSet<const MDNode *, 4> Visited;
7382 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7383 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7384 (void)InsertResult;
7385 assert(InsertResult.second && "Just checked!");
7386
7387 return Result;
7388}
7389
7390/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7391/// Offset in place to be the offset within the field node returned.
7392///
7393/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7394MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7395 const MDNode *BaseNode,
7396 APInt &Offset,
7397 bool IsNewFormat) {
7398 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7399
7400 // Scalar nodes have only one possible "field" -- their parent in the access
7401 // hierarchy. Offset must be zero at this point, but our caller is supposed
7402 // to check that.
7403 if (BaseNode->getNumOperands() == 2)
7404 return cast<MDNode>(BaseNode->getOperand(1));
7405
7406 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7407 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7408 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7409 Idx += NumOpsPerField) {
7410 auto *OffsetEntryCI =
7411 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7412 if (OffsetEntryCI->getValue().ugt(Offset)) {
7413 if (Idx == FirstFieldOpNo) {
7414 CheckFailed("Could not find TBAA parent in struct type node", &I,
7415 BaseNode, &Offset);
7416 return nullptr;
7417 }
7418
7419 unsigned PrevIdx = Idx - NumOpsPerField;
7420 auto *PrevOffsetEntryCI =
7421 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7422 Offset -= PrevOffsetEntryCI->getValue();
7423 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7424 }
7425 }
7426
7427 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7428 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7429 BaseNode->getOperand(LastIdx + 1));
7430 Offset -= LastOffsetEntryCI->getValue();
7431 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7432}
7433
7434static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7435 if (!Type || Type->getNumOperands() < 3)
7436 return false;
7437
7438 // In the new format type nodes shall have a reference to the parent type as
7439 // its first operand.
7440 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7441}
7442
7443bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7444 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7445 &I, MD);
7446
7447 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7448 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7449 isa<AtomicCmpXchgInst>(I),
7450 "This instruction shall not have a TBAA access tag!", &I);
7451
7452 bool IsStructPathTBAA =
7453 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7454
7455 CheckTBAA(IsStructPathTBAA,
7456 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7457 &I);
7458
7459 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7460 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7461
7462 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7463
7464 if (IsNewFormat) {
7465 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7466 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7467 } else {
7468 CheckTBAA(MD->getNumOperands() < 5,
7469 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7470 }
7471
7472 // Check the access size field.
7473 if (IsNewFormat) {
7474 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7475 MD->getOperand(3));
7476 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7477 }
7478
7479 // Check the immutability flag.
7480 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7481 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7482 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7483 MD->getOperand(ImmutabilityFlagOpNo));
7484 CheckTBAA(IsImmutableCI,
7485 "Immutability tag on struct tag metadata must be a constant", &I,
7486 MD);
7487 CheckTBAA(
7488 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7489 "Immutability part of the struct tag metadata must be either 0 or 1",
7490 &I, MD);
7491 }
7492
7493 CheckTBAA(BaseNode && AccessType,
7494 "Malformed struct tag metadata: base and access-type "
7495 "should be non-null and point to Metadata nodes",
7496 &I, MD, BaseNode, AccessType);
7497
7498 if (!IsNewFormat) {
7499 CheckTBAA(isValidScalarTBAANode(AccessType),
7500 "Access type node must be a valid scalar type", &I, MD,
7501 AccessType);
7502 }
7503
7504 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7505 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7506
7507 APInt Offset = OffsetCI->getValue();
7508 bool SeenAccessTypeInPath = false;
7509
7510 SmallPtrSet<MDNode *, 4> StructPath;
7511
7512 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7513 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7514 IsNewFormat)) {
7515 if (!StructPath.insert(BaseNode).second) {
7516 CheckFailed("Cycle detected in struct path", &I, MD);
7517 return false;
7518 }
7519
7520 bool Invalid;
7521 unsigned BaseNodeBitWidth;
7522 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7523 IsNewFormat);
7524
7525 // If the base node is invalid in itself, then we've already printed all the
7526 // errors we wanted to print.
7527 if (Invalid)
7528 return false;
7529
7530 SeenAccessTypeInPath |= BaseNode == AccessType;
7531
7532 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7533 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7534 &I, MD, &Offset);
7535
7536 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7537 (BaseNodeBitWidth == 0 && Offset == 0) ||
7538 (IsNewFormat && BaseNodeBitWidth == ~0u),
7539 "Access bit-width not the same as description bit-width", &I, MD,
7540 BaseNodeBitWidth, Offset.getBitWidth());
7541
7542 if (IsNewFormat && SeenAccessTypeInPath)
7543 break;
7544 }
7545
7546 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7547 MD);
7548 return true;
7549}
7550
7551char VerifierLegacyPass::ID = 0;
7552INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7553
7554FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7555 return new VerifierLegacyPass(FatalErrors);
7556}
7557
7558AnalysisKey VerifierAnalysis::Key;
7559VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7560 ModuleAnalysisManager &) {
7561 Result Res;
7562 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7563 return Res;
7564}
7565
7566VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7567 FunctionAnalysisManager &) {
7568 return { llvm::verifyFunction(F, &dbgs()), false };
7569}
7570
7571PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7572 auto Res = AM.getResult<VerifierAnalysis>(M);
7573 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7574 report_fatal_error("Broken module found, compilation aborted!");
7575
7576 return PreservedAnalyses::all();
7577}
7578
7579PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7580 auto res = AM.getResult<VerifierAnalysis>(F);
7581 if (res.IRBroken && FatalErrors)
7582 report_fatal_error("Broken function found, compilation aborted!");
7583
7584 return PreservedAnalyses::all();
7585}
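
The listing above ends at the public entry points most clients use: llvm::verifyFunction and llvm::verifyModule, declared in llvm/IR/Verifier.h. The following sketch is an editorial illustration, not part of Verifier.cpp: it parses a small module whose load carries a struct-path !tbaa access tag of the !{base, access-type, offset} shape that TBAAVerifier::visitTBAAMetadata checks above, then runs both verifiers. The example IR, metadata names, and program name are invented for the demo.

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

int main() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  // A tiny module whose load carries a struct-path TBAA access tag of the
  // form !{BaseNode, AccessType, Offset}, the shape visitTBAAMetadata expects.
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
      "define i32 @read(ptr %p) {\n"
      "entry:\n"
      "  %v = load i32, ptr %p, align 4, !tbaa !0\n"
      "  ret i32 %v\n"
      "}\n"
      "!0 = !{!1, !1, i64 0}\n"
      "!1 = !{!\"int\", !2, i64 0}\n"
      "!2 = !{!\"omnipotent char\", !3, i64 0}\n"
      "!3 = !{!\"Simple C/C++ TBAA\"}\n",
      Err, Ctx);
  if (!M) {
    Err.print("verify-demo", llvm::errs());
    return 1;
  }

  // verifyModule/verifyFunction return true when the IR is broken and print
  // the individual check failures to the given stream.
  bool BrokenDebugInfo = false;
  if (llvm::verifyModule(*M, &llvm::errs(), &BrokenDebugInfo))
    return 2;
  for (llvm::Function &F : *M)
    if (llvm::verifyFunction(F, &llvm::errs()))
      return 3;
  return 0;
}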
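To run the verifier as part of a new-pass-manager pipeline, VerifierPass (whose Module and Function run overloads close the listing) is added like any other module pass. Another hedged sketch, assuming the standard PassBuilder analysis-registration boilerplate and an empty, trivially valid module:

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Passes/PassBuilder.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("demo", Ctx);  // empty module, nothing for the verifier to reject

  // Standard new-pass-manager analysis registration.
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::ModulePassManager MPM;
  // FatalErrors=false reports problems without calling report_fatal_error,
  // matching the FatalErrors check in VerifierPass::run above.
  MPM.addPass(llvm::VerifierPass(/*FatalErrors=*/false));
  MPM.run(M, MAM);

  // The legacy pass manager equivalent is createVerifierPass(FatalErrors).
  return 0;
}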