Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications;
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify, for example, that shifts & logical operations only happen on integral types.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
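
// A minimal usage sketch for callers (the entry points below are declared in
// llvm/IR/Verifier.h and shown here only for orientation, not as part of this
// file's logic):
//
//   // Returns true if the module is broken; diagnostics go to the stream.
//   bool Broken = llvm::verifyModule(M, &llvm::errs());
//
//   // Per-function variant; likewise returns true when the IR is broken.
//   bool FnBroken = llvm::verifyFunction(F, &llvm::errs());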
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
76#include "llvm/IR/Constants.h"
78#include "llvm/IR/DataLayout.h"
79#include "llvm/IR/DebugInfo.h"
81#include "llvm/IR/DebugLoc.h"
83#include "llvm/IR/Dominators.h"
85#include "llvm/IR/Function.h"
86#include "llvm/IR/GCStrategy.h"
87#include "llvm/IR/GlobalAlias.h"
88#include "llvm/IR/GlobalValue.h"
90#include "llvm/IR/InlineAsm.h"
91#include "llvm/IR/InstVisitor.h"
92#include "llvm/IR/InstrTypes.h"
93#include "llvm/IR/Instruction.h"
96#include "llvm/IR/Intrinsics.h"
97#include "llvm/IR/IntrinsicsAArch64.h"
98#include "llvm/IR/IntrinsicsAMDGPU.h"
99#include "llvm/IR/IntrinsicsARM.h"
100#include "llvm/IR/IntrinsicsNVPTX.h"
101#include "llvm/IR/IntrinsicsWebAssembly.h"
102#include "llvm/IR/LLVMContext.h"
104#include "llvm/IR/Metadata.h"
105#include "llvm/IR/Module.h"
107#include "llvm/IR/PassManager.h"
109#include "llvm/IR/Statepoint.h"
110#include "llvm/IR/Type.h"
111#include "llvm/IR/Use.h"
112#include "llvm/IR/User.h"
114#include "llvm/IR/Value.h"
116#include "llvm/Pass.h"
118#include "llvm/Support/Casting.h"
122#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
141struct VerifierSupport {
142 raw_ostream *OS;
143 const Module &M;
144 ModuleSlotTracker MST;
145 Triple TT;
146 const DataLayout &DL;
147 LLVMContext &Context;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
156 explicit VerifierSupport(raw_ostream *OS, const Module &M)
157 : OS(OS), M(M), MST(&M), TT(Triple::normalize(M.getTargetTriple())),
158 DL(M.getDataLayout()), Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
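  // Note: instructions are printed in full below so their operands appear in
  // the diagnostic; any other value is printed as a compact operand reference.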
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
187 void Write(DbgVariableRecord::LocationType Type) {
188 switch (Type) {
189 case DbgVariableRecord::LocationType::Value:
190 *OS << "value";
191 break;
192 case DbgVariableRecord::LocationType::Declare:
193 *OS << "declare";
194 break;
195 case DbgVariableRecord::LocationType::Assign:
196 *OS << "assign";
197 break;
198 case DbgVariableRecord::LocationType::End:
199 *OS << "end";
200 break;
201 case DbgVariableRecord::LocationType::Any:
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
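  // WriteTs peels off one argument at a time, routing each value to the
  // matching Write overload above; the empty specialization below terminates
  // the recursion.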
273 template <typename T1, typename... Ts>
274 void WriteTs(const T1 &V1, const Ts &... Vs) {
275 Write(V1);
276 WriteTs(Vs...);
277 }
278
279 template <typename... Ts> void WriteTs() {}
280
281public:
283 /// A check failed, so print out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
338
339 /// Keep track which DISubprogram is attached to which function.
341
342 /// Track all DICompileUnits visited.
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice: we can arrive at a user
378 // more than once if it has multiple operands, and in particular for very
379 // large constant expressions we can reach the same user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
393 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
394 const Module &M)
395 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
396 SawFrameEscape(false), TBAAVerifyHelper(this) {
397 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
398 }
399
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
402 bool verify(const Function &F) {
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482
483 verifyCompileUnits();
484
485 verifyDeoptimizeCallingConvs();
486 DISubprogramAttachments.clear();
487 return !Broken;
488 }
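
  // Note: verify() above covers module-level entities only. In the usual
  // driver (llvm::verifyModule in Verifier.h), each function is first checked
  // through verify(const Function &) and the results are combined.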
489
490private:
491 /// Whether a metadata node is allowed to be, or contain, a DILocation.
492 enum class AreDebugLocsAllowed { No, Yes };
493
494 // Verification methods...
495 void visitGlobalValue(const GlobalValue &GV);
496 void visitGlobalVariable(const GlobalVariable &GV);
497 void visitGlobalAlias(const GlobalAlias &GA);
498 void visitGlobalIFunc(const GlobalIFunc &GI);
499 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
500 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
501 const GlobalAlias &A, const Constant &C);
502 void visitNamedMDNode(const NamedMDNode &NMD);
503 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
504 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
505 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
506 void visitDIArgList(const DIArgList &AL, Function *F);
507 void visitComdat(const Comdat &C);
508 void visitModuleIdents();
509 void visitModuleCommandLines();
510 void visitModuleFlags();
511 void visitModuleFlag(const MDNode *Op,
512 DenseMap<const MDString *, const MDNode *> &SeenIDs,
513 SmallVectorImpl<const MDNode *> &Requirements);
514 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
515 void visitFunction(const Function &F);
516 void visitBasicBlock(BasicBlock &BB);
517 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
518 bool IsAbsoluteSymbol);
519 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
520 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
521 void visitProfMetadata(Instruction &I, MDNode *MD);
522 void visitCallStackMetadata(MDNode *MD);
523 void visitMemProfMetadata(Instruction &I, MDNode *MD);
524 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
525 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
526 void visitMMRAMetadata(Instruction &I, MDNode *MD);
527 void visitAnnotationMetadata(MDNode *Annotation);
528 void visitAliasScopeMetadata(const MDNode *MD);
529 void visitAliasScopeListMetadata(const MDNode *MD);
530 void visitAccessGroupMetadata(const MDNode *MD);
531
532 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
533#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
534#include "llvm/IR/Metadata.def"
535 void visitDIScope(const DIScope &N);
536 void visitDIVariable(const DIVariable &N);
537 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
538 void visitDITemplateParameter(const DITemplateParameter &N);
539
540 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
541
542 void visit(DbgLabelRecord &DLR);
543 void visit(DbgVariableRecord &DVR);
544 // InstVisitor overrides...
546 void visitDbgRecords(Instruction &I);
547 void visit(Instruction &I);
548
549 void visitTruncInst(TruncInst &I);
550 void visitZExtInst(ZExtInst &I);
551 void visitSExtInst(SExtInst &I);
552 void visitFPTruncInst(FPTruncInst &I);
553 void visitFPExtInst(FPExtInst &I);
554 void visitFPToUIInst(FPToUIInst &I);
555 void visitFPToSIInst(FPToSIInst &I);
556 void visitUIToFPInst(UIToFPInst &I);
557 void visitSIToFPInst(SIToFPInst &I);
558 void visitIntToPtrInst(IntToPtrInst &I);
559 void visitPtrToIntInst(PtrToIntInst &I);
560 void visitBitCastInst(BitCastInst &I);
561 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
562 void visitPHINode(PHINode &PN);
563 void visitCallBase(CallBase &Call);
564 void visitUnaryOperator(UnaryOperator &U);
565 void visitBinaryOperator(BinaryOperator &B);
566 void visitICmpInst(ICmpInst &IC);
567 void visitFCmpInst(FCmpInst &FC);
568 void visitExtractElementInst(ExtractElementInst &EI);
569 void visitInsertElementInst(InsertElementInst &EI);
570 void visitShuffleVectorInst(ShuffleVectorInst &EI);
571 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
572 void visitCallInst(CallInst &CI);
573 void visitInvokeInst(InvokeInst &II);
574 void visitGetElementPtrInst(GetElementPtrInst &GEP);
575 void visitLoadInst(LoadInst &LI);
576 void visitStoreInst(StoreInst &SI);
577 void verifyDominatesUse(Instruction &I, unsigned i);
578 void visitInstruction(Instruction &I);
579 void visitTerminator(Instruction &I);
580 void visitBranchInst(BranchInst &BI);
581 void visitReturnInst(ReturnInst &RI);
582 void visitSwitchInst(SwitchInst &SI);
583 void visitIndirectBrInst(IndirectBrInst &BI);
584 void visitCallBrInst(CallBrInst &CBI);
585 void visitSelectInst(SelectInst &SI);
586 void visitUserOp1(Instruction &I);
587 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
588 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
589 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
590 void visitVPIntrinsic(VPIntrinsic &VPI);
591 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
592 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
593 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
594 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
595 void visitFenceInst(FenceInst &FI);
596 void visitAllocaInst(AllocaInst &AI);
597 void visitExtractValueInst(ExtractValueInst &EVI);
598 void visitInsertValueInst(InsertValueInst &IVI);
599 void visitEHPadPredecessors(Instruction &I);
600 void visitLandingPadInst(LandingPadInst &LPI);
601 void visitResumeInst(ResumeInst &RI);
602 void visitCatchPadInst(CatchPadInst &CPI);
603 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
604 void visitCleanupPadInst(CleanupPadInst &CPI);
605 void visitFuncletPadInst(FuncletPadInst &FPI);
606 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
607 void visitCleanupReturnInst(CleanupReturnInst &CRI);
608
609 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
610 void verifySwiftErrorValue(const Value *SwiftErrorVal);
611 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
612 void verifyMustTailCall(CallInst &CI);
613 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
614 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
615 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
616 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
617 const Value *V);
618 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
619 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
620 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
621
622 void visitConstantExprsRecursively(const Constant *EntryC);
623 void visitConstantExpr(const ConstantExpr *CE);
624 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
625 void verifyInlineAsmCall(const CallBase &Call);
626 void verifyStatepoint(const CallBase &Call);
627 void verifyFrameRecoverIndices();
628 void verifySiblingFuncletUnwinds();
629
630 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
631 void verifyFragmentExpression(const DbgVariableRecord &I);
632 template <typename ValueOrMetadata>
633 void verifyFragmentExpression(const DIVariable &V,
635 ValueOrMetadata *Desc);
636 void verifyFnArgs(const DbgVariableIntrinsic &I);
637 void verifyFnArgs(const DbgVariableRecord &DVR);
638 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
639 void verifyNotEntryValue(const DbgVariableRecord &I);
640
641 /// Module-level debug info verification...
642 void verifyCompileUnits();
643
644 /// Module-level verification that all @llvm.experimental.deoptimize
645 /// declarations share the same calling convention.
646 void verifyDeoptimizeCallingConvs();
647
648 void verifyAttachedCallBundle(const CallBase &Call,
649 const OperandBundleUse &BU);
650
651 /// Verify the llvm.experimental.noalias.scope.decl declarations
652 void verifyNoAliasScopeDecl();
653};
654
655} // end anonymous namespace
656
657/// We know that cond should be true; if not, print an error message.
658#define Check(C, ...) \
659 do { \
660 if (!(C)) { \
661 CheckFailed(__VA_ARGS__); \
662 return; \
663 } \
664 } while (false)
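
// A sketch of how the macro is used throughout this file:
//   Check(I.getType()->isIntegerTy(), "expected an integer type", &I);
// On failure, the message and any trailing values are printed via
// CheckFailed/WriteTs and the current visit method returns early.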
665
666/// We know that a debug info condition should be true; if not, print
667/// an error message.
668#define CheckDI(C, ...) \
669 do { \
670 if (!(C)) { \
671 DebugInfoCheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
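
// Unlike Check, a failed CheckDI only marks BrokenDebugInfo, since broken
// debug info can be recovered from by stripping it (see VerifierSupport).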
675
676void Verifier::visitDbgRecords(Instruction &I) {
677 if (!I.DebugMarker)
678 return;
679 CheckDI(I.DebugMarker->MarkedInstr == &I,
680 "Instruction has invalid DebugMarker", &I);
681 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
682 "PHI Node must not have any attached DbgRecords", &I);
683 for (DbgRecord &DR : I.getDbgRecordRange()) {
684 CheckDI(DR.getMarker() == I.DebugMarker,
685 "DbgRecord had invalid DebugMarker", &I, &DR);
686 if (auto *Loc =
687 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
688 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
689 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
690 visit(*DVR);
691 // These have to appear after `visit` for consistency with existing
692 // intrinsic behaviour.
693 verifyFragmentExpression(*DVR);
694 verifyNotEntryValue(*DVR);
695 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
696 visit(*DLR);
697 }
698 }
699}
700
701void Verifier::visit(Instruction &I) {
702 visitDbgRecords(I);
703 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
704 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
706}
707
708// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
709static void forEachUser(const Value *User,
710 SmallPtrSetImpl<const Value *> &Visited,
711 llvm::function_ref<bool(const Value *)> Callback) {
712 if (!Visited.insert(User).second)
713 return;
714
715 SmallVector<const Value *> WorkList;
716 append_range(WorkList, User->materialized_users());
717 while (!WorkList.empty()) {
718 const Value *Cur = WorkList.pop_back_val();
719 if (!Visited.insert(Cur).second)
720 continue;
721 if (Callback(Cur))
722 append_range(WorkList, Cur->materialized_users());
723 }
724}
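
// visitGlobalValue (below) uses forEachUser with the GlobalValueVisited set to
// walk all transitive users of a global and flag references that escape the
// module or come from parentless instructions.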
725
726void Verifier::visitGlobalValue(const GlobalValue &GV) {
728 "Global is external, but doesn't have external or weak linkage!", &GV);
729
730 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
731
732 if (MaybeAlign A = GO->getAlign()) {
733 Check(A->value() <= Value::MaximumAlignment,
734 "huge alignment values are unsupported", GO);
735 }
736
737 if (const MDNode *Associated =
738 GO->getMetadata(LLVMContext::MD_associated)) {
739 Check(Associated->getNumOperands() == 1,
740 "associated metadata must have one operand", &GV, Associated);
741 const Metadata *Op = Associated->getOperand(0).get();
742 Check(Op, "associated metadata must have a global value", GO, Associated);
743
744 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
745 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
746 if (VM) {
747 Check(isa<PointerType>(VM->getValue()->getType()),
748 "associated value must be pointer typed", GV, Associated);
749
750 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
751 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
752 "associated metadata must point to a GlobalObject", GO, Stripped);
753 Check(Stripped != GO,
754 "global values should not associate to themselves", GO,
755 Associated);
756 }
757 }
758
759 // FIXME: Why is getMetadata on GlobalValue protected?
760 if (const MDNode *AbsoluteSymbol =
761 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
762 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
763 true);
764 }
765 }
766
767 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
768 "Only global variables can have appending linkage!", &GV);
769
770 if (GV.hasAppendingLinkage()) {
771 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
772 Check(GVar && GVar->getValueType()->isArrayTy(),
773 "Only global arrays can have appending linkage!", GVar);
774 }
775
776 if (GV.isDeclarationForLinker())
777 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
778
779 if (GV.hasDLLExportStorageClass()) {
781 "dllexport GlobalValue must have default or protected visibility",
782 &GV);
783 }
784 if (GV.hasDLLImportStorageClass()) {
786 "dllimport GlobalValue must have default visibility", &GV);
787 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
788 &GV);
789
790 Check((GV.isDeclaration() &&
793 "Global is marked as dllimport, but not external", &GV);
794 }
795
796 if (GV.isImplicitDSOLocal())
797 Check(GV.isDSOLocal(),
798 "GlobalValue with local linkage or non-default "
799 "visibility must be dso_local!",
800 &GV);
801
802 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
803 if (const Instruction *I = dyn_cast<Instruction>(V)) {
804 if (!I->getParent() || !I->getParent()->getParent())
805 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
806 I);
807 else if (I->getParent()->getParent()->getParent() != &M)
808 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
809 I->getParent()->getParent(),
810 I->getParent()->getParent()->getParent());
811 return false;
812 } else if (const Function *F = dyn_cast<Function>(V)) {
813 if (F->getParent() != &M)
814 CheckFailed("Global is used by function in a different module", &GV, &M,
815 F, F->getParent());
816 return false;
817 }
818 return true;
819 });
820}
821
822void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
823 if (GV.hasInitializer()) {
825 "Global variable initializer type does not match global "
826 "variable type!",
827 &GV);
828 // If the global has common linkage, it must have a zero initializer and
829 // cannot be constant.
830 if (GV.hasCommonLinkage()) {
832 "'common' global must have a zero initializer!", &GV);
833 Check(!GV.isConstant(), "'common' global may not be marked constant!",
834 &GV);
835 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
836 }
837 }
838
839 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
840 GV.getName() == "llvm.global_dtors")) {
842 "invalid linkage for intrinsic global variable", &GV);
844 "invalid uses of intrinsic global variable", &GV);
845
846 // Don't worry about emitting an error for it not being an array;
847 // visitGlobalValue will complain on appending non-array.
848 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
849 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
850 PointerType *FuncPtrTy =
851 PointerType::get(Context, DL.getProgramAddressSpace());
852 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
853 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
854 STy->getTypeAtIndex(1) == FuncPtrTy,
855 "wrong type for intrinsic global variable", &GV);
856 Check(STy->getNumElements() == 3,
857 "the third field of the element type is mandatory, "
858 "specify ptr null to migrate from the obsoleted 2-field form");
859 Type *ETy = STy->getTypeAtIndex(2);
860 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
861 &GV);
862 }
863 }
864
865 if (GV.hasName() && (GV.getName() == "llvm.used" ||
866 GV.getName() == "llvm.compiler.used")) {
868 "invalid linkage for intrinsic global variable", &GV);
870 "invalid uses of intrinsic global variable", &GV);
871
872 Type *GVType = GV.getValueType();
873 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
874 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
875 Check(PTy, "wrong type for intrinsic global variable", &GV);
876 if (GV.hasInitializer()) {
877 const Constant *Init = GV.getInitializer();
878 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
879 Check(InitArray, "wrong initializer for intrinsic global variable",
880 Init);
881 for (Value *Op : InitArray->operands()) {
882 Value *V = Op->stripPointerCasts();
883 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
884 isa<GlobalAlias>(V),
885 Twine("invalid ") + GV.getName() + " member", V);
886 Check(V->hasName(),
887 Twine("members of ") + GV.getName() + " must be named", V);
888 }
889 }
890 }
891 }
892
893 // Visit any debug info attachments.
895 GV.getMetadata(LLVMContext::MD_dbg, MDs);
896 for (auto *MD : MDs) {
897 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
898 visitDIGlobalVariableExpression(*GVE);
899 else
900 CheckDI(false, "!dbg attachment of global variable must be a "
901 "DIGlobalVariableExpression");
902 }
903
904 // Scalable vectors cannot be global variables, since we don't know
905 // the runtime size.
907 "Globals cannot contain scalable types", &GV);
908
909 // Check if it's a target extension type that disallows being used as a
910 // global.
911 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
912 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
913 "Global @" + GV.getName() + " has illegal target extension type",
914 TTy);
915
916 if (!GV.hasInitializer()) {
917 visitGlobalValue(GV);
918 return;
919 }
920
921 // Walk any aggregate initializers looking for bitcasts between address spaces
922 visitConstantExprsRecursively(GV.getInitializer());
923
924 visitGlobalValue(GV);
925}
926
927void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
929 Visited.insert(&GA);
930 visitAliaseeSubExpr(Visited, GA, C);
931}
932
933void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
934 const GlobalAlias &GA, const Constant &C) {
936 Check(isa<GlobalValue>(C) &&
937 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
938 "available_externally alias must point to available_externally "
939 "global value",
940 &GA);
941 }
942 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
944 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
945 &GA);
946 }
947
948 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
949 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
950
951 Check(!GA2->isInterposable(),
952 "Alias cannot point to an interposable alias", &GA);
953 } else {
954 // Only continue verifying subexpressions of GlobalAliases.
955 // Do not recurse into global initializers.
956 return;
957 }
958 }
959
960 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
961 visitConstantExprsRecursively(CE);
962
963 for (const Use &U : C.operands()) {
964 Value *V = &*U;
965 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
966 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
967 else if (const auto *C2 = dyn_cast<Constant>(V))
968 visitAliaseeSubExpr(Visited, GA, *C2);
969 }
970}
971
972void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
974 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
975 "weak_odr, external, or available_externally linkage!",
976 &GA);
977 const Constant *Aliasee = GA.getAliasee();
978 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
979 Check(GA.getType() == Aliasee->getType(),
980 "Alias and aliasee types should match!", &GA);
981
982 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
983 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
984
985 visitAliaseeSubExpr(GA, *Aliasee);
986
987 visitGlobalValue(GA);
988}
989
990void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
992 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
993 "weak_odr, or external linkage!",
994 &GI);
995 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
996 // is a Function definition.
998 Check(Resolver, "IFunc must have a Function resolver", &GI);
999 Check(!Resolver->isDeclarationForLinker(),
1000 "IFunc resolver must be a definition", &GI);
1001
1002 // Check that the immediate resolver operand (prior to any bitcasts) has the
1003 // correct type.
1004 const Type *ResolverTy = GI.getResolver()->getType();
1005
1006 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1007 "IFunc resolver must return a pointer", &GI);
1008
1009 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1010 "IFunc resolver has incorrect type", &GI);
1011}
1012
1013void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1014 // There used to be various other llvm.dbg.* nodes, but we don't support
1015 // upgrading them and we want to reserve the namespace for future uses.
1016 if (NMD.getName().starts_with("llvm.dbg."))
1017 CheckDI(NMD.getName() == "llvm.dbg.cu",
1018 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1019 for (const MDNode *MD : NMD.operands()) {
1020 if (NMD.getName() == "llvm.dbg.cu")
1021 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1022
1023 if (!MD)
1024 continue;
1025
1026 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1027 }
1028}
1029
1030void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1031 // Only visit each node once. Metadata can be mutually recursive, so this
1032 // avoids infinite recursion here, as well as being an optimization.
1033 if (!MDNodes.insert(&MD).second)
1034 return;
1035
1036 Check(&MD.getContext() == &Context,
1037 "MDNode context does not match Module context!", &MD);
1038
1039 switch (MD.getMetadataID()) {
1040 default:
1041 llvm_unreachable("Invalid MDNode subclass");
1042 case Metadata::MDTupleKind:
1043 break;
1044#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1045 case Metadata::CLASS##Kind: \
1046 visit##CLASS(cast<CLASS>(MD)); \
1047 break;
1048#include "llvm/IR/Metadata.def"
1049 }
1050
1051 for (const Metadata *Op : MD.operands()) {
1052 if (!Op)
1053 continue;
1054 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1055 &MD, Op);
1056 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1057 "DILocation not allowed within this metadata node", &MD, Op);
1058 if (auto *N = dyn_cast<MDNode>(Op)) {
1059 visitMDNode(*N, AllowLocs);
1060 continue;
1061 }
1062 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1063 visitValueAsMetadata(*V, nullptr);
1064 continue;
1065 }
1066 }
1067
1068 // Check these last, so we diagnose problems in operands first.
1069 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1070 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1071}
1072
1073void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1074 Check(MD.getValue(), "Expected valid value", &MD);
1075 Check(!MD.getValue()->getType()->isMetadataTy(),
1076 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1077
1078 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1079 if (!L)
1080 return;
1081
1082 Check(F, "function-local metadata used outside a function", L);
1083
1084 // If this was an instruction, bb, or argument, verify that it is in the
1085 // function that we expect.
1086 Function *ActualF = nullptr;
1087 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1088 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1089 ActualF = I->getParent()->getParent();
1090 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1091 ActualF = BB->getParent();
1092 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1093 ActualF = A->getParent();
1094 assert(ActualF && "Unimplemented function local metadata case!");
1095
1096 Check(ActualF == F, "function-local metadata used in wrong function", L);
1097}
1098
1099void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1100 for (const ValueAsMetadata *VAM : AL.getArgs())
1101 visitValueAsMetadata(*VAM, F);
1102}
1103
1104void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1105 Metadata *MD = MDV.getMetadata();
1106 if (auto *N = dyn_cast<MDNode>(MD)) {
1107 visitMDNode(*N, AreDebugLocsAllowed::No);
1108 return;
1109 }
1110
1111 // Only visit each node once. Metadata can be mutually recursive, so this
1112 // avoids infinite recursion here, as well as being an optimization.
1113 if (!MDNodes.insert(MD).second)
1114 return;
1115
1116 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1117 visitValueAsMetadata(*V, F);
1118
1119 if (auto *AL = dyn_cast<DIArgList>(MD))
1120 visitDIArgList(*AL, F);
1121}
1122
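// The following helpers accept null because many debug info operands are
// optional; visitors that require an operand to be present check for that
// separately.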
1123static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1124static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1125static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1126
1127void Verifier::visitDILocation(const DILocation &N) {
1128 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1129 "location requires a valid scope", &N, N.getRawScope());
1130 if (auto *IA = N.getRawInlinedAt())
1131 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1132 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1133 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1134}
1135
1136void Verifier::visitGenericDINode(const GenericDINode &N) {
1137 CheckDI(N.getTag(), "invalid tag", &N);
1138}
1139
1140void Verifier::visitDIScope(const DIScope &N) {
1141 if (auto *F = N.getRawFile())
1142 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1143}
1144
1145void Verifier::visitDISubrange(const DISubrange &N) {
1146 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1147 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1148 "Subrange can have any one of count or upperBound", &N);
1149 auto *CBound = N.getRawCountNode();
1150 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1151 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1152 "Count must be signed constant or DIVariable or DIExpression", &N);
1153 auto Count = N.getCount();
1154 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1155 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1156 "invalid subrange count", &N);
1157 auto *LBound = N.getRawLowerBound();
1158 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1159 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1160 "LowerBound must be signed constant or DIVariable or DIExpression",
1161 &N);
1162 auto *UBound = N.getRawUpperBound();
1163 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1164 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1165 "UpperBound must be signed constant or DIVariable or DIExpression",
1166 &N);
1167 auto *Stride = N.getRawStride();
1168 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1169 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1170 "Stride must be signed constant or DIVariable or DIExpression", &N);
1171}
1172
1173void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1174 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1175 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1176 "GenericSubrange can have any one of count or upperBound", &N);
1177 auto *CBound = N.getRawCountNode();
1178 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1179 "Count must be signed constant or DIVariable or DIExpression", &N);
1180 auto *LBound = N.getRawLowerBound();
1181 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1182 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1183 "LowerBound must be signed constant or DIVariable or DIExpression",
1184 &N);
1185 auto *UBound = N.getRawUpperBound();
1186 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1187 "UpperBound must be signed constant or DIVariable or DIExpression",
1188 &N);
1189 auto *Stride = N.getRawStride();
1190 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1191 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1192 "Stride must be signed constant or DIVariable or DIExpression", &N);
1193}
1194
1195void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1196 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1197}
1198
1199void Verifier::visitDIBasicType(const DIBasicType &N) {
1200 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1201 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1202 N.getTag() == dwarf::DW_TAG_string_type,
1203 "invalid tag", &N);
1204}
1205
1206void Verifier::visitDIStringType(const DIStringType &N) {
1207 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1208 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1209 &N);
1210}
1211
1212void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1213 // Common scope checks.
1214 visitDIScope(N);
1215
1216 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1217 N.getTag() == dwarf::DW_TAG_pointer_type ||
1218 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1219 N.getTag() == dwarf::DW_TAG_reference_type ||
1220 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1221 N.getTag() == dwarf::DW_TAG_const_type ||
1222 N.getTag() == dwarf::DW_TAG_immutable_type ||
1223 N.getTag() == dwarf::DW_TAG_volatile_type ||
1224 N.getTag() == dwarf::DW_TAG_restrict_type ||
1225 N.getTag() == dwarf::DW_TAG_atomic_type ||
1226 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1227 N.getTag() == dwarf::DW_TAG_member ||
1228 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1229 N.getTag() == dwarf::DW_TAG_inheritance ||
1230 N.getTag() == dwarf::DW_TAG_friend ||
1231 N.getTag() == dwarf::DW_TAG_set_type ||
1232 N.getTag() == dwarf::DW_TAG_template_alias,
1233 "invalid tag", &N);
1234 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1235 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1236 N.getRawExtraData());
1237 }
1238
1239 if (N.getTag() == dwarf::DW_TAG_set_type) {
1240 if (auto *T = N.getRawBaseType()) {
1241 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1242 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1243 CheckDI(
1244 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1245 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1246 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1247 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1248 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1249 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1250 "invalid set base type", &N, T);
1251 }
1252 }
1253
1254 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1255 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1256 N.getRawBaseType());
1257
1258 if (N.getDWARFAddressSpace()) {
1259 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1260 N.getTag() == dwarf::DW_TAG_reference_type ||
1261 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1262 "DWARF address space only applies to pointer or reference types",
1263 &N);
1264 }
1265}
1266
1267/// Detect mutually exclusive flags.
1268static bool hasConflictingReferenceFlags(unsigned Flags) {
1269 return ((Flags & DINode::FlagLValueReference) &&
1270 (Flags & DINode::FlagRValueReference)) ||
1271 ((Flags & DINode::FlagTypePassByValue) &&
1272 (Flags & DINode::FlagTypePassByReference));
1273}
1274
1275void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1276 auto *Params = dyn_cast<MDTuple>(&RawParams);
1277 CheckDI(Params, "invalid template params", &N, &RawParams);
1278 for (Metadata *Op : Params->operands()) {
1279 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1280 &N, Params, Op);
1281 }
1282}
1283
1284void Verifier::visitDICompositeType(const DICompositeType &N) {
1285 // Common scope checks.
1286 visitDIScope(N);
1287
1288 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1289 N.getTag() == dwarf::DW_TAG_structure_type ||
1290 N.getTag() == dwarf::DW_TAG_union_type ||
1291 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1292 N.getTag() == dwarf::DW_TAG_class_type ||
1293 N.getTag() == dwarf::DW_TAG_variant_part ||
1294 N.getTag() == dwarf::DW_TAG_namelist,
1295 "invalid tag", &N);
1296
1297 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1298 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1299 N.getRawBaseType());
1300
1301 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1302 "invalid composite elements", &N, N.getRawElements());
1303 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1304 N.getRawVTableHolder());
1306 "invalid reference flags", &N);
1307 unsigned DIBlockByRefStruct = 1 << 4;
1308 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1309 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1310
1311 if (N.isVector()) {
1312 const DINodeArray Elements = N.getElements();
1313 CheckDI(Elements.size() == 1 &&
1314 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1315 "invalid vector, expected one element of type subrange", &N);
1316 }
1317
1318 if (auto *Params = N.getRawTemplateParams())
1319 visitTemplateParams(N, *Params);
1320
1321 if (auto *D = N.getRawDiscriminator()) {
1322 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1323 "discriminator can only appear on variant part");
1324 }
1325
1326 if (N.getRawDataLocation()) {
1327 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1328 "dataLocation can only appear in array type");
1329 }
1330
1331 if (N.getRawAssociated()) {
1332 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1333 "associated can only appear in array type");
1334 }
1335
1336 if (N.getRawAllocated()) {
1337 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1338 "allocated can only appear in array type");
1339 }
1340
1341 if (N.getRawRank()) {
1342 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1343 "rank can only appear in array type");
1344 }
1345
1346 if (N.getTag() == dwarf::DW_TAG_array_type) {
1347 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1348 }
1349}
1350
1351void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1352 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1353 if (auto *Types = N.getRawTypeArray()) {
1354 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1355 for (Metadata *Ty : N.getTypeArray()->operands()) {
1356 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1357 }
1358 }
1360 "invalid reference flags", &N);
1361}
1362
1363void Verifier::visitDIFile(const DIFile &N) {
1364 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1365 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1366 if (Checksum) {
1367 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1368 "invalid checksum kind", &N);
1369 size_t Size;
1370 switch (Checksum->Kind) {
1371 case DIFile::CSK_MD5:
1372 Size = 32;
1373 break;
1374 case DIFile::CSK_SHA1:
1375 Size = 40;
1376 break;
1377 case DIFile::CSK_SHA256:
1378 Size = 64;
1379 break;
1380 }
1381 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1382 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1383 "invalid checksum", &N);
1384 }
1385}
1386
1387void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1388 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1389 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1390
1391 // Don't bother verifying the compilation directory or producer string
1392 // as those could be empty.
1393 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1394 N.getRawFile());
1395 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1396 N.getFile());
1397
1398 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1399 "invalid emission kind", &N);
1400
1401 if (auto *Array = N.getRawEnumTypes()) {
1402 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1403 for (Metadata *Op : N.getEnumTypes()->operands()) {
1404 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1405 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1406 "invalid enum type", &N, N.getEnumTypes(), Op);
1407 }
1408 }
1409 if (auto *Array = N.getRawRetainedTypes()) {
1410 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1411 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1412 CheckDI(
1413 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1414 !cast<DISubprogram>(Op)->isDefinition())),
1415 "invalid retained type", &N, Op);
1416 }
1417 }
1418 if (auto *Array = N.getRawGlobalVariables()) {
1419 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1420 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1421 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1422 "invalid global variable ref", &N, Op);
1423 }
1424 }
1425 if (auto *Array = N.getRawImportedEntities()) {
1426 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1427 for (Metadata *Op : N.getImportedEntities()->operands()) {
1428 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1429 &N, Op);
1430 }
1431 }
1432 if (auto *Array = N.getRawMacros()) {
1433 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1434 for (Metadata *Op : N.getMacros()->operands()) {
1435 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1436 }
1437 }
1438 CUVisited.insert(&N);
1439}
1440
1441void Verifier::visitDISubprogram(const DISubprogram &N) {
1442 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1443 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1444 if (auto *F = N.getRawFile())
1445 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1446 else
1447 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1448 if (auto *T = N.getRawType())
1449 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1450 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1451 N.getRawContainingType());
1452 if (auto *Params = N.getRawTemplateParams())
1453 visitTemplateParams(N, *Params);
1454 if (auto *S = N.getRawDeclaration())
1455 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1456 "invalid subprogram declaration", &N, S);
1457 if (auto *RawNode = N.getRawRetainedNodes()) {
1458 auto *Node = dyn_cast<MDTuple>(RawNode);
1459 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1460 for (Metadata *Op : Node->operands()) {
1461 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1462 isa<DIImportedEntity>(Op)),
1463 "invalid retained nodes, expected DILocalVariable, DILabel or "
1464 "DIImportedEntity",
1465 &N, Node, Op);
1466 }
1467 }
1469 "invalid reference flags", &N);
1470
1471 auto *Unit = N.getRawUnit();
1472 if (N.isDefinition()) {
1473 // Subprogram definitions (not part of the type hierarchy).
1474 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1475 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1476 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1477 // There's no good way to cross the CU boundary to insert a nested
1478 // DISubprogram definition in one CU into a type defined in another CU.
1479 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1480 if (CT && CT->getRawIdentifier() &&
1481 M.getContext().isODRUniquingDebugTypes())
1482 CheckDI(N.getDeclaration(),
1483 "definition subprograms cannot be nested within DICompositeType "
1484 "when enabling ODR",
1485 &N);
1486 } else {
1487 // Subprogram declarations (part of the type hierarchy).
1488 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1489 CheckDI(!N.getRawDeclaration(),
1490 "subprogram declaration must not have a declaration field");
1491 }
1492
1493 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1494 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1495 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1496 for (Metadata *Op : ThrownTypes->operands())
1497 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1498 Op);
1499 }
1500
1501 if (N.areAllCallsDescribed())
1502 CheckDI(N.isDefinition(),
1503 "DIFlagAllCallsDescribed must be attached to a definition");
1504}
1505
1506void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1507 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1508 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1509 "invalid local scope", &N, N.getRawScope());
1510 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1511 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1512}
1513
1514void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1515 visitDILexicalBlockBase(N);
1516
1517 CheckDI(N.getLine() || !N.getColumn(),
1518 "cannot have column info without line info", &N);
1519}
1520
1521void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1522 visitDILexicalBlockBase(N);
1523}
1524
1525void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1526 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1527 if (auto *S = N.getRawScope())
1528 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1529 if (auto *S = N.getRawDecl())
1530 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1531}
1532
1533void Verifier::visitDINamespace(const DINamespace &N) {
1534 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1535 if (auto *S = N.getRawScope())
1536 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1537}
1538
1539void Verifier::visitDIMacro(const DIMacro &N) {
1540 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1541 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1542 "invalid macinfo type", &N);
1543 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1544 if (!N.getValue().empty()) {
1545 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1546 }
1547}
1548
1549void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1550 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1551 "invalid macinfo type", &N);
1552 if (auto *F = N.getRawFile())
1553 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1554
1555 if (auto *Array = N.getRawElements()) {
1556 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1557 for (Metadata *Op : N.getElements()->operands()) {
1558 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1559 }
1560 }
1561}
1562
1563void Verifier::visitDIModule(const DIModule &N) {
1564 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1565 CheckDI(!N.getName().empty(), "anonymous module", &N);
1566}
1567
1568void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1569 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1570}
1571
1572void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1573 visitDITemplateParameter(N);
1574
1575 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1576 &N);
1577}
1578
1579void Verifier::visitDITemplateValueParameter(
1580 const DITemplateValueParameter &N) {
1581 visitDITemplateParameter(N);
1582
1583 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1584 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1585 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1586 "invalid tag", &N);
1587}
1588
1589void Verifier::visitDIVariable(const DIVariable &N) {
1590 if (auto *S = N.getRawScope())
1591 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1592 if (auto *F = N.getRawFile())
1593 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1594}
1595
1596void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1597 // Checks common to all variables.
1598 visitDIVariable(N);
1599
1600 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1601 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1602 // Check only if the global variable is not an extern
1603 if (N.isDefinition())
1604 CheckDI(N.getType(), "missing global variable type", &N);
1605 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1606 CheckDI(isa<DIDerivedType>(Member),
1607 "invalid static data member declaration", &N, Member);
1608 }
1609}
1610
1611void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1612 // Checks common to all variables.
1613 visitDIVariable(N);
1614
1615 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1616 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1617 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1618 "local variable requires a valid scope", &N, N.getRawScope());
1619 if (auto Ty = N.getType())
1620 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1621}
1622
1623void Verifier::visitDIAssignID(const DIAssignID &N) {
1624 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1625 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1626}
1627
1628void Verifier::visitDILabel(const DILabel &N) {
1629 if (auto *S = N.getRawScope())
1630 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1631 if (auto *F = N.getRawFile())
1632 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1633
1634 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1635 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1636 "label requires a valid scope", &N, N.getRawScope());
1637}
1638
1639void Verifier::visitDIExpression(const DIExpression &N) {
1640 CheckDI(N.isValid(), "invalid expression", &N);
1641}
1642
1643void Verifier::visitDIGlobalVariableExpression(
1644 const DIGlobalVariableExpression &GVE) {
1645 CheckDI(GVE.getVariable(), "missing variable");
1646 if (auto *Var = GVE.getVariable())
1647 visitDIGlobalVariable(*Var);
1648 if (auto *Expr = GVE.getExpression()) {
1649 visitDIExpression(*Expr);
1650 if (auto Fragment = Expr->getFragmentInfo())
1651 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1652 }
1653}
1654
1655void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1656 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1657 if (auto *T = N.getRawType())
1658 CheckDI(isType(T), "invalid type ref", &N, T);
1659 if (auto *F = N.getRawFile())
1660 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1661}
1662
1663void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1664 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1665 N.getTag() == dwarf::DW_TAG_imported_declaration,
1666 "invalid tag", &N);
1667 if (auto *S = N.getRawScope())
1668 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1669 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1670 N.getRawEntity());
1671}
1672
1673void Verifier::visitComdat(const Comdat &C) {
1674 // In COFF the Module is invalid if the GlobalValue has private linkage.
1675 // Entities with private linkage don't have entries in the symbol table.
1676 if (TT.isOSBinFormatCOFF())
1677 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1678 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1679 GV);
1680}
1681
1682void Verifier::visitModuleIdents() {
1683 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1684 if (!Idents)
1685 return;
1686
1687 // llvm.ident takes a list of metadata entries. Each entry has exactly one string.
1688 // Scan each llvm.ident entry and make sure that this requirement is met.
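  // A well-formed llvm.ident attachment looks roughly like:
  //   !llvm.ident = !{!0}
  //   !0 = !{!"some producer version string"}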
1689 for (const MDNode *N : Idents->operands()) {
1690 Check(N->getNumOperands() == 1,
1691 "incorrect number of operands in llvm.ident metadata", N);
1692 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1693 ("invalid value for llvm.ident metadata entry operand"
1694 "(the operand should be a string)"),
1695 N->getOperand(0));
1696 }
1697}
1698
1699void Verifier::visitModuleCommandLines() {
1700 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1701 if (!CommandLines)
1702 return;
1703
1704 // llvm.commandline takes a list of metadata entries. Each entry has only one
1705 // string. Scan each llvm.commandline entry and make sure that this
1706 // requirement is met.
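  // A well-formed llvm.commandline attachment looks roughly like:
  //   !llvm.commandline = !{!0}
  //   !0 = !{!"some recorded command line string"}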
1707 for (const MDNode *N : CommandLines->operands()) {
1708 Check(N->getNumOperands() == 1,
1709 "incorrect number of operands in llvm.commandline metadata", N);
1710 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1711 ("invalid value for llvm.commandline metadata entry operand"
1712 "(the operand should be a string)"),
1713 N->getOperand(0));
1714 }
1715}
1716
1717void Verifier::visitModuleFlags() {
1718 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1719 if (!Flags) return;
1720
1721 // Scan each flag, and track the flags and requirements.
1722 DenseMap<const MDString*, const MDNode*> SeenIDs;
1723 SmallVector<const MDNode*, 16> Requirements;
1724 uint64_t PAuthABIPlatform = -1;
1725 uint64_t PAuthABIVersion = -1;
1726 for (const MDNode *MDN : Flags->operands()) {
1727 visitModuleFlag(MDN, SeenIDs, Requirements);
1728 if (MDN->getNumOperands() != 3)
1729 continue;
1730 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1731 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1732 if (const auto *PAP =
1733 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1734 PAuthABIPlatform = PAP->getZExtValue();
1735 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1736 if (const auto *PAV =
1737 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1738 PAuthABIVersion = PAV->getZExtValue();
1739 }
1740 }
1741 }
1742
1743 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1744 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1745 "'aarch64-elf-pauthabi-version' module flags must be present");
1746
1747 // Validate that the requirements in the module are valid.
1748 for (const MDNode *Requirement : Requirements) {
1749 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1750 const Metadata *ReqValue = Requirement->getOperand(1);
1751
1752 const MDNode *Op = SeenIDs.lookup(Flag);
1753 if (!Op) {
1754 CheckFailed("invalid requirement on flag, flag is not present in module",
1755 Flag);
1756 continue;
1757 }
1758
1759 if (Op->getOperand(2) != ReqValue) {
1760 CheckFailed(("invalid requirement on flag, "
1761 "flag does not have the required value"),
1762 Flag);
1763 continue;
1764 }
1765 }
1766}
1767
1768void
1769Verifier::visitModuleFlag(const MDNode *Op,
1770 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1771 SmallVectorImpl<const MDNode *> &Requirements) {
1772 // Each module flag should have three arguments, the merge behavior (a
1773 // constant int), the flag ID (an MDString), and the value.
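  // For example, a typical flag entry using 'Error' behavior (i32 1) is:
  //   !{i32 1, !"wchar_size", i32 4}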
1774 Check(Op->getNumOperands() == 3,
1775 "incorrect number of operands in module flag", Op);
1776 Module::ModFlagBehavior MFB;
1777 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1778 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1779 "invalid behavior operand in module flag (expected constant integer)",
1780 Op->getOperand(0));
1781 Check(false,
1782 "invalid behavior operand in module flag (unexpected constant)",
1783 Op->getOperand(0));
1784 }
1785 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1786 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1787 Op->getOperand(1));
1788
1789 // Check the values for behaviors with additional requirements.
1790 switch (MFB) {
1791 case Module::Error:
1792 case Module::Warning:
1793 case Module::Override:
1794 // These behavior types accept any value.
1795 break;
1796
1797 case Module::Min: {
1798 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1799 Check(V && V->getValue().isNonNegative(),
1800 "invalid value for 'min' module flag (expected constant non-negative "
1801 "integer)",
1802 Op->getOperand(2));
1803 break;
1804 }
1805
1806 case Module::Max: {
1807 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1808 "invalid value for 'max' module flag (expected constant integer)",
1809 Op->getOperand(2));
1810 break;
1811 }
1812
1813 case Module::Require: {
1814 // The value should itself be an MDNode with two operands, a flag ID (an
1815 // MDString), and a value.
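    // e.g. a 'require' entry (behavior i32 3) with an illustrative flag name:
    //   !{i32 3, !"some-requirement", !{!"some-flag", i32 1}}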
1816 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1817 Check(Value && Value->getNumOperands() == 2,
1818 "invalid value for 'require' module flag (expected metadata pair)",
1819 Op->getOperand(2));
1820 Check(isa<MDString>(Value->getOperand(0)),
1821 ("invalid value for 'require' module flag "
1822 "(first value operand should be a string)"),
1823 Value->getOperand(0));
1824
1825 // Append it to the list of requirements, to check once all module flags are
1826 // scanned.
1827 Requirements.push_back(Value);
1828 break;
1829 }
1830
1831 case Module::Append:
1832 case Module::AppendUnique: {
1833 // These behavior types require the operand be an MDNode.
1834 Check(isa<MDNode>(Op->getOperand(2)),
1835 "invalid value for 'append'-type module flag "
1836 "(expected a metadata node)",
1837 Op->getOperand(2));
1838 break;
1839 }
1840 }
1841
1842 // Unless this is a "requires" flag, check the ID is unique.
1843 if (MFB != Module::Require) {
1844 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1845 Check(Inserted,
1846 "module flag identifiers must be unique (or of 'require' type)", ID);
1847 }
1848
1849 if (ID->getString() == "wchar_size") {
1850 ConstantInt *Value
1851 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1852 Check(Value, "wchar_size metadata requires constant integer argument");
1853 }
1854
1855 if (ID->getString() == "Linker Options") {
1856 // If the llvm.linker.options named metadata exists, we assume that the
1857 // bitcode reader has upgraded the module flag. Otherwise the flag might
1858 // have been created by a client directly.
1859 Check(M.getNamedMetadata("llvm.linker.options"),
1860 "'Linker Options' named metadata no longer supported");
1861 }
1862
1863 if (ID->getString() == "SemanticInterposition") {
1864 ConstantInt *Value =
1865 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1866 Check(Value,
1867 "SemanticInterposition metadata requires constant integer argument");
1868 }
1869
1870 if (ID->getString() == "CG Profile") {
1871 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1872 visitModuleFlagCGProfileEntry(MDO);
1873 }
1874}
1875
1876void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
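  // Each "CG Profile" entry is expected to be a triple roughly of the form
  //   !{ptr @caller, ptr @callee, i64 2000}
  // (function names and count here are illustrative).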
1877 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1878 if (!FuncMDO)
1879 return;
1880 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1881 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1882 "expected a Function or null", FuncMDO);
1883 };
1884 auto Node = dyn_cast_or_null<MDNode>(MDO);
1885 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1886 CheckFunction(Node->getOperand(0));
1887 CheckFunction(Node->getOperand(1));
1888 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1889 Check(Count && Count->getType()->isIntegerTy(),
1890 "expected an integer constant", Node->getOperand(2));
1891}
1892
1893void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1894 for (Attribute A : Attrs) {
1895
1896 if (A.isStringAttribute()) {
1897#define GET_ATTR_NAMES
1898#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1899#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1900 if (A.getKindAsString() == #DISPLAY_NAME) { \
1901 auto V = A.getValueAsString(); \
1902 if (!(V.empty() || V == "true" || V == "false")) \
1903 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1904 ""); \
1905 }
1906
1907#include "llvm/IR/Attributes.inc"
1908 continue;
1909 }
1910
1911 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1912 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1913 V);
1914 return;
1915 }
1916 }
1917}
1918
1919// verifyParameterAttrs - Check the given attributes for an argument or return
1920// value of the specified type. The value V is printed in error messages.
1921void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1922 const Value *V) {
1923 if (!Attrs.hasAttributes())
1924 return;
1925
1926 verifyAttributeTypes(Attrs, V);
1927
1928 for (Attribute Attr : Attrs)
1929 Check(Attr.isStringAttribute() ||
1930 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1931 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1932 V);
1933
1934 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1935 Check(Attrs.getNumAttributes() == 1,
1936 "Attribute 'immarg' is incompatible with other attributes", V);
1937 }
1938
1939 // Check for mutually incompatible attributes. Only inreg is compatible with
1940 // sret.
1941 unsigned AttrCount = 0;
1942 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1943 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1944 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1945 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1946 Attrs.hasAttribute(Attribute::InReg);
1947 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1948 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1949 Check(AttrCount <= 1,
1950 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1951 "'byref', and 'sret' are incompatible!",
1952 V);
1953
1954 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1955 Attrs.hasAttribute(Attribute::ReadOnly)),
1956 "Attributes "
1957 "'inalloca and readonly' are incompatible!",
1958 V);
1959
1960 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1961 Attrs.hasAttribute(Attribute::Returned)),
1962 "Attributes "
1963 "'sret and returned' are incompatible!",
1964 V);
1965
1966 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1967 Attrs.hasAttribute(Attribute::SExt)),
1968 "Attributes "
1969 "'zeroext and signext' are incompatible!",
1970 V);
1971
1972 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1973 Attrs.hasAttribute(Attribute::ReadOnly)),
1974 "Attributes "
1975 "'readnone and readonly' are incompatible!",
1976 V);
1977
1978 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1979 Attrs.hasAttribute(Attribute::WriteOnly)),
1980 "Attributes "
1981 "'readnone and writeonly' are incompatible!",
1982 V);
1983
1984 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1985 Attrs.hasAttribute(Attribute::WriteOnly)),
1986 "Attributes "
1987 "'readonly and writeonly' are incompatible!",
1988 V);
1989
1990 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
1991 Attrs.hasAttribute(Attribute::AlwaysInline)),
1992 "Attributes "
1993 "'noinline and alwaysinline' are incompatible!",
1994 V);
1995
1996 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
1997 Attrs.hasAttribute(Attribute::ReadNone)),
1998 "Attributes writable and readnone are incompatible!", V);
1999
2000 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2001 Attrs.hasAttribute(Attribute::ReadOnly)),
2002 "Attributes writable and readonly are incompatible!", V);
2003
2004 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2005 for (Attribute Attr : Attrs) {
2006 if (!Attr.isStringAttribute() &&
2007 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2008 CheckFailed("Attribute '" + Attr.getAsString() +
2009 "' applied to incompatible type!", V);
2010 return;
2011 }
2012 }
2013
2014 if (isa<PointerType>(Ty)) {
2015 if (Attrs.hasAttribute(Attribute::Alignment)) {
2016 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2017 Check(AttrAlign.value() <= Value::MaximumAlignment,
2018 "huge alignment values are unsupported", V);
2019 }
2020 if (Attrs.hasAttribute(Attribute::ByVal)) {
2021 SmallPtrSet<Type *, 4> Visited;
2022 Check(Attrs.getByValType()->isSized(&Visited),
2023 "Attribute 'byval' does not support unsized types!", V);
2024 Check(DL.getTypeAllocSize(Attrs.getByValType()).getKnownMinValue() <
2025 (1ULL << 32),
2026 "huge 'byval' arguments are unsupported", V);
2027 }
2028 if (Attrs.hasAttribute(Attribute::ByRef)) {
2029 SmallPtrSet<Type *, 4> Visited;
2030 Check(Attrs.getByRefType()->isSized(&Visited),
2031 "Attribute 'byref' does not support unsized types!", V);
2032 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2033 (1ULL << 32),
2034 "huge 'byref' arguments are unsupported", V);
2035 }
2036 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2037 SmallPtrSet<Type *, 4> Visited;
2038 Check(Attrs.getInAllocaType()->isSized(&Visited),
2039 "Attribute 'inalloca' does not support unsized types!", V);
2040 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2041 (1ULL << 32),
2042 "huge 'inalloca' arguments are unsupported", V);
2043 }
2044 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2045 SmallPtrSet<Type *, 4> Visited;
2046 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2047 "Attribute 'preallocated' does not support unsized types!", V);
2048 Check(
2049 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2050 (1ULL << 32),
2051 "huge 'preallocated' arguments are unsupported", V);
2052 }
2053 }
2054
2055 if (Attrs.hasAttribute(Attribute::Initializes)) {
2056 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2057 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2058 V);
2060 "Attribute 'initializes' does not support unordered ranges", V);
2061 }
2062
2063 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2064 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2065 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2066 V);
2067 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2068 "Invalid value for 'nofpclass' test mask", V);
2069 }
2070 if (Attrs.hasAttribute(Attribute::Range)) {
2071 const ConstantRange &CR =
2072 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2074 "Range bit width must match type bit width!", V);
2075 }
2076}
2077
2078void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2079 const Value *V) {
2080 if (Attrs.hasFnAttr(Attr)) {
2081 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2082 unsigned N;
2083 if (S.getAsInteger(10, N))
2084 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2085 }
2086}
2087
2088// Check parameter attributes against a function type.
2089// The value V is printed in error messages.
2090void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2091 const Value *V, bool IsIntrinsic,
2092 bool IsInlineAsm) {
2093 if (Attrs.isEmpty())
2094 return;
2095
2096 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2097 Check(Attrs.hasParentContext(Context),
2098 "Attribute list does not match Module context!", &Attrs, V);
2099 for (const auto &AttrSet : Attrs) {
2100 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2101 "Attribute set does not match Module context!", &AttrSet, V);
2102 for (const auto &A : AttrSet) {
2103 Check(A.hasParentContext(Context),
2104 "Attribute does not match Module context!", &A, V);
2105 }
2106 }
2107 }
2108
2109 bool SawNest = false;
2110 bool SawReturned = false;
2111 bool SawSRet = false;
2112 bool SawSwiftSelf = false;
2113 bool SawSwiftAsync = false;
2114 bool SawSwiftError = false;
2115
2116 // Verify return value attributes.
2117 AttributeSet RetAttrs = Attrs.getRetAttrs();
2118 for (Attribute RetAttr : RetAttrs)
2119 Check(RetAttr.isStringAttribute() ||
2120 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2121 "Attribute '" + RetAttr.getAsString() +
2122 "' does not apply to function return values",
2123 V);
2124
2125 unsigned MaxParameterWidth = 0;
2126 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2127 if (Ty->isVectorTy()) {
2128 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2129 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2130 if (Size > MaxParameterWidth)
2131 MaxParameterWidth = Size;
2132 }
2133 }
2134 };
2135 GetMaxParameterWidth(FT->getReturnType());
2136 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2137
2138 // Verify parameter attributes.
2139 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2140 Type *Ty = FT->getParamType(i);
2141 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2142
2143 if (!IsIntrinsic) {
2144 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2145 "immarg attribute only applies to intrinsics", V);
2146 if (!IsInlineAsm)
2147 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2148 "Attribute 'elementtype' can only be applied to intrinsics"
2149 " and inline asm.",
2150 V);
2151 }
2152
2153 verifyParameterAttrs(ArgAttrs, Ty, V);
2154 GetMaxParameterWidth(Ty);
2155
2156 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2157 Check(!SawNest, "More than one parameter has attribute nest!", V);
2158 SawNest = true;
2159 }
2160
2161 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2162 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2163 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2164 "Incompatible argument and return types for 'returned' attribute",
2165 V);
2166 SawReturned = true;
2167 }
2168
2169 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2170 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2171 Check(i == 0 || i == 1,
2172 "Attribute 'sret' is not on first or second parameter!", V);
2173 SawSRet = true;
2174 }
2175
2176 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2177 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2178 SawSwiftSelf = true;
2179 }
2180
2181 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2182 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2183 SawSwiftAsync = true;
2184 }
2185
2186 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2187 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2188 SawSwiftError = true;
2189 }
2190
2191 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2192 Check(i == FT->getNumParams() - 1,
2193 "inalloca isn't on the last parameter!", V);
2194 }
2195 }
2196
2197 if (!Attrs.hasFnAttrs())
2198 return;
2199
2200 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2201 for (Attribute FnAttr : Attrs.getFnAttrs())
2202 Check(FnAttr.isStringAttribute() ||
2203 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2204 "Attribute '" + FnAttr.getAsString() +
2205 "' does not apply to functions!",
2206 V);
2207
2208 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2209 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2210 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2211
2212 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2213 Check(Attrs.hasFnAttr(Attribute::NoInline),
2214 "Attribute 'optnone' requires 'noinline'!", V);
2215
2216 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2217 "Attributes 'optsize and optnone' are incompatible!", V);
2218
2219 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2220 "Attributes 'minsize and optnone' are incompatible!", V);
2221
2222 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2223 "Attributes 'optdebug and optnone' are incompatible!", V);
2224 }
2225
2226 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2227 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2228 "Attributes 'optsize and optdebug' are incompatible!", V);
2229
2230 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2231 "Attributes 'minsize and optdebug' are incompatible!", V);
2232 }
2233
2234 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2235 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2236 "Attribute writable and memory without argmem: write are incompatible!",
2237 V);
2238
2239 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2240 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2241 "Attributes 'aarch64_pstate_sm_enabled and "
2242 "aarch64_pstate_sm_compatible' are incompatible!",
2243 V);
2244 }
2245
2246 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2247 Attrs.hasFnAttr("aarch64_inout_za") +
2248 Attrs.hasFnAttr("aarch64_out_za") +
2249 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2250 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2251 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2252 V);
2253
2254 Check(
2255 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2256 Attrs.hasFnAttr("aarch64_inout_zt0") +
2257 Attrs.hasFnAttr("aarch64_out_zt0") +
2258 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2259 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2260 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2261 V);
2262
2263 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2264 const GlobalValue *GV = cast<GlobalValue>(V);
2266 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2267 }
2268
2269 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
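    // e.g. allocsize(0) or allocsize(0, 1): the indices name the parameters
    // carrying the element size and, optionally, the element count.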
2270 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2271 if (ParamNo >= FT->getNumParams()) {
2272 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2273 return false;
2274 }
2275
2276 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2277 CheckFailed("'allocsize' " + Name +
2278 " argument must refer to an integer parameter",
2279 V);
2280 return false;
2281 }
2282
2283 return true;
2284 };
2285
2286 if (!CheckParam("element size", Args->first))
2287 return;
2288
2289 if (Args->second && !CheckParam("number of elements", *Args->second))
2290 return;
2291 }
2292
2293 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2294 AllocFnKind K = Attrs.getAllocKind();
2295 AllocFnKind Type =
2296 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2297 if (!is_contained(
2298 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2299 Type))
2300 CheckFailed(
2301 "'allockind()' requires exactly one of alloc, realloc, and free");
2302 if ((Type == AllocFnKind::Free) &&
2303 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2304 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2305 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2306 "or aligned modifiers.");
2307 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2308 if ((K & ZeroedUninit) == ZeroedUninit)
2309 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2310 }
2311
2312 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2313 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2314 if (VScaleMin == 0)
2315 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2316 else if (!isPowerOf2_32(VScaleMin))
2317 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2318 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2319 if (VScaleMax && VScaleMin > VScaleMax)
2320 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2321 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2322 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2323 }
2324
2325 if (Attrs.hasFnAttr("frame-pointer")) {
2326 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2327 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2328 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2329 }
2330
2331 // Check EVEX512 feature.
2332 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2333 TT.isX86()) {
2334 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2335 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2336 "512-bit vector arguments require 'evex512' for AVX512", V);
2337 }
2338
2339 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2340 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2341 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2342
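  // Illustrative values for the return-address signing attributes checked
  // below: "sign-return-address"="non-leaf", "sign-return-address-key"="a_key".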
2343 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2344 StringRef S = A.getValueAsString();
2345 if (S != "none" && S != "all" && S != "non-leaf")
2346 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2347 }
2348
2349 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2350 StringRef S = A.getValueAsString();
2351 if (S != "a_key" && S != "b_key")
2352 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2353 V);
2354 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2355 CheckFailed(
2356 "'sign-return-address-key' present without `sign-return-address`");
2357 }
2358 }
2359
2360 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2361 StringRef S = A.getValueAsString();
2362 if (S != "" && S != "true" && S != "false")
2363 CheckFailed(
2364 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2365 }
2366
2367 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2368 StringRef S = A.getValueAsString();
2369 if (S != "" && S != "true" && S != "false")
2370 CheckFailed(
2371 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2372 }
2373
2374 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2375 StringRef S = A.getValueAsString();
2376 if (S != "" && S != "true" && S != "false")
2377 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2378 V);
2379 }
2380
2381 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2382 StringRef S = A.getValueAsString();
2383 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2384 if (!Info)
2385 CheckFailed("invalid name for a VFABI variant: " + S, V);
2386 }
2387}
2388
2389void Verifier::verifyFunctionMetadata(
2390 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2391 for (const auto &Pair : MDs) {
2392 if (Pair.first == LLVMContext::MD_prof) {
2393 MDNode *MD = Pair.second;
2394 Check(MD->getNumOperands() >= 2,
2395 "!prof annotations should have no less than 2 operands", MD);
2396
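      // A typical attachment here looks roughly like:
      //   !prof !{!"function_entry_count", i64 100}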
2397 // Check first operand.
2398 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2399 MD);
2400 Check(isa<MDString>(MD->getOperand(0)),
2401 "expected string with name of the !prof annotation", MD);
2402 MDString *MDS = cast<MDString>(MD->getOperand(0));
2403 StringRef ProfName = MDS->getString();
2404 Check(ProfName == "function_entry_count" ||
2405 ProfName == "synthetic_function_entry_count",
2406 "first operand should be 'function_entry_count'"
2407 " or 'synthetic_function_entry_count'",
2408 MD);
2409
2410 // Check second operand.
2411 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2412 MD);
2413 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2414 "expected integer argument to function_entry_count", MD);
2415 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2416 MDNode *MD = Pair.second;
2417 Check(MD->getNumOperands() == 1,
2418 "!kcfi_type must have exactly one operand", MD);
2419 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2420 MD);
2421 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2422 "expected a constant operand for !kcfi_type", MD);
2423 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2424 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2425 "expected a constant integer operand for !kcfi_type", MD);
2426 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2427 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2428 }
2429 }
2430}
2431
2432void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2433 if (!ConstantExprVisited.insert(EntryC).second)
2434 return;
2435
2436 SmallVector<const Constant *, 16> Stack;
2437 Stack.push_back(EntryC);
2438
2439 while (!Stack.empty()) {
2440 const Constant *C = Stack.pop_back_val();
2441
2442 // Check this constant expression.
2443 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2444 visitConstantExpr(CE);
2445
2446 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2447 visitConstantPtrAuth(CPA);
2448
2449 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2450 // Global Values get visited separately, but we do need to make sure
2451 // that the global value is in the correct module
2452 Check(GV->getParent() == &M, "Referencing global in another module!",
2453 EntryC, &M, GV, GV->getParent());
2454 continue;
2455 }
2456
2457 // Visit all sub-expressions.
2458 for (const Use &U : C->operands()) {
2459 const auto *OpC = dyn_cast<Constant>(U);
2460 if (!OpC)
2461 continue;
2462 if (!ConstantExprVisited.insert(OpC).second)
2463 continue;
2464 Stack.push_back(OpC);
2465 }
2466 }
2467}
2468
2469void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2470 if (CE->getOpcode() == Instruction::BitCast)
2471 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2472 CE->getType()),
2473 "Invalid bitcast", CE);
2474}
2475
2476void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2477 Check(CPA->getPointer()->getType()->isPointerTy(),
2478 "signed ptrauth constant base pointer must have pointer type");
2479
2480 Check(CPA->getType() == CPA->getPointer()->getType(),
2481 "signed ptrauth constant must have same type as its base pointer");
2482
2483 Check(CPA->getKey()->getBitWidth() == 32,
2484 "signed ptrauth constant key must be i32 constant integer");
2485
2487 "signed ptrauth constant address discriminator must be a pointer");
2488
2489 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2490 "signed ptrauth constant discriminator must be i64 constant integer");
2491}
2492
2493bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2494 // There shouldn't be more attribute sets than there are parameters plus the
2495 // function and return value.
2496 return Attrs.getNumAttrSets() <= Params + 2;
2497}
2498
2499void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2500 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2501 unsigned ArgNo = 0;
2502 unsigned LabelNo = 0;
2503 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2504 if (CI.Type == InlineAsm::isLabel) {
2505 ++LabelNo;
2506 continue;
2507 }
2508
2509 // Only deal with constraints that correspond to call arguments.
2510 if (!CI.hasArg())
2511 continue;
2512
2513 if (CI.isIndirect) {
2514 const Value *Arg = Call.getArgOperand(ArgNo);
2515 Check(Arg->getType()->isPointerTy(),
2516 "Operand for indirect constraint must have pointer type", &Call);
2517
2518 Check(Call.getParamElementType(ArgNo),
2519 "Operand for indirect constraint must have elementtype attribute",
2520 &Call);
2521 } else {
2522 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2523 "Elementtype attribute can only be applied for indirect "
2524 "constraints",
2525 &Call);
2526 }
2527
2528 ArgNo++;
2529 }
2530
2531 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2532 Check(LabelNo == CallBr->getNumIndirectDests(),
2533 "Number of label constraints does not match number of callbr dests",
2534 &Call);
2535 } else {
2536 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2537 &Call);
2538 }
2539}
2540
2541/// Verify that statepoint intrinsic is well formed.
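/// A minimal well-formed call has roughly this shape (operands illustrative):
///   %tok = call token (i64, i32, ptr, i32, i32, ...)
///       @llvm.experimental.gc.statepoint.p0(i64 0, i32 0,
///           ptr elementtype(void ()) @callee, i32 0, i32 0, i32 0, i32 0)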
2542void Verifier::verifyStatepoint(const CallBase &Call) {
2543 assert(Call.getCalledFunction() &&
2544 Call.getCalledFunction()->getIntrinsicID() ==
2545 Intrinsic::experimental_gc_statepoint);
2546
2547 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2548 !Call.onlyAccessesArgMemory(),
2549 "gc.statepoint must read and write all memory to preserve "
2550 "reordering restrictions required by safepoint semantics",
2551 Call);
2552
2553 const int64_t NumPatchBytes =
2554 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2555 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2556 Check(NumPatchBytes >= 0,
2557 "gc.statepoint number of patchable bytes must be "
2558 "positive",
2559 Call);
2560
2561 Type *TargetElemType = Call.getParamElementType(2);
2562 Check(TargetElemType,
2563 "gc.statepoint callee argument must have elementtype attribute", Call);
2564 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2565 Check(TargetFuncType,
2566 "gc.statepoint callee elementtype must be function type", Call);
2567
2568 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2569 Check(NumCallArgs >= 0,
2570 "gc.statepoint number of arguments to underlying call "
2571 "must be positive",
2572 Call);
2573 const int NumParams = (int)TargetFuncType->getNumParams();
2574 if (TargetFuncType->isVarArg()) {
2575 Check(NumCallArgs >= NumParams,
2576 "gc.statepoint mismatch in number of vararg call args", Call);
2577
2578 // TODO: Remove this limitation
2579 Check(TargetFuncType->getReturnType()->isVoidTy(),
2580 "gc.statepoint doesn't support wrapping non-void "
2581 "vararg functions yet",
2582 Call);
2583 } else
2584 Check(NumCallArgs == NumParams,
2585 "gc.statepoint mismatch in number of call args", Call);
2586
2587 const uint64_t Flags
2588 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2589 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2590 "unknown flag used in gc.statepoint flags argument", Call);
2591
2592 // Verify that the types of the call parameter arguments match
2593 // the type of the wrapped callee.
2594 AttributeList Attrs = Call.getAttributes();
2595 for (int i = 0; i < NumParams; i++) {
2596 Type *ParamType = TargetFuncType->getParamType(i);
2597 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2598 Check(ArgType == ParamType,
2599 "gc.statepoint call argument does not match wrapped "
2600 "function type",
2601 Call);
2602
2603 if (TargetFuncType->isVarArg()) {
2604 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2605 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2606 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2607 }
2608 }
2609
2610 const int EndCallArgsInx = 4 + NumCallArgs;
2611
2612 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2613 Check(isa<ConstantInt>(NumTransitionArgsV),
2614 "gc.statepoint number of transition arguments "
2615 "must be constant integer",
2616 Call);
2617 const int NumTransitionArgs =
2618 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2619 Check(NumTransitionArgs == 0,
2620 "gc.statepoint w/inline transition bundle is deprecated", Call);
2621 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2622
2623 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2624 Check(isa<ConstantInt>(NumDeoptArgsV),
2625 "gc.statepoint number of deoptimization arguments "
2626 "must be constant integer",
2627 Call);
2628 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2629 Check(NumDeoptArgs == 0,
2630 "gc.statepoint w/inline deopt operands is deprecated", Call);
2631
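  // The 7 fixed operands are the five leading ones (id, # patch bytes, callee,
  // # call args, flags) plus the two trailing zero counts checked above.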
2632 const int ExpectedNumArgs = 7 + NumCallArgs;
2633 Check(ExpectedNumArgs == (int)Call.arg_size(),
2634 "gc.statepoint too many arguments", Call);
2635
2636 // Check that the only uses of this gc.statepoint are gc.result or
2637 // gc.relocate calls which are tied to this statepoint and thus part
2638 // of the same statepoint sequence
2639 for (const User *U : Call.users()) {
2640 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2641 Check(UserCall, "illegal use of statepoint token", Call, U);
2642 if (!UserCall)
2643 continue;
2644 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2645 "gc.result or gc.relocate are the only value uses "
2646 "of a gc.statepoint",
2647 Call, U);
2648 if (isa<GCResultInst>(UserCall)) {
2649 Check(UserCall->getArgOperand(0) == &Call,
2650 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2651 } else if (isa<GCRelocateInst>(Call)) {
2652 Check(UserCall->getArgOperand(0) == &Call,
2653 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2654 }
2655 }
2656
2657 // Note: It is legal for a single derived pointer to be listed multiple
2658 // times. It's non-optimal, but it is legal. It can also happen after
2659 // insertion if we strip a bitcast away.
2660 // Note: It is really tempting to check that each base is relocated and
2661 // that a derived pointer is never reused as a base pointer. This turns
2662 // out to be problematic since optimizations run after safepoint insertion
2663 // can recognize equality properties that the insertion logic doesn't know
2664 // about. See example statepoint.ll in the verifier subdirectory
2665}
2666
2667void Verifier::verifyFrameRecoverIndices() {
2668 for (auto &Counts : FrameEscapeInfo) {
2669 Function *F = Counts.first;
2670 unsigned EscapedObjectCount = Counts.second.first;
2671 unsigned MaxRecoveredIndex = Counts.second.second;
2672 Check(MaxRecoveredIndex <= EscapedObjectCount,
2673 "all indices passed to llvm.localrecover must be less than the "
2674 "number of arguments passed to llvm.localescape in the parent "
2675 "function",
2676 F);
2677 }
2678}
2679
2680static Instruction *getSuccPad(Instruction *Terminator) {
2681 BasicBlock *UnwindDest;
2682 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2683 UnwindDest = II->getUnwindDest();
2684 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2685 UnwindDest = CSI->getUnwindDest();
2686 else
2687 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2688 return UnwindDest->getFirstNonPHI();
2689}
2690
2691void Verifier::verifySiblingFuncletUnwinds() {
2692 SmallPtrSet<Instruction *, 8> Visited;
2693 SmallPtrSet<Instruction *, 8> Active;
2694 for (const auto &Pair : SiblingFuncletInfo) {
2695 Instruction *PredPad = Pair.first;
2696 if (Visited.count(PredPad))
2697 continue;
2698 Active.insert(PredPad);
2699 Instruction *Terminator = Pair.second;
2700 do {
2701 Instruction *SuccPad = getSuccPad(Terminator);
2702 if (Active.count(SuccPad)) {
2703 // Found a cycle; report error
2704 Instruction *CyclePad = SuccPad;
2705 SmallVector<Instruction *, 8> CycleNodes;
2706 do {
2707 CycleNodes.push_back(CyclePad);
2708 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2709 if (CycleTerminator != CyclePad)
2710 CycleNodes.push_back(CycleTerminator);
2711 CyclePad = getSuccPad(CycleTerminator);
2712 } while (CyclePad != SuccPad);
2713 Check(false, "EH pads can't handle each other's exceptions",
2714 ArrayRef<Instruction *>(CycleNodes));
2715 }
2716 // Don't re-walk a node we've already checked
2717 if (!Visited.insert(SuccPad).second)
2718 break;
2719 // Walk to this successor if it has a map entry.
2720 PredPad = SuccPad;
2721 auto TermI = SiblingFuncletInfo.find(PredPad);
2722 if (TermI == SiblingFuncletInfo.end())
2723 break;
2724 Terminator = TermI->second;
2725 Active.insert(PredPad);
2726 } while (true);
2727 // Each node only has one successor, so we've walked all the active
2728 // nodes' successors.
2729 Active.clear();
2730 }
2731}
2732
2733// visitFunction - Verify that a function is ok.
2734//
2735void Verifier::visitFunction(const Function &F) {
2736 visitGlobalValue(F);
2737
2738 // Check function arguments.
2739 FunctionType *FT = F.getFunctionType();
2740 unsigned NumArgs = F.arg_size();
2741
2742 Check(&Context == &F.getContext(),
2743 "Function context does not match Module context!", &F);
2744
2745 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2746 Check(FT->getNumParams() == NumArgs,
2747 "# formal arguments must match # of arguments for function type!", &F,
2748 FT);
2749 Check(F.getReturnType()->isFirstClassType() ||
2750 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2751 "Functions cannot return aggregate values!", &F);
2752
2753 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2754 "Invalid struct return type!", &F);
2755
2756 AttributeList Attrs = F.getAttributes();
2757
2758 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2759 "Attribute after last parameter!", &F);
2760
2761 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2762 "Function debug format should match parent module", &F,
2763 F.IsNewDbgInfoFormat, F.getParent(),
2764 F.getParent()->IsNewDbgInfoFormat);
2765
2766 bool IsIntrinsic = F.isIntrinsic();
2767
2768 // Check function attributes.
2769 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2770
2771 // On function declarations/definitions, we do not support the builtin
2772 // attribute. We do not check this in VerifyFunctionAttrs since that is
2773 // checking for Attributes that can/can not ever be on functions.
2774 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2775 "Attribute 'builtin' can only be applied to a callsite.", &F);
2776
2777 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2778 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2779
2780 if (Attrs.hasFnAttr(Attribute::Naked))
2781 for (const Argument &Arg : F.args())
2782 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2783
2784 // Check that this function meets the restrictions on this calling convention.
2785 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2786 // restrictions can be lifted.
2787 switch (F.getCallingConv()) {
2788 default:
2789 case CallingConv::C:
2790 break;
2791 case CallingConv::X86_INTR: {
2792 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2793 "Calling convention parameter requires byval", &F);
2794 break;
2795 }
2796 case CallingConv::AMDGPU_KERNEL:
2797 case CallingConv::SPIR_KERNEL:
2798 case CallingConv::AMDGPU_CS_Chain:
2799 case CallingConv::AMDGPU_CS_ChainPreserve:
2800 Check(F.getReturnType()->isVoidTy(),
2801 "Calling convention requires void return type", &F);
2802 [[fallthrough]];
2803 case CallingConv::AMDGPU_VS:
2804 case CallingConv::AMDGPU_HS:
2805 case CallingConv::AMDGPU_GS:
2806 case CallingConv::AMDGPU_PS:
2807 case CallingConv::AMDGPU_CS:
2808 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2809 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2810 const unsigned StackAS = DL.getAllocaAddrSpace();
2811 unsigned i = 0;
2812 for (const Argument &Arg : F.args()) {
2813 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2814 "Calling convention disallows byval", &F);
2815 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2816 "Calling convention disallows preallocated", &F);
2817 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2818 "Calling convention disallows inalloca", &F);
2819
2820 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2821 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2822 // value here.
2823 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2824 "Calling convention disallows stack byref", &F);
2825 }
2826
2827 ++i;
2828 }
2829 }
2830
2831 [[fallthrough]];
2832 case CallingConv::Fast:
2833 case CallingConv::Cold:
2834 case CallingConv::Intel_OCL_BI:
2835 case CallingConv::PTX_Kernel:
2836 case CallingConv::PTX_Device:
2837 Check(!F.isVarArg(),
2838 "Calling convention does not support varargs or "
2839 "perfect forwarding!",
2840 &F);
2841 break;
2842 }
2843
2844 // Check that the argument values match the function type for this function...
2845 unsigned i = 0;
2846 for (const Argument &Arg : F.args()) {
2847 Check(Arg.getType() == FT->getParamType(i),
2848 "Argument value does not match function argument type!", &Arg,
2849 FT->getParamType(i));
2850 Check(Arg.getType()->isFirstClassType(),
2851 "Function arguments must have first-class types!", &Arg);
2852 if (!IsIntrinsic) {
2853 Check(!Arg.getType()->isMetadataTy(),
2854 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2855 Check(!Arg.getType()->isTokenTy(),
2856 "Function takes token but isn't an intrinsic", &Arg, &F);
2857 Check(!Arg.getType()->isX86_AMXTy(),
2858 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2859 }
2860
2861 // Check that swifterror argument is only used by loads and stores.
2862 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2863 verifySwiftErrorValue(&Arg);
2864 }
2865 ++i;
2866 }
2867
2868 if (!IsIntrinsic) {
2869 Check(!F.getReturnType()->isTokenTy(),
2870 "Function returns a token but isn't an intrinsic", &F);
2871 Check(!F.getReturnType()->isX86_AMXTy(),
2872 "Function returns a x86_amx but isn't an intrinsic", &F);
2873 }
2874
2875 // Get the function metadata attachments.
2876 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2877 F.getAllMetadata(MDs);
2878 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2879 verifyFunctionMetadata(MDs);
2880
2881 // Check validity of the personality function
2882 if (F.hasPersonalityFn()) {
2883 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2884 if (Per)
2885 Check(Per->getParent() == F.getParent(),
2886 "Referencing personality function in another module!", &F,
2887 F.getParent(), Per, Per->getParent());
2888 }
2889
2890 // EH funclet coloring can be expensive, recompute on-demand
2891 BlockEHFuncletColors.clear();
2892
2893 if (F.isMaterializable()) {
2894 // Function has a body somewhere we can't see.
2895 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2896 MDs.empty() ? nullptr : MDs.front().second);
2897 } else if (F.isDeclaration()) {
2898 for (const auto &I : MDs) {
2899 // This is used for call site debug information.
2900 CheckDI(I.first != LLVMContext::MD_dbg ||
2901 !cast<DISubprogram>(I.second)->isDistinct(),
2902 "function declaration may only have a unique !dbg attachment",
2903 &F);
2904 Check(I.first != LLVMContext::MD_prof,
2905 "function declaration may not have a !prof attachment", &F);
2906
2907 // Verify the metadata itself.
2908 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2909 }
2910 Check(!F.hasPersonalityFn(),
2911 "Function declaration shouldn't have a personality routine", &F);
2912 } else {
2913 // Verify that this function (which has a body) is not named "llvm.*". It
2914 // is not legal to define intrinsics.
2915 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2916
2917 // Check the entry node
2918 const BasicBlock *Entry = &F.getEntryBlock();
2919 Check(pred_empty(Entry),
2920 "Entry block to function must not have predecessors!", Entry);
2921
2922 // The address of the entry block cannot be taken, unless it is dead.
2923 if (Entry->hasAddressTaken()) {
2924 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2925 "blockaddress may not be used with the entry block!", Entry);
2926 }
2927
2928 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2929 NumKCFIAttachments = 0;
2930 // Visit metadata attachments.
2931 for (const auto &I : MDs) {
2932 // Verify that the attachment is legal.
2933 auto AllowLocs = AreDebugLocsAllowed::No;
2934 switch (I.first) {
2935 default:
2936 break;
2937 case LLVMContext::MD_dbg: {
2938 ++NumDebugAttachments;
2939 CheckDI(NumDebugAttachments == 1,
2940 "function must have a single !dbg attachment", &F, I.second);
2941 CheckDI(isa<DISubprogram>(I.second),
2942 "function !dbg attachment must be a subprogram", &F, I.second);
2943 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2944 "function definition may only have a distinct !dbg attachment",
2945 &F);
2946
2947 auto *SP = cast<DISubprogram>(I.second);
2948 const Function *&AttachedTo = DISubprogramAttachments[SP];
2949 CheckDI(!AttachedTo || AttachedTo == &F,
2950 "DISubprogram attached to more than one function", SP, &F);
2951 AttachedTo = &F;
2952 AllowLocs = AreDebugLocsAllowed::Yes;
2953 break;
2954 }
2955 case LLVMContext::MD_prof:
2956 ++NumProfAttachments;
2957 Check(NumProfAttachments == 1,
2958 "function must have a single !prof attachment", &F, I.second);
2959 break;
2960 case LLVMContext::MD_kcfi_type:
2961 ++NumKCFIAttachments;
2962 Check(NumKCFIAttachments == 1,
2963 "function must have a single !kcfi_type attachment", &F,
2964 I.second);
2965 break;
2966 }
2967
2968 // Verify the metadata itself.
2969 visitMDNode(*I.second, AllowLocs);
2970 }
2971 }
2972
2973 // If this function is actually an intrinsic, verify that it is only used in
2974 // direct call/invokes, never having its "address taken".
2975 // Only do this if the module is materialized, otherwise we don't have all the
2976 // uses.
2977 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2978 const User *U;
2979 if (F.hasAddressTaken(&U, false, true, false,
2980 /*IgnoreARCAttachedCall=*/true))
2981 Check(false, "Invalid user of intrinsic instruction!", U);
2982 }
2983
2984 // Check intrinsics' signatures.
2985 switch (F.getIntrinsicID()) {
2986 case Intrinsic::experimental_gc_get_pointer_base: {
2987 FunctionType *FT = F.getFunctionType();
2988 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2989 Check(isa<PointerType>(F.getReturnType()),
2990 "gc.get.pointer.base must return a pointer", F);
2991 Check(FT->getParamType(0) == F.getReturnType(),
2992 "gc.get.pointer.base operand and result must be of the same type", F);
2993 break;
2994 }
2995 case Intrinsic::experimental_gc_get_pointer_offset: {
2996 FunctionType *FT = F.getFunctionType();
2997 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2998 Check(isa<PointerType>(FT->getParamType(0)),
2999 "gc.get.pointer.offset operand must be a pointer", F);
3000 Check(F.getReturnType()->isIntegerTy(),
3001 "gc.get.pointer.offset must return integer", F);
3002 break;
3003 }
3004 }
3005
3006 auto *N = F.getSubprogram();
3007 HasDebugInfo = (N != nullptr);
3008 if (!HasDebugInfo)
3009 return;
3010
3011 // Check that all !dbg attachments lead back to N.
3012 //
3013 // FIXME: Check this incrementally while visiting !dbg attachments.
3014 // FIXME: Only check when N is the canonical subprogram for F.
3015 SmallPtrSet<const MDNode *, 32> Seen;
3016 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3017 // Be careful about using DILocation here since we might be dealing with
3018 // broken code (this is the Verifier after all).
3019 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3020 if (!DL)
3021 return;
3022 if (!Seen.insert(DL).second)
3023 return;
3024
3025 Metadata *Parent = DL->getRawScope();
3026 CheckDI(Parent && isa<DILocalScope>(Parent),
3027 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3028
3029 DILocalScope *Scope = DL->getInlinedAtScope();
3030 Check(Scope, "Failed to find DILocalScope", DL);
3031
3032 if (!Seen.insert(Scope).second)
3033 return;
3034
3035 DISubprogram *SP = Scope->getSubprogram();
3036
3037 // Scope and SP could be the same MDNode and we don't want to skip
3038 // validation in that case
3039 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3040 return;
3041
3042 CheckDI(SP->describes(&F),
3043 "!dbg attachment points at wrong subprogram for function", N, &F,
3044 &I, DL, Scope, SP);
3045 };
3046 for (auto &BB : F)
3047 for (auto &I : BB) {
3048 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3049 // The llvm.loop annotations also contain two DILocations.
3050 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3051 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3052 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3053 if (BrokenDebugInfo)
3054 return;
3055 }
3056}
3057
3058// visitBasicBlock - Verify that a basic block is well formed...
3059//
3060void Verifier::visitBasicBlock(BasicBlock &BB) {
3061 InstsInThisBlock.clear();
3062 ConvergenceVerifyHelper.visit(BB);
3063
3064 // Ensure that basic blocks have terminators!
3065 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3066
3067 // Check constraints that this basic block imposes on all of the PHI nodes in
3068 // it.
3069 if (isa<PHINode>(BB.front())) {
3070 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3071 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3072 llvm::sort(Preds);
3073 for (const PHINode &PN : BB.phis()) {
3074 Check(PN.getNumIncomingValues() == Preds.size(),
3075 "PHINode should have one entry for each predecessor of its "
3076 "parent basic block!",
3077 &PN);
3078
3079 // Get and sort all incoming values in the PHI node...
3080 Values.clear();
3081 Values.reserve(PN.getNumIncomingValues());
3082 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3083 Values.push_back(
3084 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3085 llvm::sort(Values);
3086
3087 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3088 // Check to make sure that if there is more than one entry for a
3089 // particular basic block in this PHI node, that the incoming values are
3090 // all identical.
3091 //
3092 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3093 Values[i].second == Values[i - 1].second,
3094 "PHI node has multiple entries for the same basic block with "
3095 "different incoming values!",
3096 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3097
3098 // Check to make sure that the predecessors and PHI node entries are
3099 // matched up.
3100 Check(Values[i].first == Preds[i],
3101 "PHI node entries do not match predecessors!", &PN,
3102 Values[i].first, Preds[i]);
3103 }
3104 }
3105 }
3106
3107 // Check that all instructions have their parent pointers set up correctly.
3108 for (auto &I : BB)
3109 {
3110 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3111 }
3112
3113 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3114 "BB debug format should match parent function", &BB,
3115 BB.IsNewDbgInfoFormat, BB.getParent(),
3116 BB.getParent()->IsNewDbgInfoFormat);
3117
3118 // Confirm that no issues arise from the debug program.
3119 if (BB.IsNewDbgInfoFormat)
3120 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3121 &BB);
3122}
3123
3124void Verifier::visitTerminator(Instruction &I) {
3125 // Ensure that terminators only exist at the end of the basic block.
3126 Check(&I == I.getParent()->getTerminator(),
3127 "Terminator found in the middle of a basic block!", I.getParent());
3129}
3130
3131void Verifier::visitBranchInst(BranchInst &BI) {
3132 if (BI.isConditional()) {
3134 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3135 }
3136 visitTerminator(BI);
3137}
3138
3139void Verifier::visitReturnInst(ReturnInst &RI) {
3140 Function *F = RI.getParent()->getParent();
3141 unsigned N = RI.getNumOperands();
3142 if (F->getReturnType()->isVoidTy())
3143 Check(N == 0,
3144 "Found return instr that returns non-void in Function of void "
3145 "return type!",
3146 &RI, F->getReturnType());
3147 else
3148 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3149 "Function return type does not match operand "
3150 "type of return inst!",
3151 &RI, F->getReturnType());
3152
3153 // Check to make sure that the return value has necessary properties for
3154 // terminators...
3155 visitTerminator(RI);
3156}
3157
3158void Verifier::visitSwitchInst(SwitchInst &SI) {
3159 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3160 // Check to make sure that all of the constants in the switch instruction
3161 // have the same type as the switched-on value.
3162 Type *SwitchTy = SI.getCondition()->getType();
3163 SmallPtrSet<ConstantInt *, 32> Constants;
3164 for (auto &Case : SI.cases()) {
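// Switch operands are laid out as [condition, default dest, case0 value,
// case0 dest, case1 value, ...], so the value of case i is operand 2*i + 2.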
3165 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3166 "Case value is not a constant integer.", &SI);
3167 Check(Case.getCaseValue()->getType() == SwitchTy,
3168 "Switch constants must all be same type as switch value!", &SI);
3169 Check(Constants.insert(Case.getCaseValue()).second,
3170 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3171 }
3172
3173 visitTerminator(SI);
3174}
3175
3176void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3178 "Indirectbr operand must have pointer type!", &BI);
3179 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3181 "Indirectbr destinations must all have pointer type!", &BI);
3182
3183 visitTerminator(BI);
3184}
3185
3186void Verifier::visitCallBrInst(CallBrInst &CBI) {
3187 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3188 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3189 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3190
3191 verifyInlineAsmCall(CBI);
3192 visitTerminator(CBI);
3193}
3194
3195void Verifier::visitSelectInst(SelectInst &SI) {
3196 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3197 SI.getOperand(2)),
3198 "Invalid operands for select instruction!", &SI);
3199
3200 Check(SI.getTrueValue()->getType() == SI.getType(),
3201 "Select values must have same type as select instruction!", &SI);
3202 visitInstruction(SI);
3203}
3204
3205 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3206 /// a pass; if any exist, it's an error.
3207///
3208void Verifier::visitUserOp1(Instruction &I) {
3209 Check(false, "User-defined operators should not live outside of a pass!", &I);
3210}
3211
3212void Verifier::visitTruncInst(TruncInst &I) {
3213 // Get the source and destination types
3214 Type *SrcTy = I.getOperand(0)->getType();
3215 Type *DestTy = I.getType();
3216
3217 // Get the size of the types in bits, we'll need this later
3218 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3219 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3220
3221 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3222 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3223 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3224 "trunc source and destination must both be a vector or neither", &I);
3225 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3226
3228}
3229
3230void Verifier::visitZExtInst(ZExtInst &I) {
3231 // Get the source and destination types
3232 Type *SrcTy = I.getOperand(0)->getType();
3233 Type *DestTy = I.getType();
3234
3235 // Get the size of the types in bits, we'll need this later
3236 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3237 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3238 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3239 "zext source and destination must both be a vector or neither", &I);
3240 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3241 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3242
3243 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3244
3246}
3247
3248void Verifier::visitSExtInst(SExtInst &I) {
3249 // Get the source and destination types
3250 Type *SrcTy = I.getOperand(0)->getType();
3251 Type *DestTy = I.getType();
3252
3253 // Get the size of the types in bits, we'll need this later
3254 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3255 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3256
3257 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3258 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3259 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3260 "sext source and destination must both be a vector or neither", &I);
3261 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3262
3264}
3265
3266void Verifier::visitFPTruncInst(FPTruncInst &I) {
3267 // Get the source and destination types
3268 Type *SrcTy = I.getOperand(0)->getType();
3269 Type *DestTy = I.getType();
3270 // Get the size of the types in bits, we'll need this later
3271 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3272 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3273
3274 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3275 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3276 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3277 "fptrunc source and destination must both be a vector or neither", &I);
3278 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3279
3281}
3282
3283void Verifier::visitFPExtInst(FPExtInst &I) {
3284 // Get the source and destination types
3285 Type *SrcTy = I.getOperand(0)->getType();
3286 Type *DestTy = I.getType();
3287
3288 // Get the size of the types in bits, we'll need this later
3289 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3290 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3291
3292 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3293 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3294 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3295 "fpext source and destination must both be a vector or neither", &I);
3296 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3297
3299}
3300
3301void Verifier::visitUIToFPInst(UIToFPInst &I) {
3302 // Get the source and destination types
3303 Type *SrcTy = I.getOperand(0)->getType();
3304 Type *DestTy = I.getType();
3305
3306 bool SrcVec = SrcTy->isVectorTy();
3307 bool DstVec = DestTy->isVectorTy();
3308
3309 Check(SrcVec == DstVec,
3310 "UIToFP source and dest must both be vector or scalar", &I);
3311 Check(SrcTy->isIntOrIntVectorTy(),
3312 "UIToFP source must be integer or integer vector", &I);
3313 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3314 &I);
3315
3316 if (SrcVec && DstVec)
3317 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3318 cast<VectorType>(DestTy)->getElementCount(),
3319 "UIToFP source and dest vector length mismatch", &I);
3320
3322}
3323
3324void Verifier::visitSIToFPInst(SIToFPInst &I) {
3325 // Get the source and destination types
3326 Type *SrcTy = I.getOperand(0)->getType();
3327 Type *DestTy = I.getType();
3328
3329 bool SrcVec = SrcTy->isVectorTy();
3330 bool DstVec = DestTy->isVectorTy();
3331
3332 Check(SrcVec == DstVec,
3333 "SIToFP source and dest must both be vector or scalar", &I);
3334 Check(SrcTy->isIntOrIntVectorTy(),
3335 "SIToFP source must be integer or integer vector", &I);
3336 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3337 &I);
3338
3339 if (SrcVec && DstVec)
3340 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3341 cast<VectorType>(DestTy)->getElementCount(),
3342 "SIToFP source and dest vector length mismatch", &I);
3343
3345}
3346
3347void Verifier::visitFPToUIInst(FPToUIInst &I) {
3348 // Get the source and destination types
3349 Type *SrcTy = I.getOperand(0)->getType();
3350 Type *DestTy = I.getType();
3351
3352 bool SrcVec = SrcTy->isVectorTy();
3353 bool DstVec = DestTy->isVectorTy();
3354
3355 Check(SrcVec == DstVec,
3356 "FPToUI source and dest must both be vector or scalar", &I);
3357 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3358 Check(DestTy->isIntOrIntVectorTy(),
3359 "FPToUI result must be integer or integer vector", &I);
3360
3361 if (SrcVec && DstVec)
3362 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3363 cast<VectorType>(DestTy)->getElementCount(),
3364 "FPToUI source and dest vector length mismatch", &I);
3365
3367}
3368
3369void Verifier::visitFPToSIInst(FPToSIInst &I) {
3370 // Get the source and destination types
3371 Type *SrcTy = I.getOperand(0)->getType();
3372 Type *DestTy = I.getType();
3373
3374 bool SrcVec = SrcTy->isVectorTy();
3375 bool DstVec = DestTy->isVectorTy();
3376
3377 Check(SrcVec == DstVec,
3378 "FPToSI source and dest must both be vector or scalar", &I);
3379 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3380 Check(DestTy->isIntOrIntVectorTy(),
3381 "FPToSI result must be integer or integer vector", &I);
3382
3383 if (SrcVec && DstVec)
3384 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3385 cast<VectorType>(DestTy)->getElementCount(),
3386 "FPToSI source and dest vector length mismatch", &I);
3387
3389}
3390
3391void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3392 // Get the source and destination types
3393 Type *SrcTy = I.getOperand(0)->getType();
3394 Type *DestTy = I.getType();
3395
3396 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3397
3398 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3399 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3400 &I);
3401
3402 if (SrcTy->isVectorTy()) {
3403 auto *VSrc = cast<VectorType>(SrcTy);
3404 auto *VDest = cast<VectorType>(DestTy);
3405 Check(VSrc->getElementCount() == VDest->getElementCount(),
3406 "PtrToInt Vector width mismatch", &I);
3407 }
3408
3410}
3411
3412void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3413 // Get the source and destination types
3414 Type *SrcTy = I.getOperand(0)->getType();
3415 Type *DestTy = I.getType();
3416
3417 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3418 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3419
3420 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3421 &I);
3422 if (SrcTy->isVectorTy()) {
3423 auto *VSrc = cast<VectorType>(SrcTy);
3424 auto *VDest = cast<VectorType>(DestTy);
3425 Check(VSrc->getElementCount() == VDest->getElementCount(),
3426 "IntToPtr Vector width mismatch", &I);
3427 }
3429}
3430
3431void Verifier::visitBitCastInst(BitCastInst &I) {
3432 Check(
3433 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3434 "Invalid bitcast", &I);
3436}
3437
3438void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3439 Type *SrcTy = I.getOperand(0)->getType();
3440 Type *DestTy = I.getType();
3441
3442 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3443 &I);
3444 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3445 &I);
3447 "AddrSpaceCast must be between different address spaces", &I);
3448 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3449 Check(SrcVTy->getElementCount() ==
3450 cast<VectorType>(DestTy)->getElementCount(),
3451 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3453}
3454
3455/// visitPHINode - Ensure that a PHI node is well formed.
3456///
3457void Verifier::visitPHINode(PHINode &PN) {
3458 // Ensure that the PHI nodes are all grouped together at the top of the block.
3459 // This can be tested by checking whether the instruction before this is
3460 // either nonexistent (because this is begin()) or is a PHI node. If not,
3461 // then there is some other instruction before a PHI.
3462 Check(&PN == &PN.getParent()->front() ||
3463 isa<PHINode>(--BasicBlock::iterator(&PN)),
3464 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3465
3466 // Check that a PHI doesn't yield a Token.
3467 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3468
3469 // Check that all of the values of the PHI node have the same type as the
3470 // result.
3471 for (Value *IncValue : PN.incoming_values()) {
3472 Check(PN.getType() == IncValue->getType(),
3473 "PHI node operands are not the same type as the result!", &PN);
3474 }
3475
3476 // All other PHI node constraints are checked in the visitBasicBlock method.
3477
3478 visitInstruction(PN);
3479}
3480
3481void Verifier::visitCallBase(CallBase &Call) {
3482 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3483 "Called function must be a pointer!", Call);
3484 FunctionType *FTy = Call.getFunctionType();
3485
3486 // Verify that the correct number of arguments are being passed
3487 if (FTy->isVarArg())
3488 Check(Call.arg_size() >= FTy->getNumParams(),
3489 "Called function requires more parameters than were provided!", Call);
3490 else
3491 Check(Call.arg_size() == FTy->getNumParams(),
3492 "Incorrect number of arguments passed to called function!", Call);
3493
3494 // Verify that all arguments to the call match the function type.
3495 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3496 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3497 "Call parameter type does not match function signature!",
3498 Call.getArgOperand(i), FTy->getParamType(i), Call);
3499
3500 AttributeList Attrs = Call.getAttributes();
3501
3502 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3503 "Attribute after last parameter!", Call);
3504
3505 Function *Callee =
3506 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3507 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3508 if (IsIntrinsic)
3509 Check(Callee->getValueType() == FTy,
3510 "Intrinsic called with incompatible signature", Call);
3511
3512 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3513 // convention.
3514 auto CC = Call.getCallingConv();
3517 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3518 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3519 Call);
3520
3521 // Disallow passing/returning values with alignment higher than we can
3522 // represent.
3523 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3524 // necessary.
3525 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3526 if (!Ty->isSized())
3527 return;
3528 Align ABIAlign = DL.getABITypeAlign(Ty);
3529 Check(ABIAlign.value() <= Value::MaximumAlignment,
3530 "Incorrect alignment of " + Message + " to called function!", Call);
3531 };
3532
3533 if (!IsIntrinsic) {
3534 VerifyTypeAlign(FTy->getReturnType(), "return type");
3535 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3536 Type *Ty = FTy->getParamType(i);
3537 VerifyTypeAlign(Ty, "argument passed");
3538 }
3539 }
3540
3541 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3542 // Don't allow speculatable on call sites, unless the underlying function
3543 // declaration is also speculatable.
3544 Check(Callee && Callee->isSpeculatable(),
3545 "speculatable attribute may not apply to call sites", Call);
3546 }
3547
3548 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3549 Check(Call.getCalledFunction()->getIntrinsicID() ==
3550 Intrinsic::call_preallocated_arg,
3551 "preallocated as a call site attribute can only be on "
3552 "llvm.call.preallocated.arg");
3553 }
3554
3555 // Verify call attributes.
3556 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3557
3558 // Conservatively check the inalloca argument.
3559 // We have a bug if we can find that there is an underlying alloca without
3560 // inalloca.
3561 if (Call.hasInAllocaArgument()) {
3562 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3563 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3564 Check(AI->isUsedWithInAlloca(),
3565 "inalloca argument for call has mismatched alloca", AI, Call);
3566 }
3567
3568 // For each argument of the callsite, if it has the swifterror argument,
3569 // make sure the underlying alloca/parameter it comes from has a swifterror as
3570 // well.
3571 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3572 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3573 Value *SwiftErrorArg = Call.getArgOperand(i);
3574 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3575 Check(AI->isSwiftError(),
3576 "swifterror argument for call has mismatched alloca", AI, Call);
3577 continue;
3578 }
3579 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3580 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3581 SwiftErrorArg, Call);
3582 Check(ArgI->hasSwiftErrorAttr(),
3583 "swifterror argument for call has mismatched parameter", ArgI,
3584 Call);
3585 }
3586
3587 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3588 // Don't allow immarg on call sites, unless the underlying declaration
3589 // also has the matching immarg.
3590 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3591 "immarg may not apply only to call sites", Call.getArgOperand(i),
3592 Call);
3593 }
3594
3595 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3596 Value *ArgVal = Call.getArgOperand(i);
3597 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3598 "immarg operand has non-immediate parameter", ArgVal, Call);
3599 }
3600
3601 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3602 Value *ArgVal = Call.getArgOperand(i);
3603 bool hasOB =
3604 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3605 bool isMustTail = Call.isMustTailCall();
3606 Check(hasOB != isMustTail,
3607 "preallocated operand either requires a preallocated bundle or "
3608 "the call to be musttail (but not both)",
3609 ArgVal, Call);
3610 }
3611 }
3612
3613 if (FTy->isVarArg()) {
3614 // FIXME? is 'nest' even legal here?
3615 bool SawNest = false;
3616 bool SawReturned = false;
3617
3618 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3619 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3620 SawNest = true;
3621 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3622 SawReturned = true;
3623 }
3624
3625 // Check attributes on the varargs part.
3626 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3627 Type *Ty = Call.getArgOperand(Idx)->getType();
3628 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3629 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3630
3631 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3632 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3633 SawNest = true;
3634 }
3635
3636 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3637 Check(!SawReturned, "More than one parameter has attribute returned!",
3638 Call);
3639 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3640 "Incompatible argument and return types for 'returned' "
3641 "attribute",
3642 Call);
3643 SawReturned = true;
3644 }
3645
3646 // Statepoint intrinsic is vararg but the wrapped function may be not.
3647 // Allow sret here and check the wrapped function in verifyStatepoint.
3648 if (!Call.getCalledFunction() ||
3649 Call.getCalledFunction()->getIntrinsicID() !=
3650 Intrinsic::experimental_gc_statepoint)
3651 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3652 "Attribute 'sret' cannot be used for vararg call arguments!",
3653 Call);
3654
3655 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3656 Check(Idx == Call.arg_size() - 1,
3657 "inalloca isn't on the last argument!", Call);
3658 }
3659 }
3660
3661 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3662 if (!IsIntrinsic) {
3663 for (Type *ParamTy : FTy->params()) {
3664 Check(!ParamTy->isMetadataTy(),
3665 "Function has metadata parameter but isn't an intrinsic", Call);
3666 Check(!ParamTy->isTokenTy(),
3667 "Function has token parameter but isn't an intrinsic", Call);
3668 }
3669 }
3670
3671 // Verify that indirect calls don't return tokens.
3672 if (!Call.getCalledFunction()) {
3673 Check(!FTy->getReturnType()->isTokenTy(),
3674 "Return type cannot be token for indirect call!");
3675 Check(!FTy->getReturnType()->isX86_AMXTy(),
3676 "Return type cannot be x86_amx for indirect call!");
3677 }
3678
3679 if (Function *F = Call.getCalledFunction())
3680 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3681 visitIntrinsicCall(ID, Call);
3682
3683 // Verify that a callsite has at most one operand bundle of each kind: "deopt",
3684 // "funclet", "gc-transition", "cfguardtarget", "preallocated", "ptrauth",
3685 // "kcfi", "gc-live", and "clang.arc.attachedcall".
3686 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3687 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3688 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3689 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3690 FoundAttachedCallBundle = false;
3691 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3692 OperandBundleUse BU = Call.getOperandBundleAt(i);
3693 uint32_t Tag = BU.getTagID();
3694 if (Tag == LLVMContext::OB_deopt) {
3695 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3696 FoundDeoptBundle = true;
3697 } else if (Tag == LLVMContext::OB_gc_transition) {
3698 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3699 Call);
3700 FoundGCTransitionBundle = true;
3701 } else if (Tag == LLVMContext::OB_funclet) {
3702 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3703 FoundFuncletBundle = true;
3704 Check(BU.Inputs.size() == 1,
3705 "Expected exactly one funclet bundle operand", Call);
3706 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3707 "Funclet bundle operands should correspond to a FuncletPadInst",
3708 Call);
3709 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3710 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3711 Call);
3712 FoundCFGuardTargetBundle = true;
3713 Check(BU.Inputs.size() == 1,
3714 "Expected exactly one cfguardtarget bundle operand", Call);
3715 } else if (Tag == LLVMContext::OB_ptrauth) {
3716 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3717 FoundPtrauthBundle = true;
3718 Check(BU.Inputs.size() == 2,
3719 "Expected exactly two ptrauth bundle operands", Call);
3720 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3721 BU.Inputs[0]->getType()->isIntegerTy(32),
3722 "Ptrauth bundle key operand must be an i32 constant", Call);
3723 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3724 "Ptrauth bundle discriminator operand must be an i64", Call);
3725 } else if (Tag == LLVMContext::OB_kcfi) {
3726 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3727 FoundKCFIBundle = true;
3728 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3729 Call);
3730 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3731 BU.Inputs[0]->getType()->isIntegerTy(32),
3732 "Kcfi bundle operand must be an i32 constant", Call);
3733 } else if (Tag == LLVMContext::OB_preallocated) {
3734 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3735 Call);
3736 FoundPreallocatedBundle = true;
3737 Check(BU.Inputs.size() == 1,
3738 "Expected exactly one preallocated bundle operand", Call);
3739 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3740 Check(Input &&
3741 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3742 "\"preallocated\" argument must be a token from "
3743 "llvm.call.preallocated.setup",
3744 Call);
3745 } else if (Tag == LLVMContext::OB_gc_live) {
3746 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3747 FoundGCLiveBundle = true;
3748 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3749 Check(!FoundAttachedCallBundle,
3750 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3751 FoundAttachedCallBundle = true;
3752 verifyAttachedCallBundle(Call, BU);
3753 }
3754 }
3755
3756 // Verify that callee and callsite agree on whether to use pointer auth.
3757 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3758 "Direct call cannot have a ptrauth bundle", Call);
3759
3760 // Verify that each inlinable callsite of a debug-info-bearing function in a
3761 // debug-info-bearing function has a debug location attached to it. Failure to
3762 // do so causes assertion failures when the inliner sets up inline scope info.
3763 // (Interposable functions are not inlinable; neither are functions without
3764 // definitions.)
3765 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3766 !Call.getCalledFunction()->isInterposable() &&
3767 !Call.getCalledFunction()->isDeclaration() &&
3768 Call.getCalledFunction()->getSubprogram())
3769 CheckDI(Call.getDebugLoc(),
3770 "inlinable function call in a function with "
3771 "debug info must have a !dbg location",
3772 Call);
3773
3774 if (Call.isInlineAsm())
3775 verifyInlineAsmCall(Call);
3776
3777 ConvergenceVerifyHelper.visit(Call);
3778
3779 visitInstruction(Call);
3780}
3781
3782void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3783 StringRef Context) {
3784 Check(!Attrs.contains(Attribute::InAlloca),
3785 Twine("inalloca attribute not allowed in ") + Context);
3786 Check(!Attrs.contains(Attribute::InReg),
3787 Twine("inreg attribute not allowed in ") + Context);
3788 Check(!Attrs.contains(Attribute::SwiftError),
3789 Twine("swifterror attribute not allowed in ") + Context);
3790 Check(!Attrs.contains(Attribute::Preallocated),
3791 Twine("preallocated attribute not allowed in ") + Context);
3792 Check(!Attrs.contains(Attribute::ByRef),
3793 Twine("byref attribute not allowed in ") + Context);
3794}
3795
3796/// Two types are "congruent" if they are identical, or if they are both pointer
3797/// types with different pointee types and the same address space.
3798static bool isTypeCongruent(Type *L, Type *R) {
3799 if (L == R)
3800 return true;
3801 PointerType *PL = dyn_cast<PointerType>(L);
3802 PointerType *PR = dyn_cast<PointerType>(R);
3803 if (!PL || !PR)
3804 return false;
3805 return PL->getAddressSpace() == PR->getAddressSpace();
3806}
3807
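/// Collect the ABI-affecting attributes (plus alignment when paired with byval
/// or byref) of parameter \p I from \p Attrs, so that caller and callee
/// parameter ABIs can be compared at musttail call sites.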
3808 static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3809 static const Attribute::AttrKind ABIAttrs[] = {
3810 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3811 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3812 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3813 Attribute::ByRef};
3814 AttrBuilder Copy(C);
3815 for (auto AK : ABIAttrs) {
3816 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3817 if (Attr.isValid())
3818 Copy.addAttribute(Attr);
3819 }
3820
3821 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3822 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3823 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3824 Attrs.hasParamAttr(I, Attribute::ByRef)))
3825 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3826 return Copy;
3827}
3828
3829void Verifier::verifyMustTailCall(CallInst &CI) {
3830 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3831
3832 Function *F = CI.getParent()->getParent();
3833 FunctionType *CallerTy = F->getFunctionType();
3834 FunctionType *CalleeTy = CI.getFunctionType();
3835 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3836 "cannot guarantee tail call due to mismatched varargs", &CI);
3837 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3838 "cannot guarantee tail call due to mismatched return types", &CI);
3839
3840 // - The calling conventions of the caller and callee must match.
3841 Check(F->getCallingConv() == CI.getCallingConv(),
3842 "cannot guarantee tail call due to mismatched calling conv", &CI);
3843
3844 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3845 // or a pointer bitcast followed by a ret instruction.
3846 // - The ret instruction must return the (possibly bitcasted) value
3847 // produced by the call or void.
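// For example, a conforming sequence looks like:
//   %r = musttail call i32 @callee(i32 %x)
//   ret i32 %r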
3848 Value *RetVal = &CI;
3849 Instruction *Next = CI.getNextNode();
3850
3851 // Handle the optional bitcast.
3852 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3853 Check(BI->getOperand(0) == RetVal,
3854 "bitcast following musttail call must use the call", BI);
3855 RetVal = BI;
3856 Next = BI->getNextNode();
3857 }
3858
3859 // Check the return.
3860 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3861 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3862 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3863 isa<UndefValue>(Ret->getReturnValue()),
3864 "musttail call result must be returned", Ret);
3865
3866 AttributeList CallerAttrs = F->getAttributes();
3867 AttributeList CalleeAttrs = CI.getAttributes();
3868 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3869 CI.getCallingConv() == CallingConv::Tail) {
3870 StringRef CCName =
3871 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3872
3873 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3874 // are allowed in swifttailcc call
3875 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3876 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3877 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3878 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3879 }
3880 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3881 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3882 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3883 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3884 }
3885 // - Varargs functions are not allowed
3886 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3887 " tail call for varargs function");
3888 return;
3889 }
3890
3891 // - The caller and callee prototypes must match. Pointer types of
3892 // parameters or return types may differ in pointee type, but not
3893 // address space.
3894 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3895 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3896 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3897 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3898 Check(
3899 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3900 "cannot guarantee tail call due to mismatched parameter types", &CI);
3901 }
3902 }
3903
3904 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3905 // returned, preallocated, and inalloca, must match.
3906 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3907 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3908 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3909 Check(CallerABIAttrs == CalleeABIAttrs,
3910 "cannot guarantee tail call due to mismatched ABI impacting "
3911 "function attributes",
3912 &CI, CI.getOperand(I));
3913 }
3914}
3915
3916void Verifier::visitCallInst(CallInst &CI) {
3917 visitCallBase(CI);
3918
3919 if (CI.isMustTailCall())
3920 verifyMustTailCall(CI);
3921}
3922
3923void Verifier::visitInvokeInst(InvokeInst &II) {
3924 visitCallBase(II);
3925
3926 // Verify that the first non-PHI instruction of the unwind destination is an
3927 // exception handling instruction.
3928 Check(
3929 II.getUnwindDest()->isEHPad(),
3930 "The unwind destination does not have an exception handling instruction!",
3931 &II);
3932
3934}
3935
3936/// visitUnaryOperator - Check the argument to the unary operator.
3937///
3938void Verifier::visitUnaryOperator(UnaryOperator &U) {
3939 Check(U.getType() == U.getOperand(0)->getType(),
3940 "Unary operators must have same type for"
3941 "operands and result!",
3942 &U);
3943
3944 switch (U.getOpcode()) {
3945 // Check that floating-point arithmetic operators are only used with
3946 // floating-point operands.
3947 case Instruction::FNeg:
3948 Check(U.getType()->isFPOrFPVectorTy(),
3949 "FNeg operator only works with float types!", &U);
3950 break;
3951 default:
3952 llvm_unreachable("Unknown UnaryOperator opcode!");
3953 }
3954
3956}
3957
3958/// visitBinaryOperator - Check that both arguments to the binary operator are
3959/// of the same type!
3960///
3961void Verifier::visitBinaryOperator(BinaryOperator &B) {
3962 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3963 "Both operands to a binary operator are not of the same type!", &B);
3964
3965 switch (B.getOpcode()) {
3966 // Check that integer arithmetic operators are only used with
3967 // integral operands.
3968 case Instruction::Add:
3969 case Instruction::Sub:
3970 case Instruction::Mul:
3971 case Instruction::SDiv:
3972 case Instruction::UDiv:
3973 case Instruction::SRem:
3974 case Instruction::URem:
3975 Check(B.getType()->isIntOrIntVectorTy(),
3976 "Integer arithmetic operators only work with integral types!", &B);
3977 Check(B.getType() == B.getOperand(0)->getType(),
3978 "Integer arithmetic operators must have same type "
3979 "for operands and result!",
3980 &B);
3981 break;
3982 // Check that floating-point arithmetic operators are only used with
3983 // floating-point operands.
3984 case Instruction::FAdd:
3985 case Instruction::FSub:
3986 case Instruction::FMul:
3987 case Instruction::FDiv:
3988 case Instruction::FRem:
3989 Check(B.getType()->isFPOrFPVectorTy(),
3990 "Floating-point arithmetic operators only work with "
3991 "floating-point types!",
3992 &B);
3993 Check(B.getType() == B.getOperand(0)->getType(),
3994 "Floating-point arithmetic operators must have same type "
3995 "for operands and result!",
3996 &B);
3997 break;
3998 // Check that logical operators are only used with integral operands.
3999 case Instruction::And:
4000 case Instruction::Or:
4001 case Instruction::Xor:
4002 Check(B.getType()->isIntOrIntVectorTy(),
4003 "Logical operators only work with integral types!", &B);
4004 Check(B.getType() == B.getOperand(0)->getType(),
4005 "Logical operators must have same type for operands and result!", &B);
4006 break;
4007 case Instruction::Shl:
4008 case Instruction::LShr:
4009 case Instruction::AShr:
4010 Check(B.getType()->isIntOrIntVectorTy(),
4011 "Shifts only work with integral types!", &B);
4012 Check(B.getType() == B.getOperand(0)->getType(),
4013 "Shift return type must be same as operands!", &B);
4014 break;
4015 default:
4016 llvm_unreachable("Unknown BinaryOperator opcode!");
4017 }
4018
4020}
4021
4022void Verifier::visitICmpInst(ICmpInst &IC) {
4023 // Check that the operands are the same type
4024 Type *Op0Ty = IC.getOperand(0)->getType();
4025 Type *Op1Ty = IC.getOperand(1)->getType();
4026 Check(Op0Ty == Op1Ty,
4027 "Both operands to ICmp instruction are not of the same type!", &IC);
4028 // Check that the operands are the right type
4029 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4030 "Invalid operand types for ICmp instruction", &IC);
4031 // Check that the predicate is valid.
4032 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4033
4034 visitInstruction(IC);
4035}
4036
4037void Verifier::visitFCmpInst(FCmpInst &FC) {
4038 // Check that the operands are the same type
4039 Type *Op0Ty = FC.getOperand(0)->getType();
4040 Type *Op1Ty = FC.getOperand(1)->getType();
4041 Check(Op0Ty == Op1Ty,
4042 "Both operands to FCmp instruction are not of the same type!", &FC);
4043 // Check that the operands are the right type
4044 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4045 &FC);
4046 // Check that the predicate is valid.
4047 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4048
4049 visitInstruction(FC);
4050}
4051
4052void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4054 "Invalid extractelement operands!", &EI);
4055 visitInstruction(EI);
4056}
4057
4058void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4059 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4060 IE.getOperand(2)),
4061 "Invalid insertelement operands!", &IE);
4062 visitInstruction(IE);
4063}
4064
4065void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4066 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4067 SV.getShuffleMask()),
4068 "Invalid shufflevector operands!", &SV);
4069 visitInstruction(SV);
4070}
4071
4072void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4073 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4074
4075 Check(isa<PointerType>(TargetTy),
4076 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4077 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4078
4079 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4080 SmallPtrSet<Type *, 4> Visited;
4081 Check(!STy->containsScalableVectorType(&Visited),
4082 "getelementptr cannot target structure that contains scalable vector"
4083 "type",
4084 &GEP);
4085 }
4086
4087 SmallVector<Value *, 16> Idxs(GEP.indices());
4088 Check(
4089 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4090 "GEP indexes must be integers", &GEP);
4091 Type *ElTy =
4092 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4093 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4094
4095 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4096 GEP.getResultElementType() == ElTy,
4097 "GEP is not of right type for indices!", &GEP, ElTy);
4098
4099 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4100 // Additional checks for vector GEPs.
4101 ElementCount GEPWidth = GEPVTy->getElementCount();
4102 if (GEP.getPointerOperandType()->isVectorTy())
4103 Check(
4104 GEPWidth ==
4105 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4106 "Vector GEP result width doesn't match operand's", &GEP);
4107 for (Value *Idx : Idxs) {
4108 Type *IndexTy = Idx->getType();
4109 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4110 ElementCount IndexWidth = IndexVTy->getElementCount();
4111 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4112 }
4113 Check(IndexTy->isIntOrIntVectorTy(),
4114 "All GEP indices should be of integer type");
4115 }
4116 }
4117
4118 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4119 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4120 "GEP address space doesn't match type", &GEP);
4121 }
4122
4124}
4125
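// Two ranges are contiguous when one ends exactly where the other begins; such
// pairs are rejected below because they should have been merged into one range.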
4126static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4127 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4128}
4129
4130/// Verify !range and !absolute_symbol metadata. These have the same
4131/// restrictions, except !absolute_symbol allows the full set.
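/// For example, "!{i8 0, i8 2, i8 64, i8 128}" describes the two disjoint
/// half-open ranges [0, 2) and [64, 128).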
4132void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4133 Type *Ty, bool IsAbsoluteSymbol) {
4134 unsigned NumOperands = Range->getNumOperands();
4135 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4136 unsigned NumRanges = NumOperands / 2;
4137 Check(NumRanges >= 1, "It should have at least one range!", Range);
4138
4139 ConstantRange LastRange(1, true); // Dummy initial value
4140 for (unsigned i = 0; i < NumRanges; ++i) {
4141 ConstantInt *Low =
4142 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4143 Check(Low, "The lower limit must be an integer!", Low);
4144 ConstantInt *High =
4145 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4146 Check(High, "The upper limit must be an integer!", High);
4147
4148 Check(High->getType() == Low->getType(), "Range pair types must match!",
4149 &I);
4150 Check(High->getType() == Ty->getScalarType(),
4151 "Range types must match instruction type!", &I);
4152
4153 APInt HighV = High->getValue();
4154 APInt LowV = Low->getValue();
4155
4156 // ConstantRange asserts if the ranges are the same except for the min/max
4157 // value. Leave the cases it tolerates for the empty range error below.
4158 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4159 "The upper and lower limits cannot be the same value", &I);
4160
4161 ConstantRange CurRange(LowV, HighV);
4162 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4163 "Range must not be empty!", Range);
4164 if (i != 0) {
4165 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4166 "Intervals are overlapping", Range);
4167 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4168 Range);
4169 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4170 Range);
4171 }
4172 LastRange = ConstantRange(LowV, HighV);
4173 }
4174 if (NumRanges > 2) {
4175 APInt FirstLow =
4176 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4177 APInt FirstHigh =
4178 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4179 ConstantRange FirstRange(FirstLow, FirstHigh);
4180 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4181 "Intervals are overlapping", Range);
4182 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4183 Range);
4184 }
4185}
4186
4187void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4188 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4189 "precondition violation");
4190 verifyRangeMetadata(I, Range, Ty, false);
4191}
4192
4193void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4194 unsigned Size = DL.getTypeSizeInBits(Ty);
4195 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
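// Size & (Size - 1) is zero only when Size is a power of two.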
4196 Check(!(Size & (Size - 1)),
4197 "atomic memory access' operand must have a power-of-two size", Ty, I);
4198}
4199
4200void Verifier::visitLoadInst(LoadInst &LI) {
4201 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4202 Check(PTy, "Load operand must be a pointer.", &LI);
4203 Type *ElTy = LI.getType();
4204 if (MaybeAlign A = LI.getAlign()) {
4205 Check(A->value() <= Value::MaximumAlignment,
4206 "huge alignment values are unsupported", &LI);
4207 }
4208 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4209 if (LI.isAtomic()) {
4212 "Load cannot have Release ordering", &LI);
4213 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4214 "atomic load operand must have integer, pointer, or floating point "
4215 "type!",
4216 ElTy, &LI);
4217 checkAtomicMemAccessSize(ElTy, &LI);
4218 } else {
4220 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4221 }
4222
4223 visitInstruction(LI);
4224}
4225
4226void Verifier::visitStoreInst(StoreInst &SI) {
4227 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4228 Check(PTy, "Store operand must be a pointer.", &SI);
4229 Type *ElTy = SI.getOperand(0)->getType();
4230 if (MaybeAlign A = SI.getAlign()) {
4231 Check(A->value() <= Value::MaximumAlignment,
4232 "huge alignment values are unsupported", &SI);
4233 }
4234 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4235 if (SI.isAtomic()) {
4236 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4237 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4238 "Store cannot have Acquire ordering", &SI);
4239 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4240 "atomic store operand must have integer, pointer, or floating point "
4241 "type!",
4242 ElTy, &SI);
4243 checkAtomicMemAccessSize(ElTy, &SI);
4244 } else {
4245 Check(SI.getSyncScopeID() == SyncScope::System,
4246 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4247 }
4248 visitInstruction(SI);
4249}
4250
4251/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4252void Verifier::verifySwiftErrorCall(CallBase &Call,
4253 const Value *SwiftErrorVal) {
4254 for (const auto &I : llvm::enumerate(Call.args())) {
4255 if (I.value() == SwiftErrorVal) {
4256 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4257 "swifterror value when used in a callsite should be marked "
4258 "with swifterror attribute",
4259 SwiftErrorVal, Call);
4260 }
4261 }
4262}
4263
4264void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4265 // Check that swifterror value is only used by loads, stores, or as
4266 // a swifterror argument.
4267 for (const User *U : SwiftErrorVal->users()) {
4268 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4269 isa<InvokeInst>(U),
4270 "swifterror value can only be loaded and stored from, or "
4271 "as a swifterror argument!",
4272 SwiftErrorVal, U);
4273 // If it is used by a store, check it is the second operand.
4274 if (auto StoreI = dyn_cast<StoreInst>(U))
4275 Check(StoreI->getOperand(1) == SwiftErrorVal,
4276 "swifterror value should be the second operand when used "
4277 "by stores",
4278 SwiftErrorVal, U);
4279 if (auto *Call = dyn_cast<CallBase>(U))
4280 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4281 }
4282}
4283
4284void Verifier::visitAllocaInst(AllocaInst &AI) {
4285 SmallPtrSet<Type*, 4> Visited;
4286 Check(AI.getAllocatedType()->isSized(&Visited),
4287 "Cannot allocate unsized type", &AI);
4289 "Alloca array size must have integer type", &AI);
4290 if (MaybeAlign A = AI.getAlign()) {
4291 Check(A->value() <= Value::MaximumAlignment,
4292 "huge alignment values are unsupported", &AI);
4293 }
4294
4295 if (AI.isSwiftError()) {
4297 "swifterror alloca must have pointer type", &AI);
4299 "swifterror alloca must not be array allocation", &AI);
4300 verifySwiftErrorValue(&AI);
4301 }
4302
4303 visitInstruction(AI);
4304}
4305
4306void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4307 Type *ElTy = CXI.getOperand(1)->getType();
4308 Check(ElTy->isIntOrPtrTy(),
4309 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4310 checkAtomicMemAccessSize(ElTy, &CXI);
4311 visitInstruction(CXI);
4312}
4313
4314void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4316 "atomicrmw instructions cannot be unordered.", &RMWI);
4317 auto Op = RMWI.getOperation();
4318 Type *ElTy = RMWI.getOperand(1)->getType();
4319 if (Op == AtomicRMWInst::Xchg) {
4320 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4321 ElTy->isPointerTy(),
4322 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4323 " operand must have integer or floating point type!",
4324 &RMWI, ElTy);
4325 } else if (AtomicRMWInst::isFPOperation(Op)) {
4326 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4327 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4328 " operand must have floating-point or fixed vector of floating-point "
4329 "type!",
4330 &RMWI, ElTy);
4331 } else {
4332 Check(ElTy->isIntegerTy(),
4333 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4334 " operand must have integer type!",
4335 &RMWI, ElTy);
4336 }
4337 checkAtomicMemAccessSize(ElTy, &RMWI);
4339 "Invalid binary operation!", &RMWI);
4340 visitInstruction(RMWI);
4341}
4342
4343void Verifier::visitFenceInst(FenceInst &FI) {
4344 const AtomicOrdering Ordering = FI.getOrdering();
4345 Check(Ordering == AtomicOrdering::Acquire ||
4346 Ordering == AtomicOrdering::Release ||
4347 Ordering == AtomicOrdering::AcquireRelease ||
4349 "fence instructions may only have acquire, release, acq_rel, or "
4350 "seq_cst ordering.",
4351 &FI);
4352 visitInstruction(FI);
4353}
4354
4355void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4356 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4357 EVI.getIndices()) == EVI.getType(),
4358 "Invalid ExtractValueInst operands!", &EVI);
4359
4360 visitInstruction(EVI);
4361}
4362
4363void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4364 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4365 IVI.getIndices()) ==
4366 IVI.getOperand(1)->getType(),
4367 "Invalid InsertValueInst operands!", &IVI);
4368
4369 visitInstruction(IVI);
4370}
4371
4372static Value *getParentPad(Value *EHPad) {
4373 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4374 return FPI->getParentPad();
4375
4376 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4377}
4378
4379void Verifier::visitEHPadPredecessors(Instruction &I) {
4380 assert(I.isEHPad());
4381
4382 BasicBlock *BB = I.getParent();
4383 Function *F = BB->getParent();
4384
4385 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4386
4387 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4388 // The landingpad instruction defines its parent as a landing pad block. The
4389 // landing pad block may be branched to only by the unwind edge of an
4390 // invoke.
4391 for (BasicBlock *PredBB : predecessors(BB)) {
4392 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4393 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4394 "Block containing LandingPadInst must be jumped to "
4395 "only by the unwind edge of an invoke.",
4396 LPI);
4397 }
4398 return;
4399 }
4400 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4401 if (!pred_empty(BB))
4402 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4403 "Block containg CatchPadInst must be jumped to "
4404 "only by its catchswitch.",
4405 CPI);
4406 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4407 "Catchswitch cannot unwind to one of its catchpads",
4408 CPI->getCatchSwitch(), CPI);
4409 return;
4410 }
4411
4412 // Verify that each pred has a legal terminator with a legal to/from EH
4413 // pad relationship.
4414 Instruction *ToPad = &I;
4415 Value *ToPadParent = getParentPad(ToPad);
4416 for (BasicBlock *PredBB : predecessors(BB)) {
4417 Instruction *TI = PredBB->getTerminator();
4418 Value *FromPad;
4419 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4420 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4421 "EH pad must be jumped to via an unwind edge", ToPad, II);
4422 auto *CalledFn =
4423 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4424 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4425 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4426 continue;
4427 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4428 FromPad = Bundle->Inputs[0];
4429 else
4430 FromPad = ConstantTokenNone::get(II->getContext());
4431 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4432 FromPad = CRI->getOperand(0);
4433 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4434 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4435 FromPad = CSI;
4436 } else {
4437 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4438 }
4439
4440 // The edge may exit from zero or more nested pads.
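// Walk up the parent-pad chain from FromPad until we reach ToPad's parent,
// which makes the edge legal; hitting ToPad itself, the 'none' token, or a
// cycle along the way is a verifier error.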
4441 SmallPtrSet<Value *, 8> Seen;
4442 for (;; FromPad = getParentPad(FromPad)) {
4443 Check(FromPad != ToPad,
4444 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4445 if (FromPad == ToPadParent) {
4446 // This is a legal unwind edge.
4447 break;
4448 }
4449 Check(!isa<ConstantTokenNone>(FromPad),
4450 "A single unwind edge may only enter one EH pad", TI);
4451 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4452 FromPad);
4453
4454 // This will be diagnosed on the corresponding instruction already. We
4455 // need the extra check here to make sure getParentPad() works.
4456 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4457 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4458 }
4459 }
4460}
4461
4462void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4463 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4464 // isn't a cleanup.
4465 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4466 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4467
4468 visitEHPadPredecessors(LPI);
4469
4470 if (!LandingPadResultTy)
4471 LandingPadResultTy = LPI.getType();
4472 else
4473 Check(LandingPadResultTy == LPI.getType(),
4474 "The landingpad instruction should have a consistent result type "
4475 "inside a function.",
4476 &LPI);
4477
4478 Function *F = LPI.getParent()->getParent();
4479 Check(F->hasPersonalityFn(),
4480 "LandingPadInst needs to be in a function with a personality.", &LPI);
4481
4482 // The landingpad instruction must be the first non-PHI instruction in the
4483 // block.
4484 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4485 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4486
4487 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4488 Constant *Clause = LPI.getClause(i);
4489 if (LPI.isCatch(i)) {
4490 Check(isa<PointerType>(Clause->getType()),
4491 "Catch operand does not have pointer type!", &LPI);
4492 } else {
4493 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4494 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4495 "Filter operand is not an array of constants!", &LPI);
4496 }
4497 }
4498
4499 visitInstruction(LPI);
4500}
4501
4502void Verifier::visitResumeInst(ResumeInst &RI) {
4504 "ResumeInst needs to be in a function with a personality.", &RI);
4505
4506 if (!LandingPadResultTy)
4507 LandingPadResultTy = RI.getValue()->getType();
4508 else
4509 Check(LandingPadResultTy == RI.getValue()->getType(),
4510 "The resume instruction should have a consistent result type "
4511 "inside a function.",
4512 &RI);
4513
4514 visitTerminator(RI);
4515}
4516
4517void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4518 BasicBlock *BB = CPI.getParent();
4519
4520 Function *F = BB->getParent();
4521 Check(F->hasPersonalityFn(),
4522 "CatchPadInst needs to be in a function with a personality.", &CPI);
4523
4524 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4525 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4526 CPI.getParentPad());
4527
4528 // The catchpad instruction must be the first non-PHI instruction in the
4529 // block.
4530 Check(BB->getFirstNonPHI() == &CPI,
4531 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4532
4533 visitEHPadPredecessors(CPI);
4535}
4536
4537void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4538 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4539 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4540 CatchReturn.getOperand(0));
4541
4542 visitTerminator(CatchReturn);
4543}
4544
4545void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4546 BasicBlock *BB = CPI.getParent();
4547
4548 Function *F = BB->getParent();
4549 Check(F->hasPersonalityFn(),
4550 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4551
4552 // The cleanuppad instruction must be the first non-PHI instruction in the
4553 // block.
4554 Check(BB->getFirstNonPHI() == &CPI,
4555 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4556
4557 auto *ParentPad = CPI.getParentPad();
4558 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4559 "CleanupPadInst has an invalid parent.", &CPI);
4560
4561 visitEHPadPredecessors(CPI);
4563}
4564
4565void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4566 User *FirstUser = nullptr;
4567 Value *FirstUnwindPad = nullptr;
4568 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4569 SmallSet<FuncletPadInst *, 8> Seen;
4570
4571 while (!Worklist.empty()) {
4572 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4573 Check(Seen.insert(CurrentPad).second,
4574 "FuncletPadInst must not be nested within itself", CurrentPad);
4575 Value *UnresolvedAncestorPad = nullptr;
4576 for (User *U : CurrentPad->users()) {
4577 BasicBlock *UnwindDest;
4578 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4579 UnwindDest = CRI->getUnwindDest();
4580 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4581 // We allow catchswitch unwind to caller to nest
4582 // within an outer pad that unwinds somewhere else,
4583 // because catchswitch doesn't have a nounwind variant.
4584 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4585 if (CSI->unwindsToCaller())
4586 continue;
4587 UnwindDest = CSI->getUnwindDest();
4588 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4589 UnwindDest = II->getUnwindDest();
4590 } else if (isa<CallInst>(U)) {
4591 // Calls which don't unwind may be found inside funclet
4592 // pads that unwind somewhere else. We don't *require*
4593 // such calls to be annotated nounwind.
4594 continue;
4595 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4596 // The unwind dest for a cleanup can only be found by
4597 // recursive search. Add it to the worklist, and we'll
4598 // search for its first use that determines where it unwinds.
4599 Worklist.push_back(CPI);
4600 continue;
4601 } else {
4602 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4603 continue;
4604 }
4605
4606 Value *UnwindPad;
4607 bool ExitsFPI;
4608 if (UnwindDest) {
4609 UnwindPad = UnwindDest->getFirstNonPHI();
4610 if (!cast<Instruction>(UnwindPad)->isEHPad())
4611 continue;
4612 Value *UnwindParent = getParentPad(UnwindPad);
4613 // Ignore unwind edges that don't exit CurrentPad.
4614 if (UnwindParent == CurrentPad)
4615 continue;
4616 // Determine whether the original funclet pad is exited,
4617 // and if we are scanning nested pads determine how many
4618 // of them are exited so we can stop searching their
4619 // children.
4620 Value *ExitedPad = CurrentPad;
4621 ExitsFPI = false;
4622 do {
4623 if (ExitedPad == &FPI) {
4624 ExitsFPI = true;
4625 // Now we can resolve any ancestors of CurrentPad up to
4626 // FPI, but not including FPI since we need to make sure
4627 // to check all direct users of FPI for consistency.
4628 UnresolvedAncestorPad = &FPI;
4629 break;
4630 }
4631 Value *ExitedParent = getParentPad(ExitedPad);
4632 if (ExitedParent == UnwindParent) {
4633 // ExitedPad is the ancestor-most pad which this unwind
4634 // edge exits, so we can resolve up to it, meaning that
4635 // ExitedParent is the first ancestor still unresolved.
4636 UnresolvedAncestorPad = ExitedParent;
4637 break;
4638 }
4639 ExitedPad = ExitedParent;
4640 } while (!isa<ConstantTokenNone>(ExitedPad));
4641 } else {
4642 // Unwinding to caller exits all pads.
4643 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4644 ExitsFPI = true;
4645 UnresolvedAncestorPad = &FPI;
4646 }
4647
4648 if (ExitsFPI) {
4649 // This unwind edge exits FPI. Make sure it agrees with other
4650 // such edges.
4651 if (FirstUser) {
4652 Check(UnwindPad == FirstUnwindPad,
4653 "Unwind edges out of a funclet "
4654 "pad must have the same unwind "
4655 "dest",
4656 &FPI, U, FirstUser);
4657 } else {
4658 FirstUser = U;
4659 FirstUnwindPad = UnwindPad;
4660 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4661 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4662 getParentPad(UnwindPad) == getParentPad(&FPI))
4663 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4664 }
4665 }
4666 // Make sure we visit all uses of FPI, but for nested pads stop as
4667 // soon as we know where they unwind to.
4668 if (CurrentPad != &FPI)
4669 break;
4670 }
4671 if (UnresolvedAncestorPad) {
4672 if (CurrentPad == UnresolvedAncestorPad) {
4673 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4674 // we've found an unwind edge that exits it, because we need to verify
4675 // all direct uses of FPI.
4676 assert(CurrentPad == &FPI);
4677 continue;
4678 }
4679 // Pop off the worklist any nested pads that we've found an unwind
4680 // destination for. The pads on the worklist are the uncles,
4681 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4682 // for all ancestors of CurrentPad up to but not including
4683 // UnresolvedAncestorPad.
4684 Value *ResolvedPad = CurrentPad;
4685 while (!Worklist.empty()) {
4686 Value *UnclePad = Worklist.back();
4687 Value *AncestorPad = getParentPad(UnclePad);
4688 // Walk ResolvedPad up the ancestor list until we either find the
4689 // uncle's parent or the last resolved ancestor.
4690 while (ResolvedPad != AncestorPad) {
4691 Value *ResolvedParent = getParentPad(ResolvedPad);
4692 if (ResolvedParent == UnresolvedAncestorPad) {
4693 break;
4694 }
4695 ResolvedPad = ResolvedParent;
4696 }
4697 // If the resolved ancestor search didn't find the uncle's parent,
4698 // then the uncle is not yet resolved.
4699 if (ResolvedPad != AncestorPad)
4700 break;
4701 // This uncle is resolved, so pop it from the worklist.
4702 Worklist.pop_back();
4703 }
4704 }
4705 }
4706
4707 if (FirstUnwindPad) {
4708 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4709 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4710 Value *SwitchUnwindPad;
4711 if (SwitchUnwindDest)
4712 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4713 else
4714 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4715 Check(SwitchUnwindPad == FirstUnwindPad,
4716 "Unwind edges out of a catch must have the same unwind dest as "
4717 "the parent catchswitch",
4718 &FPI, FirstUser, CatchSwitch);
4719 }
4720 }
4721
4722 visitInstruction(FPI);
4723}
4724
4725void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4726 BasicBlock *BB = CatchSwitch.getParent();
4727
4728 Function *F = BB->getParent();
4729 Check(F->hasPersonalityFn(),
4730 "CatchSwitchInst needs to be in a function with a personality.",
4731 &CatchSwitch);
4732
4733 // The catchswitch instruction must be the first non-PHI instruction in the
4734 // block.
4735 Check(BB->getFirstNonPHI() == &CatchSwitch,
4736 "CatchSwitchInst not the first non-PHI instruction in the block.",
4737 &CatchSwitch);
4738
4739 auto *ParentPad = CatchSwitch.getParentPad();
4740 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4741 "CatchSwitchInst has an invalid parent.", ParentPad);
4742
4743 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4744 Instruction *I = UnwindDest->getFirstNonPHI();
4745 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4746 "CatchSwitchInst must unwind to an EH block which is not a "
4747 "landingpad.",
4748 &CatchSwitch);
4749
4750 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4751 if (getParentPad(I) == ParentPad)
4752 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4753 }
4754
4755 Check(CatchSwitch.getNumHandlers() != 0,
4756 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4757
4758 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4759 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4760 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4761 }
4762
4763 visitEHPadPredecessors(CatchSwitch);
4764 visitTerminator(CatchSwitch);
4765}
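// Illustrative example (hypothetical labels and values): a minimal funclet
// shape that satisfies the checks above, where the catchpad is the first
// non-PHI instruction of its handler block and the catchswitch's parent pad
// is the 'none' token:
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ptr null, i32 64, ptr null]
//     catchret from %cp to label %cont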
4766
4767void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4768 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4769 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4770 CRI.getOperand(0));
4771
4772 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4773 Instruction *I = UnwindDest->getFirstNonPHI();
4774 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4775 "CleanupReturnInst must unwind to an EH block which is not a "
4776 "landingpad.",
4777 &CRI);
4778 }
4779
4780 visitTerminator(CRI);
4781}
4782
4783void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4784 Instruction *Op = cast<Instruction>(I.getOperand(i));
4785 // If we have an invalid invoke, don't try to compute the dominance.
4786 // We already reject it in the invoke specific checks and the dominance
4787 // computation doesn't handle multiple edges.
4788 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4789 if (II->getNormalDest() == II->getUnwindDest())
4790 return;
4791 }
4792
4793 // Quick check whether the def has already been encountered in the same block.
4794 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4795 // uses are defined to happen on the incoming edge, not at the instruction.
4796 //
4797 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4798 // wrapping an SSA value, assert that we've already encountered it. See
4799 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4800 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4801 return;
4802
4803 const Use &U = I.getOperandUse(i);
4804 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4805}
4806
4807void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4808 Check(I.getType()->isPointerTy(),
4809 "dereferenceable, dereferenceable_or_null "
4810 "apply only to pointer types",
4811 &I);
4812 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4813 "dereferenceable, dereferenceable_or_null apply only to load"
4814 " and inttoptr instructions, use attributes for calls or invokes",
4815 &I);
4816 Check(MD->getNumOperands() == 1,
4817 "dereferenceable, dereferenceable_or_null "
4818 "take one operand!",
4819 &I);
4820 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4821 Check(CI && CI->getType()->isIntegerTy(64),
4822 "dereferenceable, "
4823 "dereferenceable_or_null metadata value must be an i64!",
4824 &I);
4825}
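// Illustrative example (hypothetical names): the metadata is a single i64
// giving the number of dereferenceable bytes, attached to a pointer-producing
// load:
//   %p = load ptr, ptr %q, !dereferenceable !0
//   !0 = !{i64 8}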
4826
4827void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4828 Check(MD->getNumOperands() >= 2,
4829 "!prof annotations should have no less than 2 operands", MD);
4830
4831 // Check first operand.
4832 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4833 Check(isa<MDString>(MD->getOperand(0)),
4834 "expected string with name of the !prof annotation", MD);
4835 MDString *MDS = cast<MDString>(MD->getOperand(0));
4836 StringRef ProfName = MDS->getString();
4837
4838 // Check consistency of !prof branch_weights metadata.
4839 if (ProfName == "branch_weights") {
4840 unsigned NumBranchWeights = getNumBranchWeights(*MD);
4841 if (isa<InvokeInst>(&I)) {
4842 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
4843 "Wrong number of InvokeInst branch_weights operands", MD);
4844 } else {
4845 unsigned ExpectedNumOperands = 0;
4846 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4847 ExpectedNumOperands = BI->getNumSuccessors();
4848 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4849 ExpectedNumOperands = SI->getNumSuccessors();
4850 else if (isa<CallInst>(&I))
4851 ExpectedNumOperands = 1;
4852 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4853 ExpectedNumOperands = IBI->getNumDestinations();
4854 else if (isa<SelectInst>(&I))
4855 ExpectedNumOperands = 2;
4856 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4857 ExpectedNumOperands = CI->getNumSuccessors();
4858 else
4859 CheckFailed("!prof branch_weights are not allowed for this instruction",
4860 MD);
4861
4862 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
4863 MD);
4864 }
4865 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
4866 ++i) {
4867 auto &MDO = MD->getOperand(i);
4868 Check(MDO, "second operand should not be null", MD);
4869 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4870 "!prof brunch_weights operand is not a const int");
4871 }
4872 }
4873}
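// Illustrative example (hypothetical names): branch_weights on a conditional
// branch must carry one weight per successor, e.g.
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 80}
// A select likewise takes two weights, and a call exactly one.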
4874
4875void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4876 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4877 bool ExpectedInstTy =
4878 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4879 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4880 I, MD);
4881 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4882 // only be found as DbgAssignIntrinsic operands.
4883 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4884 for (auto *User : AsValue->users()) {
4885 CheckDI(isa<DbgAssignIntrinsic>(User),
4886 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4887 MD, User);
4888 // All of the dbg.assign intrinsics should be in the same function as I.
4889 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4890 CheckDI(DAI->getFunction() == I.getFunction(),
4891 "dbg.assign not in same function as inst", DAI, &I);
4892 }
4893 }
4894 for (DbgVariableRecord *DVR :
4895 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4896 CheckDI(DVR->isDbgAssign(),
4897 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4898 CheckDI(DVR->getFunction() == I.getFunction(),
4899 "DVRAssign not in same function as inst", DVR, &I);
4900 }
4901}
4902
4903void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4905 "!mmra metadata attached to unexpected instruction kind", I, MD);
4906
4907 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4908 // list of tags such as !2 in the following example:
4909 // !0 = !{!"a", !"b"}
4910 // !1 = !{!"c", !"d"}
4911 // !2 = !{!0, !1}
4912 if (MMRAMetadata::isTagMD(MD))
4913 return;
4914
4915 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4916 for (const MDOperand &MDOp : MD->operands())
4917 Check(MMRAMetadata::isTagMD(MDOp.get()),
4918 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4919}
4920
4921void Verifier::visitCallStackMetadata(MDNode *MD) {
4922 // Call stack metadata should consist of a list of at least 1 constant int
4923 // (representing a hash of the location).
4924 Check(MD->getNumOperands() >= 1,
4925 "call stack metadata should have at least 1 operand", MD);
4926
4927 for (const auto &Op : MD->operands())
4928 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4929 "call stack metadata operand should be constant integer", Op);
4930}
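// Illustrative example: a call stack node is simply a list of constant
// integer frame ids (typically i64 hashes), e.g.
//   !0 = !{i64 123, i64 456}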
4931
4932void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4933 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4934 Check(MD->getNumOperands() >= 1,
4935 "!memprof annotations should have at least 1 metadata operand "
4936 "(MemInfoBlock)",
4937 MD);
4938
4939 // Check each MIB
4940 for (auto &MIBOp : MD->operands()) {
4941 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4942 // The first operand of an MIB should be the call stack metadata.
4943 // The rest of the operands should be MDString tags, and there should be
4944 // at least one.
4945 Check(MIB->getNumOperands() >= 2,
4946 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4947
4948 // Check call stack metadata (first operand).
4949 Check(MIB->getOperand(0) != nullptr,
4950 "!memprof MemInfoBlock first operand should not be null", MIB);
4951 Check(isa<MDNode>(MIB->getOperand(0)),
4952 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4953 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4954 visitCallStackMetadata(StackMD);
4955
4956 // Check that remaining operands, except possibly the last, are MDString.
4957 Check(llvm::all_of(MIB->operands().drop_front().drop_back(),
4958 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4959 "Not all !memprof MemInfoBlock operands 1 to N-1 are MDString", MIB);
4960 // The last operand might be the total profiled size so can be an integer.
4961 auto &LastOperand = MIB->operands().back();
4962 Check(isa<MDString>(LastOperand) || mdconst::hasa<ConstantInt>(LastOperand),
4963 "Last !memprof MemInfoBlock operand not MDString or int", MIB);
4964 }
4965}
4966
4967void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4968 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4969 // Verify the partial callstack annotated from memprof profiles. This callsite
4970 // is a part of a profiled allocation callstack.
4971 visitCallStackMetadata(MD);
4972}
4973
4974void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4975 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4976 Check(Annotation->getNumOperands() >= 1,
4977 "annotation must have at least one operand");
4978 for (const MDOperand &Op : Annotation->operands()) {
4979 bool TupleOfStrings =
4980 isa<MDTuple>(Op.get()) &&
4981 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4982 return isa<MDString>(Annotation.get());
4983 });
4984 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4985 "operands must be a string or a tuple of strings");
4986 }
4987}
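// Illustrative example: each !annotation operand is either a string or a
// tuple of strings, so both of the following attachments are accepted:
//   !0 = !{!"auto-init"}
//   !1 = !{!2}
//   !2 = !{!"key", !"value"}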
4988
4989void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4990 unsigned NumOps = MD->getNumOperands();
4991 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4992 MD);
4993 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4994 "first scope operand must be self-referential or string", MD);
4995 if (NumOps == 3)
4996 Check(isa<MDString>(MD->getOperand(2)),
4997 "third scope operand must be string (if used)", MD);
4998
4999 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5000 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5001
5002 unsigned NumDomainOps = Domain->getNumOperands();
5003 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5004 "domain must have one or two operands", Domain);
5005 Check(Domain->getOperand(0).get() == Domain ||
5006 isa<MDString>(Domain->getOperand(0)),
5007 "first domain operand must be self-referential or string", Domain);
5008 if (NumDomainOps == 2)
5009 Check(isa<MDString>(Domain->getOperand(1)),
5010 "second domain operand must be string (if used)", Domain);
5011}
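// Illustrative example (hypothetical names): a scope whose first operand is
// self-referential, whose second operand is its domain, and which carries an
// optional name string:
//   !0 = distinct !{!0, !1, !"scope"}
//   !1 = distinct !{!1, !"domain"}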
5012
5013void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5014 for (const MDOperand &Op : MD->operands()) {
5015 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5016 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5017 visitAliasScopeMetadata(OpMD);
5018 }
5019}
5020
5021void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5022 auto IsValidAccessScope = [](const MDNode *MD) {
5023 return MD->getNumOperands() == 0 && MD->isDistinct();
5024 };
5025
5026 // It must be either an access scope itself...
5027 if (IsValidAccessScope(MD))
5028 return;
5029
5030 // ...or a list of access scopes.
5031 for (const MDOperand &Op : MD->operands()) {
5032 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5033 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5034 Check(IsValidAccessScope(OpMD),
5035 "Access scope list contains invalid access scope", MD);
5036 }
5037}
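// Illustrative example: an access group is a distinct, operand-less node,
// and an attachment may also be a list of such nodes:
//   !0 = distinct !{}
//   !1 = distinct !{}
//   !2 = !{!0, !1}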
5038
5039/// verifyInstruction - Verify that an instruction is well formed.
5040///
5041void Verifier::visitInstruction(Instruction &I) {
5042 BasicBlock *BB = I.getParent();
5043 Check(BB, "Instruction not embedded in basic block!", &I);
5044
5045 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5046 for (User *U : I.users()) {
5047 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5048 "Only PHI nodes may reference their own value!", &I);
5049 }
5050 }
5051
5052 // Check that void typed values don't have names
5053 Check(!I.getType()->isVoidTy() || !I.hasName(),
5054 "Instruction has a name, but provides a void value!", &I);
5055
5056 // Check that the return value of the instruction is either void or a legal
5057 // value type.
5058 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5059 "Instruction returns a non-scalar type!", &I);
5060
5061 // Check that the instruction doesn't produce metadata. Calls are already
5062 // checked against the callee type.
5063 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5064 "Invalid use of metadata!", &I);
5065
5066 // Check that all uses of the instruction, if they are instructions
5067 // themselves, actually have parent basic blocks. If the use is not an
5068 // instruction, it is an error!
5069 for (Use &U : I.uses()) {
5070 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5071 Check(Used->getParent() != nullptr,
5072 "Instruction referencing"
5073 " instruction not embedded in a basic block!",
5074 &I, Used);
5075 else {
5076 CheckFailed("Use of instruction is not an instruction!", U);
5077 return;
5078 }
5079 }
5080
5081 // Get a pointer to the call base of the instruction if it is some form of
5082 // call.
5083 const CallBase *CBI = dyn_cast<CallBase>(&I);
5084
5085 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5086 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5087
5088 // Check to make sure that only first-class-values are operands to
5089 // instructions.
5090 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5091 Check(false, "Instruction operands must be first-class values!", &I);
5092 }
5093
5094 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5095 // This code checks whether the function is used as the operand of a
5096 // clang_arc_attachedcall operand bundle.
5097 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5098 int Idx) {
5099 return CBI && CBI->isOperandBundleOfType(
5100 LLVMContext::OB_clang_arc_attachedcall, Idx);
5101 };
5102
5103 // Check to make sure that the "address of" an intrinsic function is never
5104 // taken. Ignore cases where the address of the intrinsic function is used
5105 // as the argument of operand bundle "clang.arc.attachedcall" as those
5106 // cases are handled in verifyAttachedCallBundle.
5107 Check((!F->isIntrinsic() ||
5108 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5109 IsAttachedCallOperand(F, CBI, i)),
5110 "Cannot take the address of an intrinsic!", &I);
5111 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5112 F->getIntrinsicID() == Intrinsic::donothing ||
5113 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5114 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5115 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5116 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5117 F->getIntrinsicID() == Intrinsic::coro_resume ||
5118 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5119 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5120 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5121 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5122 F->getIntrinsicID() ==
5123 Intrinsic::experimental_patchpoint_void ||
5124 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5125 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5126 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5127 IsAttachedCallOperand(F, CBI, i),
5128 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5129 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5130 &I);
5131 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5132 &M, F, F->getParent());
5133 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5134 Check(OpBB->getParent() == BB->getParent(),
5135 "Referring to a basic block in another function!", &I);
5136 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5137 Check(OpArg->getParent() == BB->getParent(),
5138 "Referring to an argument in another function!", &I);
5139 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5140 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5141 &M, GV, GV->getParent());
5142 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5143 Check(OpInst->getFunction() == BB->getParent(),
5144 "Referring to an instruction in another function!", &I);
5145 verifyDominatesUse(I, i);
5146 } else if (isa<InlineAsm>(I.getOperand(i))) {
5147 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5148 "Cannot take the address of an inline asm!", &I);
5149 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5150 visitConstantExprsRecursively(CPA);
5151 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5152 if (CE->getType()->isPtrOrPtrVectorTy()) {
5153 // If we have a ConstantExpr pointer, we need to see if it came from an
5154 // illegal bitcast.
5155 visitConstantExprsRecursively(CE);
5156 }
5157 }
5158 }
5159
5160 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5161 Check(I.getType()->isFPOrFPVectorTy(),
5162 "fpmath requires a floating point result!", &I);
5163 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5164 if (ConstantFP *CFP0 =
5165 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5166 const APFloat &Accuracy = CFP0->getValueAPF();
5167 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5168 "fpmath accuracy must have float type", &I);
5169 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5170 "fpmath accuracy not a positive number!", &I);
5171 } else {
5172 Check(false, "invalid fpmath accuracy!", &I);
5173 }
5174 }
5175
5176 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5177 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5178 "Ranges are only for loads, calls and invokes!", &I);
5179 visitRangeMetadata(I, Range, I.getType());
5180 }
5181
5182 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5183 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5184 "invariant.group metadata is only for loads and stores", &I);
5185 }
5186
5187 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5188 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5189 &I);
5190 Check(isa<LoadInst>(I),
5191 "nonnull applies only to load instructions, use attributes"
5192 " for calls or invokes",
5193 &I);
5194 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5195 }
5196
5197 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5198 visitDereferenceableMetadata(I, MD);
5199
5200 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5201 visitDereferenceableMetadata(I, MD);
5202
5203 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5204 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5205
5206 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5207 visitAliasScopeListMetadata(MD);
5208 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5209 visitAliasScopeListMetadata(MD);
5210
5211 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5212 visitAccessGroupMetadata(MD);
5213
5214 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5215 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5216 &I);
5217 Check(isa<LoadInst>(I),
5218 "align applies only to load instructions, "
5219 "use attributes for calls or invokes",
5220 &I);
5221 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5222 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5223 Check(CI && CI->getType()->isIntegerTy(64),
5224 "align metadata value must be an i64!", &I);
5225 uint64_t Align = CI->getZExtValue();
5226 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5227 &I);
5229 "alignment is larger that implementation defined limit", &I);
5230 }
5231
5232 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5233 visitProfMetadata(I, MD);
5234
5235 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5236 visitMemProfMetadata(I, MD);
5237
5238 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5239 visitCallsiteMetadata(I, MD);
5240
5241 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5242 visitDIAssignIDMetadata(I, MD);
5243
5244 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5245 visitMMRAMetadata(I, MMRA);
5246
5247 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5248 visitAnnotationMetadata(Annotation);
5249
5250 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5251 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5252 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5253 }
5254
5255 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5256 verifyFragmentExpression(*DII);
5257 verifyNotEntryValue(*DII);
5258 }
5259
5260 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5261 I.getAllMetadata(MDs);
5262 for (auto Attachment : MDs) {
5263 unsigned Kind = Attachment.first;
5264 auto AllowLocs =
5265 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5266 ? AreDebugLocsAllowed::Yes
5267 : AreDebugLocsAllowed::No;
5268 visitMDNode(*Attachment.second, AllowLocs);
5269 }
5270
5271 InstsInThisBlock.insert(&I);
5272}
5273
5274/// Allow intrinsics to be verified in different ways.
5275void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5276 Function *IF = Call.getCalledFunction();
5277 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5278 IF);
5279
5280 // Verify that the intrinsic prototype lines up with what the .td files
5281 // describe.
5282 FunctionType *IFTy = IF->getFunctionType();
5283 bool IsVarArg = IFTy->isVarArg();
5284
5285 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5286 getIntrinsicInfoTableEntries(ID, Table);
5287 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5288
5289 // Walk the descriptors to extract overloaded types.
5290 SmallVector<Type *, 4> ArgTys;
5291 Intrinsic::MatchIntrinsicTypesResult Res =
5292 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5293 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5294 "Intrinsic has incorrect return type!", IF);
5295 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5296 "Intrinsic has incorrect argument type!", IF);
5297
5298 // Verify if the intrinsic call matches the vararg property.
5299 if (IsVarArg)
5301 "Intrinsic was not defined with variable arguments!", IF);
5302 else
5304 "Callsite was not defined with variable arguments!", IF);
5305
5306 // All descriptors should be absorbed by now.
5307 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5308
5309 // Now that we have the intrinsic ID and the actual argument types (and we
5310 // know they are legal for the intrinsic!) get the intrinsic name through the
5311 // usual means. This allows us to verify the mangling of argument types into
5312 // the name.
5313 const std::string ExpectedName =
5314 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5315 Check(ExpectedName == IF->getName(),
5316 "Intrinsic name not mangled correctly for type arguments! "
5317 "Should be: " +
5318 ExpectedName,
5319 IF);
5320
5321 // If the intrinsic takes MDNode arguments, verify that they are either global
5322 // or are local to *this* function.
5323 for (Value *V : Call.args()) {
5324 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5325 visitMetadataAsValue(*MD, Call.getCaller());
5326 if (auto *Const = dyn_cast<Constant>(V))
5327 Check(!Const->getType()->isX86_AMXTy(),
5328 "const x86_amx is not allowed in argument!");
5329 }
5330
5331 switch (ID) {
5332 default:
5333 break;
5334 case Intrinsic::assume: {
5335 for (auto &Elem : Call.bundle_op_infos()) {
5336 unsigned ArgCount = Elem.End - Elem.Begin;
5337 // Separate storage assumptions are special insofar as they're the only
5338 // operand bundles allowed on assumes that aren't parameter attributes.
5339 if (Elem.Tag->getKey() == "separate_storage") {
5340 Check(ArgCount == 2,
5341 "separate_storage assumptions should have 2 arguments", Call);
5342 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5343 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5344 "arguments to separate_storage assumptions should be pointers",
5345 Call);
5346 return;
5347 }
5348 Check(Elem.Tag->getKey() == "ignore" ||
5349 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5350 "tags must be valid attribute names", Call);
5351 Attribute::AttrKind Kind =
5352 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5353 if (Kind == Attribute::Alignment) {
5354 Check(ArgCount <= 3 && ArgCount >= 2,
5355 "alignment assumptions should have 2 or 3 arguments", Call);
5356 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5357 "first argument should be a pointer", Call);
5358 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5359 "second argument should be an integer", Call);
5360 if (ArgCount == 3)
5361 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5362 "third argument should be an integer if present", Call);
5363 return;
5364 }
5365 Check(ArgCount <= 2, "too many arguments", Call);
5366 if (Kind == Attribute::None)
5367 break;
5368 if (Attribute::isIntAttrKind(Kind)) {
5369 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5370 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5371 "the second argument should be a constant integral value", Call);
5372 } else if (Attribute::canUseAsParamAttr(Kind)) {
5373 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5374 } else if (Attribute::canUseAsFnAttr(Kind)) {
5375 Check((ArgCount) == 0, "this attribute has no argument", Call);
5376 }
5377 }
5378 break;
5379 }
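// Illustrative examples (hypothetical values): assume operand bundles that
// satisfy the rules above:
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16)]
//   call void @llvm.assume(i1 true) ["nonnull"(ptr %p)]
//   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]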
5380 case Intrinsic::ucmp:
5381 case Intrinsic::scmp: {
5382 Type *SrcTy = Call.getOperand(0)->getType();
5383 Type *DestTy = Call.getType();
5384
5385 Check(DestTy->getScalarSizeInBits() >= 2,
5386 "result type must be at least 2 bits wide", Call);
5387
5388 bool IsDestTypeVector = DestTy->isVectorTy();
5389 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5390 "ucmp/scmp argument and result types must both be either vector or "
5391 "scalar types",
5392 Call);
5393 if (IsDestTypeVector) {
5394 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5395 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5396 Check(SrcVecLen == DestVecLen,
5397 "return type and arguments must have the same number of "
5398 "elements",
5399 Call);
5400 }
5401 break;
5402 }
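// Illustrative example (hypothetical names): a scalar three-way comparison
// whose result type is at least 2 bits wide:
//   %r = call i8 @llvm.scmp.i8.i32(i32 %a, i32 %b)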
5403 case Intrinsic::coro_id: {
5404 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5405 if (isa<ConstantPointerNull>(InfoArg))
5406 break;
5407 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5408 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5409 "info argument of llvm.coro.id must refer to an initialized "
5410 "constant");
5411 Constant *Init = GV->getInitializer();
5412 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5413 "info argument of llvm.coro.id must refer to either a struct or "
5414 "an array");
5415 break;
5416 }
5417 case Intrinsic::is_fpclass: {
5418 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5419 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5420 "unsupported bits for llvm.is.fpclass test mask");
5421 break;
5422 }
5423 case Intrinsic::fptrunc_round: {
5424 // Check the rounding mode
5425 Metadata *MD = nullptr;
5426 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5427 if (MAV)
5428 MD = MAV->getMetadata();
5429
5430 Check(MD != nullptr, "missing rounding mode argument", Call);
5431
5432 Check(isa<MDString>(MD),
5433 ("invalid value for llvm.fptrunc.round metadata operand"
5434 " (the operand should be a string)"),
5435 MD);
5436
5437 std::optional<RoundingMode> RoundMode =
5438 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5439 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5440 "unsupported rounding mode argument", Call);
5441 break;
5442 }
5443#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5444#include "llvm/IR/VPIntrinsics.def"
5445#undef BEGIN_REGISTER_VP_INTRINSIC
5446 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5447 break;
5448#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5449 case Intrinsic::INTRINSIC:
5450#include "llvm/IR/ConstrainedOps.def"
5451#undef INSTRUCTION
5452 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5453 break;
5454 case Intrinsic::dbg_declare: // llvm.dbg.declare
5455 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5456 "invalid llvm.dbg.declare intrinsic call 1", Call);
5457 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5458 break;
5459 case Intrinsic::dbg_value: // llvm.dbg.value
5460 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5461 break;
5462 case Intrinsic::dbg_assign: // llvm.dbg.assign
5463 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5464 break;
5465 case Intrinsic::dbg_label: // llvm.dbg.label
5466 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5467 break;
5468 case Intrinsic::memcpy:
5469 case Intrinsic::memcpy_inline:
5470 case Intrinsic::memmove:
5471 case Intrinsic::memset:
5472 case Intrinsic::memset_inline: {
5473 break;
5474 }
5475 case Intrinsic::memcpy_element_unordered_atomic:
5476 case Intrinsic::memmove_element_unordered_atomic:
5477 case Intrinsic::memset_element_unordered_atomic: {
5478 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5479
5480 ConstantInt *ElementSizeCI =
5481 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5482 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5483 Check(ElementSizeVal.isPowerOf2(),
5484 "element size of the element-wise atomic memory intrinsic "
5485 "must be a power of 2",
5486 Call);
5487
5488 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5489 return Alignment && ElementSizeVal.ule(Alignment->value());
5490 };
5491 Check(IsValidAlignment(AMI->getDestAlign()),
5492 "incorrect alignment of the destination argument", Call);
5493 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5494 Check(IsValidAlignment(AMT->getSourceAlign()),
5495 "incorrect alignment of the source argument", Call);
5496 }
5497 break;
5498 }
5499 case Intrinsic::call_preallocated_setup: {
5500 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5501 Check(NumArgs != nullptr,
5502 "llvm.call.preallocated.setup argument must be a constant");
5503 bool FoundCall = false;
5504 for (User *U : Call.users()) {
5505 auto *UseCall = dyn_cast<CallBase>(U);
5506 Check(UseCall != nullptr,
5507 "Uses of llvm.call.preallocated.setup must be calls");
5508 const Function *Fn = UseCall->getCalledFunction();
5509 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5510 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5511 Check(AllocArgIndex != nullptr,
5512 "llvm.call.preallocated.alloc arg index must be a constant");
5513 auto AllocArgIndexInt = AllocArgIndex->getValue();
5514 Check(AllocArgIndexInt.sge(0) &&
5515 AllocArgIndexInt.slt(NumArgs->getValue()),
5516 "llvm.call.preallocated.alloc arg index must be between 0 and "
5517 "corresponding "
5518 "llvm.call.preallocated.setup's argument count");
5519 } else if (Fn && Fn->getIntrinsicID() ==
5520 Intrinsic::call_preallocated_teardown) {
5521 // nothing to do
5522 } else {
5523 Check(!FoundCall, "Can have at most one call corresponding to a "
5524 "llvm.call.preallocated.setup");
5525 FoundCall = true;
5526 size_t NumPreallocatedArgs = 0;
5527 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5528 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5529 ++NumPreallocatedArgs;
5530 }
5531 }
5532 Check(NumPreallocatedArgs != 0,
5533 "cannot use preallocated intrinsics on a call without "
5534 "preallocated arguments");
5535 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5536 "llvm.call.preallocated.setup arg size must be equal to number "
5537 "of preallocated arguments "
5538 "at call site",
5539 Call, *UseCall);
5540 // getOperandBundle() cannot be called if more than one of the operand
5541 // bundle exists. There is already a check elsewhere for this, so skip
5542 // here if we see more than one.
5543 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5544 1) {
5545 return;
5546 }
5547 auto PreallocatedBundle =
5548 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5549 Check(PreallocatedBundle,
5550 "Use of llvm.call.preallocated.setup outside intrinsics "
5551 "must be in \"preallocated\" operand bundle");
5552 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5553 "preallocated bundle must have token from corresponding "
5554 "llvm.call.preallocated.setup");
5555 }
5556 }
5557 break;
5558 }
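// Illustrative example (hypothetical callee and types), mirroring the LangRef
// usage of the preallocated intrinsics:
//   %t = call token @llvm.call.preallocated.setup(i32 1)
//   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
//   call void @foo(ptr preallocated(i32) %a) ["preallocated"(token %t)]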
5559 case Intrinsic::call_preallocated_arg: {
5560 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5561 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5562 Intrinsic::call_preallocated_setup,
5563 "llvm.call.preallocated.arg token argument must be a "
5564 "llvm.call.preallocated.setup");
5565 Check(Call.hasFnAttr(Attribute::Preallocated),
5566 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5567 "call site attribute");
5568 break;
5569 }
5570 case Intrinsic::call_preallocated_teardown: {
5571 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5572 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5573 Intrinsic::call_preallocated_setup,
5574 "llvm.call.preallocated.teardown token argument must be a "
5575 "llvm.call.preallocated.setup");
5576 break;
5577 }
5578 case Intrinsic::gcroot:
5579 case Intrinsic::gcwrite:
5580 case Intrinsic::gcread:
5581 if (ID == Intrinsic::gcroot) {
5582 AllocaInst *AI =
5583 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5584 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5585 Check(isa<Constant>(Call.getArgOperand(1)),
5586 "llvm.gcroot parameter #2 must be a constant.", Call);
5587 if (!AI->getAllocatedType()->isPointerTy()) {
5588 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5589 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5590 "or argument #2 must be a non-null constant.",
5591 Call);
5592 }
5593 }
5594
5595 Check(Call.getParent()->getParent()->hasGC(),
5596 "Enclosing function does not use GC.", Call);
5597 break;
5598 case Intrinsic::init_trampoline:
5599 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5600 "llvm.init_trampoline parameter #2 must resolve to a function.",
5601 Call);
5602 break;
5603 case Intrinsic::prefetch:
5604 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5605 "rw argument to llvm.prefetch must be 0-1", Call);
5606 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5607 "locality argument to llvm.prefetch must be 0-3", Call);
5608 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5609 "cache type argument to llvm.prefetch must be 0-1", Call);
5610 break;
5611 case Intrinsic::stackprotector:
5612 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5613 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5614 break;
5615 case Intrinsic::localescape: {
5616 BasicBlock *BB = Call.getParent();
5617 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5618 Call);
5619 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5620 Call);
5621 for (Value *Arg : Call.args()) {
5622 if (isa<ConstantPointerNull>(Arg))
5623 continue; // Null values are allowed as placeholders.
5624 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5625 Check(AI && AI->isStaticAlloca(),
5626 "llvm.localescape only accepts static allocas", Call);
5627 }
5628 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5629 SawFrameEscape = true;
5630 break;
5631 }
5632 case Intrinsic::localrecover: {
5633 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5634 Function *Fn = dyn_cast<Function>(FnArg);
5635 Check(Fn && !Fn->isDeclaration(),
5636 "llvm.localrecover first "
5637 "argument must be function defined in this module",
5638 Call);
5639 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5640 auto &Entry = FrameEscapeInfo[Fn];
5641 Entry.second = unsigned(
5642 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5643 break;
5644 }
5645
5646 case Intrinsic::experimental_gc_statepoint:
5647 if (auto *CI = dyn_cast<CallInst>(&Call))
5648 Check(!CI->isInlineAsm(),
5649 "gc.statepoint support for inline assembly unimplemented", CI);
5650 Check(Call.getParent()->getParent()->hasGC(),
5651 "Enclosing function does not use GC.", Call);
5652
5653 verifyStatepoint(Call);
5654 break;
5655 case Intrinsic::experimental_gc_result: {
5656 Check(Call.getParent()->getParent()->hasGC(),
5657 "Enclosing function does not use GC.", Call);
5658
5659 auto *Statepoint = Call.getArgOperand(0);
5660 if (isa<UndefValue>(Statepoint))
5661 break;
5662
5663 // Are we tied to a statepoint properly?
5664 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5665 const Function *StatepointFn =
5666 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5667 Check(StatepointFn && StatepointFn->isDeclaration() &&
5668 StatepointFn->getIntrinsicID() ==
5669 Intrinsic::experimental_gc_statepoint,
5670 "gc.result operand #1 must be from a statepoint", Call,
5671 Call.getArgOperand(0));
5672
5673 // Check that result type matches wrapped callee.
5674 auto *TargetFuncType =
5675 cast<FunctionType>(StatepointCall->getParamElementType(2));
5676 Check(Call.getType() == TargetFuncType->getReturnType(),
5677 "gc.result result type does not match wrapped callee", Call);
5678 break;
5679 }
5680 case Intrinsic::experimental_gc_relocate: {
5681 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5682
5683 Check(isa<PointerType>(Call.getType()->getScalarType()),
5684 "gc.relocate must return a pointer or a vector of pointers", Call);
5685
5686 // Check that this relocate is correctly tied to the statepoint
5687
5688 // This is case for relocate on the unwinding path of an invoke statepoint
5689 if (LandingPadInst *LandingPad =
5690 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5691
5692 const BasicBlock *InvokeBB =
5693 LandingPad->getParent()->getUniquePredecessor();
5694
5695 // Landingpad relocates should have only one predecessor with invoke
5696 // statepoint terminator
5697 Check(InvokeBB, "safepoints should have unique landingpads",
5698 LandingPad->getParent());
5699 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5700 InvokeBB);
5701 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5702 "gc relocate should be linked to a statepoint", InvokeBB);
5703 } else {
5704 // In all other cases relocate should be tied to the statepoint directly.
5705 // This covers relocates on a normal return path of invoke statepoint and
5706 // relocates of a call statepoint.
5707 auto *Token = Call.getArgOperand(0);
5708 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5709 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5710 }
5711
5712 // Verify rest of the relocate arguments.
5713 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5714
5715 // Both the base and derived must be piped through the safepoint.
5716 Value *Base = Call.getArgOperand(1);
5717 Check(isa<ConstantInt>(Base),
5718 "gc.relocate operand #2 must be integer offset", Call);
5719
5720 Value *Derived = Call.getArgOperand(2);
5721 Check(isa<ConstantInt>(Derived),
5722 "gc.relocate operand #3 must be integer offset", Call);
5723
5724 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5725 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5726
5727 // Check the bounds
5728 if (isa<UndefValue>(StatepointCall))
5729 break;
5730 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5731 .getOperandBundle(LLVMContext::OB_gc_live)) {
5732 Check(BaseIndex < Opt->Inputs.size(),
5733 "gc.relocate: statepoint base index out of bounds", Call);
5734 Check(DerivedIndex < Opt->Inputs.size(),
5735 "gc.relocate: statepoint derived index out of bounds", Call);
5736 }
5737
5738 // Relocated value must be either a pointer type or vector-of-pointer type,
5739 // but gc_relocate does not need to return the same pointer type as the
5740 // relocated pointer. It can be casted to the correct type later if it's
5741 // desired. However, they must have the same address space and 'vectorness'
5742 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5743 auto *ResultType = Call.getType();
5744 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5745 auto *BaseType = Relocate.getBasePtr()->getType();
5746
5747 Check(BaseType->isPtrOrPtrVectorTy(),
5748 "gc.relocate: relocated value must be a pointer", Call);
5749 Check(DerivedType->isPtrOrPtrVectorTy(),
5750 "gc.relocate: relocated value must be a pointer", Call);
5751
5752 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5753 "gc.relocate: vector relocates to vector and pointer to pointer",
5754 Call);
5755 Check(
5756 ResultType->getPointerAddressSpace() ==
5757 DerivedType->getPointerAddressSpace(),
5758 "gc.relocate: relocating a pointer shouldn't change its address space",
5759 Call);
5760
5761 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5762 Check(GC, "gc.relocate: calling function must have GCStrategy",
5763 Call.getFunction());
5764 if (GC) {
5765 auto isGCPtr = [&GC](Type *PTy) {
5766 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5767 };
5768 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5769 Check(isGCPtr(BaseType),
5770 "gc.relocate: relocated value must be a gc pointer", Call);
5771 Check(isGCPtr(DerivedType),
5772 "gc.relocate: relocated value must be a gc pointer", Call);
5773 }
5774 break;
5775 }
5776 case Intrinsic::experimental_patchpoint: {
5777 if (Call.getCallingConv() == CallingConv::AnyReg) {
5778 Check(Call.getType()->isSingleValueType(),
5779 "patchpoint: invalid return type used with anyregcc", Call);
5780 }
5781 break;
5782 }
5783 case Intrinsic::eh_exceptioncode:
5784 case Intrinsic::eh_exceptionpointer: {
5785 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5786 "eh.exceptionpointer argument must be a catchpad", Call);
5787 break;
5788 }
5789 case Intrinsic::get_active_lane_mask: {
5790 Check(Call.getType()->isVectorTy(),
5791 "get_active_lane_mask: must return a "
5792 "vector",
5793 Call);
5794 auto *ElemTy = Call.getType()->getScalarType();
5795 Check(ElemTy->isIntegerTy(1),
5796 "get_active_lane_mask: element type is not "
5797 "i1",
5798 Call);
5799 break;
5800 }
5801 case Intrinsic::experimental_get_vector_length: {
5802 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5803 Check(!VF->isNegative() && !VF->isZero(),
5804 "get_vector_length: VF must be positive", Call);
5805 break;
5806 }
5807 case Intrinsic::masked_load: {
5808 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5809 Call);
5810
5811 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5812 Value *Mask = Call.getArgOperand(2);
5813 Value *PassThru = Call.getArgOperand(3);
5814 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5815 Call);
5816 Check(Alignment->getValue().isPowerOf2(),
5817 "masked_load: alignment must be a power of 2", Call);
5818 Check(PassThru->getType() == Call.getType(),
5819 "masked_load: pass through and return type must match", Call);
5820 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5821 cast<VectorType>(Call.getType())->getElementCount(),
5822 "masked_load: vector mask must be same length as return", Call);
5823 break;
5824 }
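// Illustrative example (hypothetical names): a masked load whose alignment is
// a power of two and whose mask and pass-through match the return type:
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//                                                  <4 x i1> %mask,
//                                                  <4 x i32> %passthru)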
5825 case Intrinsic::masked_store: {
5826 Value *Val = Call.getArgOperand(0);
5827 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5828 Value *Mask = Call.getArgOperand(3);
5829 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5830 Call);
5831 Check(Alignment->getValue().isPowerOf2(),
5832 "masked_store: alignment must be a power of 2", Call);
5833 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5834 cast<VectorType>(Val->getType())->getElementCount(),
5835 "masked_store: vector mask must be same length as value", Call);
5836 break;
5837 }
5838
5839 case Intrinsic::masked_gather: {
5840 const APInt &Alignment =
5841 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5842 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5843 "masked_gather: alignment must be 0 or a power of 2", Call);
5844 break;
5845 }
5846 case Intrinsic::masked_scatter: {
5847 const APInt &Alignment =
5848 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5849 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5850 "masked_scatter: alignment must be 0 or a power of 2", Call);
5851 break;
5852 }
5853
5854 case Intrinsic::experimental_guard: {
5855 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5856 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5857 "experimental_guard must have exactly one "
5858 "\"deopt\" operand bundle");
5859 break;
5860 }
5861
5862 case Intrinsic::experimental_deoptimize: {
5863 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5864 Call);
5865 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5866 "experimental_deoptimize must have exactly one "
5867 "\"deopt\" operand bundle");
5868 Check(Call.getType() == Call.getFunction()->getReturnType(),
5869 "experimental_deoptimize return type must match caller return type");
5870
5871 if (isa<CallInst>(Call)) {
5872 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5873 Check(RI,
5874 "calls to experimental_deoptimize must be followed by a return");
5875
5876 if (!Call.getType()->isVoidTy() && RI)
5877 Check(RI->getReturnValue() == &Call,
5878 "calls to experimental_deoptimize must be followed by a return "
5879 "of the value computed by experimental_deoptimize");
5880 }
5881
5882 break;
5883 }
5884 case Intrinsic::vastart: {
5885 Check(Call.getFunction()->isVarArg(),
5886 "va_start called in a non-varargs function");
5887 break;
5888 }
5889 case Intrinsic::vector_reduce_and:
5890 case Intrinsic::vector_reduce_or:
5891 case Intrinsic::vector_reduce_xor:
5892 case Intrinsic::vector_reduce_add:
5893 case Intrinsic::vector_reduce_mul:
5894 case Intrinsic::vector_reduce_smax:
5895 case Intrinsic::vector_reduce_smin:
5896 case Intrinsic::vector_reduce_umax:
5897 case Intrinsic::vector_reduce_umin: {
5898 Type *ArgTy = Call.getArgOperand(0)->getType();
5899 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5900 "Intrinsic has incorrect argument type!");
5901 break;
5902 }
5903 case Intrinsic::vector_reduce_fmax:
5904 case Intrinsic::vector_reduce_fmin: {
5905 Type *ArgTy = Call.getArgOperand(0)->getType();
5906 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5907 "Intrinsic has incorrect argument type!");
5908 break;
5909 }
5910 case Intrinsic::vector_reduce_fadd:
5911 case Intrinsic::vector_reduce_fmul: {
5912 // Unlike the other reductions, the first argument is a start value. The
5913 // second argument is the vector to be reduced.
5914 Type *ArgTy = Call.getArgOperand(1)->getType();
5915 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5916 "Intrinsic has incorrect argument type!");
5917 break;
5918 }
5919 case Intrinsic::smul_fix:
5920 case Intrinsic::smul_fix_sat:
5921 case Intrinsic::umul_fix:
5922 case Intrinsic::umul_fix_sat:
5923 case Intrinsic::sdiv_fix:
5924 case Intrinsic::sdiv_fix_sat:
5925 case Intrinsic::udiv_fix:
5926 case Intrinsic::udiv_fix_sat: {
5927 Value *Op1 = Call.getArgOperand(0);
5928 Value *Op2 = Call.getArgOperand(1);
5930 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5931 "vector of ints");
5933 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5934 "vector of ints");
5935
5936 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5937 Check(Op3->getType()->isIntegerTy(),
5938 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5939 Check(Op3->getBitWidth() <= 32,
5940 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5941
5942 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5943 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5944 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5945 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5946 "the operands");
5947 } else {
5948 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5949 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5950 "to the width of the operands");
5951 }
5952 break;
5953 }
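// Illustrative example (hypothetical names): a fixed-point multiply whose
// scale (2) is less than the 32-bit operand width:
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 2)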
5954 case Intrinsic::lrint:
5955 case Intrinsic::llrint: {
5956 Type *ValTy = Call.getArgOperand(0)->getType();
5957 Type *ResultTy = Call.getType();
5958 Check(
5959 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5960 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5961 "of floating-points, and result must be integer or vector of integers",
5962 &Call);
5963 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5964 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5965 &Call);
5966 if (ValTy->isVectorTy()) {
5967 Check(cast<VectorType>(ValTy)->getElementCount() ==
5968 cast<VectorType>(ResultTy)->getElementCount(),
5969 "llvm.lrint, llvm.llrint: argument must be same length as result",
5970 &Call);
5971 }
5972 break;
5973 }
5974 case Intrinsic::lround:
5975 case Intrinsic::llround: {
5976 Type *ValTy = Call.getArgOperand(0)->getType();
5977 Type *ResultTy = Call.getType();
5978 auto *VTy = dyn_cast<VectorType>(ValTy);
5979 auto *RTy = dyn_cast<VectorType>(ResultTy);
5980 Check(
5981 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5982 "llvm.lround, llvm.llround: argument must be floating-point or vector "
5983 "of floating-points, and result must be integer or vector of integers",
5984 &Call);
5985 Check(
5986 ValTy->isVectorTy() == ResultTy->isVectorTy(),
5987 "llvm.lround, llvm.llround: argument and result disagree on vector use",
5988 &Call);
5989 if (VTy) {
5990 Check(VTy->getElementCount() == RTy->getElementCount(),
5991 "llvm.lround, llvm.llround: argument must be same length as result",
5992 &Call);
5993 }
5994 break;
5995 }
5996 case Intrinsic::bswap: {
5997 Type *Ty = Call.getType();
5998 unsigned Size = Ty->getScalarSizeInBits();
5999 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6000 break;
6001 }
6002 case Intrinsic::invariant_start: {
6003 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6004 Check(InvariantSize &&
6005 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6006 "invariant_start parameter must be -1, 0 or a positive number",
6007 &Call);
6008 break;
6009 }
6010 case Intrinsic::matrix_multiply:
6011 case Intrinsic::matrix_transpose:
6012 case Intrinsic::matrix_column_major_load:
6013 case Intrinsic::matrix_column_major_store: {
6014 Function *IF = Call.getCalledFunction();
6015 ConstantInt *Stride = nullptr;
6016 ConstantInt *NumRows;
6017 ConstantInt *NumColumns;
6018 VectorType *ResultTy;
6019 Type *Op0ElemTy = nullptr;
6020 Type *Op1ElemTy = nullptr;
6021 switch (ID) {
6022 case Intrinsic::matrix_multiply: {
6023 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6024 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6025 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6026 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6027 ->getNumElements() ==
6028 NumRows->getZExtValue() * N->getZExtValue(),
6029 "First argument of a matrix operation does not match specified "
6030 "shape!");
6031 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6032 ->getNumElements() ==
6033 N->getZExtValue() * NumColumns->getZExtValue(),
6034 "Second argument of a matrix operation does not match specified "
6035 "shape!");
6036
6037 ResultTy = cast<VectorType>(Call.getType());
6038 Op0ElemTy =
6039 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6040 Op1ElemTy =
6041 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6042 break;
6043 }
6044 case Intrinsic::matrix_transpose:
6045 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6046 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6047 ResultTy = cast<VectorType>(Call.getType());
6048 Op0ElemTy =
6049 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6050 break;
6051 case Intrinsic::matrix_column_major_load: {
6052 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6053 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6054 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6055 ResultTy = cast<VectorType>(Call.getType());
6056 break;
6057 }
6058 case Intrinsic::matrix_column_major_store: {
6059 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6060 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6061 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6062 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6063 Op0ElemTy =
6064 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6065 break;
6066 }
6067 default:
6068 llvm_unreachable("unexpected intrinsic");
6069 }
6070
6071 Check(ResultTy->getElementType()->isIntegerTy() ||
6072 ResultTy->getElementType()->isFloatingPointTy(),
6073 "Result type must be an integer or floating-point type!", IF);
6074
6075 if (Op0ElemTy)
6076 Check(ResultTy->getElementType() == Op0ElemTy,
6077 "Vector element type mismatch of the result and first operand "
6078 "vector!",
6079 IF);
6080
6081 if (Op1ElemTy)
6082 Check(ResultTy->getElementType() == Op1ElemTy,
6083 "Vector element type mismatch of the result and second operand "
6084 "vector!",
6085 IF);
6086
6087 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6088 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6089 "Result of a matrix operation does not fit in the returned vector!");
6090
6091 if (Stride)
6092 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6093 "Stride must be greater or equal than the number of rows!", IF);
6094
6095 break;
6096 }
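// For example, the shape checks above accept a 3x2 by 2x2 multiply such as
//   %c = call <6 x float> @llvm.matrix.multiply.v6f32.v6f32.v4f32(
//            <6 x float> %a, <4 x float> %b, i32 3, i32 2, i32 2)
// since %a supplies 6 == 3*2 elements, %b supplies 4 == 2*2 elements, and the
// <6 x float> result holds the 3x2 product.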
6097 case Intrinsic::vector_splice: {
6098 VectorType *VecTy = cast<VectorType>(Call.getType());
6099 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6100 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6101 if (Call.getParent() && Call.getParent()->getParent()) {
6102 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6103 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6104 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6105 }
6106 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6107 (Idx >= 0 && Idx < KnownMinNumElements),
6108 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6109 "known minimum number of elements in the vector. For scalable "
6110 "vectors the minimum number of elements is determined from "
6111 "vscale_range.",
6112 &Call);
6113 break;
6114 }
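// For example, with a fixed <4 x i32> result the splice index checked above
// must lie in [-4, 3]; for a scalable <vscale x 4 x i32> result the bound is
// further scaled by the function's vscale_range minimum when that attribute
// is present.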
6115 case Intrinsic::experimental_stepvector: {
6116 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6117 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6118 VecTy->getScalarSizeInBits() >= 8,
6119 "experimental_stepvector only supported for vectors of integers "
6120 "with a bitwidth of at least 8.",
6121 &Call);
6122 break;
6123 }
6124 case Intrinsic::vector_insert: {
6125 Value *Vec = Call.getArgOperand(0);
6126 Value *SubVec = Call.getArgOperand(1);
6127 Value *Idx = Call.getArgOperand(2);
6128 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6129
6130 VectorType *VecTy = cast<VectorType>(Vec->getType());
6131 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6132
6133 ElementCount VecEC = VecTy->getElementCount();
6134 ElementCount SubVecEC = SubVecTy->getElementCount();
6135 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6136 "vector_insert parameters must have the same element "
6137 "type.",
6138 &Call);
6139 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6140 "vector_insert index must be a constant multiple of "
6141 "the subvector's known minimum vector length.");
6142
6143 // If this insertion is not the 'mixed' case where a fixed vector is
6144 // inserted into a scalable vector, ensure that the insertion of the
6145 // subvector does not overrun the parent vector.
6146 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6147 Check(IdxN < VecEC.getKnownMinValue() &&
6148 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6149 "subvector operand of vector_insert would overrun the "
6150 "vector being inserted into.");
6151 }
6152 break;
6153 }
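// For example, inserting a <2 x i32> subvector into a <4 x i32> vector is only
// well-formed at indices 0 and 2:
//   %v = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %vec, <2 x i32> %sub, i64 2)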
6154 case Intrinsic::vector_extract: {
6155 Value *Vec = Call.getArgOperand(0);
6156 Value *Idx = Call.getArgOperand(1);
6157 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6158
6159 VectorType *ResultTy = cast<VectorType>(Call.getType());
6160 VectorType *VecTy = cast<VectorType>(Vec->getType());
6161
6162 ElementCount VecEC = VecTy->getElementCount();
6163 ElementCount ResultEC = ResultTy->getElementCount();
6164
6165 Check(ResultTy->getElementType() == VecTy->getElementType(),
6166 "vector_extract result must have the same element "
6167 "type as the input vector.",
6168 &Call);
6169 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6170 "vector_extract index must be a constant multiple of "
6171 "the result type's known minimum vector length.");
6172
6173 // If this extraction is not the 'mixed' case where a fixed vector is
6174 // extracted from a scalable vector, ensure that the extraction does not
6175 // overrun the parent vector.
6176 if (VecEC.isScalable() == ResultEC.isScalable()) {
6177 Check(IdxN < VecEC.getKnownMinValue() &&
6178 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6179 "vector_extract would overrun.");
6180 }
6181 break;
6182 }
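// Likewise, extracting a <2 x i32> result from a <4 x i32> input is only
// well-formed at indices 0 and 2:
//   %s = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %vec, i64 0)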
6183 case Intrinsic::experimental_vector_partial_reduce_add: {
6184 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6185 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6186
6187 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6188 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6189
6190 Check((VecWidth % AccWidth) == 0,
6191 "Invalid vector widths for partial "
6192 "reduction. The width of the input vector "
6193 "must be a positive integer multiple of "
6194 "the width of the accumulator vector.");
6195 break;
6196 }
6197 case Intrinsic::experimental_noalias_scope_decl: {
6198 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6199 break;
6200 }
6201 case Intrinsic::preserve_array_access_index:
6202 case Intrinsic::preserve_struct_access_index:
6203 case Intrinsic::aarch64_ldaxr:
6204 case Intrinsic::aarch64_ldxr:
6205 case Intrinsic::arm_ldaex:
6206 case Intrinsic::arm_ldrex: {
6207 Type *ElemTy = Call.getParamElementType(0);
6208 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6209 &Call);
6210 break;
6211 }
6212 case Intrinsic::aarch64_stlxr:
6213 case Intrinsic::aarch64_stxr:
6214 case Intrinsic::arm_stlex:
6215 case Intrinsic::arm_strex: {
6216 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6217 Check(ElemTy,
6218 "Intrinsic requires elementtype attribute on second argument.",
6219 &Call);
6220 break;
6221 }
6222 case Intrinsic::aarch64_prefetch: {
6223 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6224 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6225 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6226 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6227 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6228 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6229 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6230 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6231 break;
6232 }
6233 case Intrinsic::callbr_landingpad: {
6234 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6235 Check(CBR, "intrinsic requires callbr operand", &Call);
6236 if (!CBR)
6237 break;
6238
6239 const BasicBlock *LandingPadBB = Call.getParent();
6240 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6241 if (!PredBB) {
6242 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6243 break;
6244 }
6245 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6246 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6247 &Call);
6248 break;
6249 }
6250 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6251 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6252 "block in indirect destination list",
6253 &Call);
6254 const Instruction &First = *LandingPadBB->begin();
6255 Check(&First == &Call, "No other instructions may precede intrinsic",
6256 &Call);
6257 break;
6258 }
6259 case Intrinsic::amdgcn_cs_chain: {
6260 auto CallerCC = Call.getCaller()->getCallingConv();
6261 switch (CallerCC) {
6262 case CallingConv::AMDGPU_CS:
6263 case CallingConv::AMDGPU_CS_Chain:
6264 case CallingConv::AMDGPU_CS_ChainPreserve:
6265 break;
6266 default:
6267 CheckFailed("Intrinsic can only be used from functions with the "
6268 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6269 "calling conventions",
6270 &Call);
6271 break;
6272 }
6273
6274 Check(Call.paramHasAttr(2, Attribute::InReg),
6275 "SGPR arguments must have the `inreg` attribute", &Call);
6276 Check(!Call.paramHasAttr(3, Attribute::InReg),
6277 "VGPR arguments must not have the `inreg` attribute", &Call);
6278 break;
6279 }
6280 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6281 auto CallerCC = Call.getCaller()->getCallingConv();
6282 switch (CallerCC) {
6283 case CallingConv::AMDGPU_CS_Chain:
6284 case CallingConv::AMDGPU_CS_ChainPreserve:
6285 break;
6286 default:
6287 CheckFailed("Intrinsic can only be used from functions with the "
6288 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6289 "calling conventions",
6290 &Call);
6291 break;
6292 }
6293
6294 unsigned InactiveIdx = 1;
6295 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6296 "Value for inactive lanes must not have the `inreg` attribute",
6297 &Call);
6298 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6299 "Value for inactive lanes must be a function argument", &Call);
6300 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6301 "Value for inactive lanes must be a VGPR function argument", &Call);
6302 break;
6303 }
6304 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6305 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6306 Value *V = Call.getArgOperand(0);
6307 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6308 Check(RegCount % 8 == 0,
6309 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6310 Check((RegCount >= 24 && RegCount <= 256),
6311 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6312 break;
6313 }
6314 case Intrinsic::experimental_convergence_entry:
6315 case Intrinsic::experimental_convergence_anchor:
6316 break;
6317 case Intrinsic::experimental_convergence_loop:
6318 break;
6319 case Intrinsic::ptrmask: {
6320 Type *Ty0 = Call.getArgOperand(0)->getType();
6321 Type *Ty1 = Call.getArgOperand(1)->getType();
6323 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6324 "of pointers",
6325 &Call);
6326 Check(
6327 Ty0->isVectorTy() == Ty1->isVectorTy(),
6328 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6329 &Call);
6330 if (Ty0->isVectorTy())
6331 Check(cast<VectorType>(Ty0)->getElementCount() ==
6332 cast<VectorType>(Ty1)->getElementCount(),
6333 "llvm.ptrmask intrinsic arguments must have the same number of "
6334 "elements",
6335 &Call);
6336 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6337 "llvm.ptrmask intrinsic second argument bitwidth must match "
6338 "pointer index type size of first argument",
6339 &Call);
6340 break;
6341 }
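// For example, with a 64-bit pointer index type the checks above accept
//   %p2 = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -64)
// which clears the low six address bits; an i32 mask would be rejected because
// its width does not match the pointer index size of the first argument.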
6342 case Intrinsic::threadlocal_address: {
6343 const Value &Arg0 = *Call.getArgOperand(0);
6344 Check(isa<GlobalValue>(Arg0),
6345 "llvm.threadlocal.address first argument must be a GlobalValue");
6346 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6347 "llvm.threadlocal.address operand isThreadLocal() must be true");
6348 break;
6349 }
6350 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cta:
6351 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cluster:
6352 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_gpu:
6353 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_sys: {
6354 unsigned size = cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue();
6355 Check(size == 128, "The only supported value for size operand is 128");
6356 break;
6357 }
6358 };
6359
6360 // Verify that there aren't any unmediated control transfers between funclets.
6361 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6362 Function *F = Call.getParent()->getParent();
6363 if (F->hasPersonalityFn() &&
6364 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6365 // Run EH funclet coloring on-demand and cache results for other intrinsic
6366 // calls in this function
6367 if (BlockEHFuncletColors.empty())
6368 BlockEHFuncletColors = colorEHFunclets(*F);
6369
6370 // Check for catch-/cleanup-pad in first funclet block
6371 bool InEHFunclet = false;
6372 BasicBlock *CallBB = Call.getParent();
6373 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6374 assert(CV.size() > 0 && "Uncolored block");
6375 for (BasicBlock *ColorFirstBB : CV)
6376 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6377 InEHFunclet = true;
6378
6379 // Check for funclet operand bundle
6380 bool HasToken = false;
6381 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6382 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6383 HasToken = true;
6384
6385 // This would cause silent code truncation in WinEHPrepare
6386 if (InEHFunclet)
6387 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6388 }
6389 }
6390}
6391
6392/// Carefully grab the subprogram from a local scope.
6393///
6394/// This carefully grabs the subprogram from a local scope, avoiding the
6395/// built-in assertions that would typically fire.
6396 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6397 if (!LocalScope)
6398 return nullptr;
6399
6400 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6401 return SP;
6402
6403 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6404 return getSubprogram(LB->getRawScope());
6405
6406 // Just return null; broken scope chains are checked elsewhere.
6407 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6408 return nullptr;
6409}
6410
6411void Verifier::visit(DbgLabelRecord &DLR) {
6412 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6413 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6414
6415 // Ignore broken !dbg attachments; they're checked elsewhere.
6416 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6417 if (!isa<DILocation>(N))
6418 return;
6419
6420 BasicBlock *BB = DLR.getParent();
6421 Function *F = BB ? BB->getParent() : nullptr;
6422
6423 // The scopes for variables and !dbg attachments must agree.
6424 DILabel *Label = DLR.getLabel();
6425 DILocation *Loc = DLR.getDebugLoc();
6426 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6427
6428 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6429 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6430 if (!LabelSP || !LocSP)
6431 return;
6432
6433 CheckDI(LabelSP == LocSP,
6434 "mismatched subprogram between #dbg_label label and !dbg attachment",
6435 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6436 Loc->getScope()->getSubprogram());
6437}
6438
6439void Verifier::visit(DbgVariableRecord &DVR) {
6440 BasicBlock *BB = DVR.getParent();
6441 Function *F = BB->getParent();
6442
6446 "invalid #dbg record type", &DVR, DVR.getType());
6447
6448 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6449 // DIArgList, or an empty MDNode (which is a legacy representation for an
6450 // "undef" location).
6451 auto *MD = DVR.getRawLocation();
6452 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6453 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6454 "invalid #dbg record address/value", &DVR, MD);
6455 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6456 visitValueAsMetadata(*VAM, F);
6457 else if (auto *AL = dyn_cast<DIArgList>(MD))
6458 visitDIArgList(*AL, F);
6459
6460 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6461 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6462 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6463
6464 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6465 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6466 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6467
6468 if (DVR.isDbgAssign()) {
6469 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6470 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6471 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6472 AreDebugLocsAllowed::No);
6473
6474 const auto *RawAddr = DVR.getRawAddress();
6475 // Similarly to the location above, the address for an assign
6476 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6477 // represents an undef address.
6478 CheckDI(
6479 isa<ValueAsMetadata>(RawAddr) ||
6480 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6481 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6482 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6483 visitValueAsMetadata(*VAM, F);
6484
6485 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6486 "invalid #dbg_assign address expression", &DVR,
6488 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6489
6490 // All of the linked instructions should be in the same function as DVR.
6491 for (Instruction *I : at::getAssignmentInsts(&DVR))
6492 CheckDI(DVR.getFunction() == I->getFunction(),
6493 "inst not in same function as #dbg_assign", I, &DVR);
6494 }
6495
6496 // This check is redundant with one in visitLocalVariable().
6497 DILocalVariable *Var = DVR.getVariable();
6498 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6499 Var->getRawType());
6500
6501 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6502 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6503 &DVR, DLNode);
6504 DILocation *Loc = DVR.getDebugLoc();
6505
6506 // The scopes for variables and !dbg attachments must agree.
6507 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6508 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6509 if (!VarSP || !LocSP)
6510 return; // Broken scope chains are checked elsewhere.
6511
6512 CheckDI(VarSP == LocSP,
6513 "mismatched subprogram between #dbg record variable and DILocation",
6514 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6515 Loc->getScope()->getSubprogram());
6516
6517 verifyFnArgs(DVR);
6518}
6519
6520void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6521 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6522 auto *RetTy = cast<VectorType>(VPCast->getType());
6523 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6524 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6525 "VP cast intrinsic first argument and result vector lengths must be "
6526 "equal",
6527 *VPCast);
6528
6529 switch (VPCast->getIntrinsicID()) {
6530 default:
6531 llvm_unreachable("Unknown VP cast intrinsic");
6532 case Intrinsic::vp_trunc:
6533 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6534 "llvm.vp.trunc intrinsic first argument and result element type "
6535 "must be integer",
6536 *VPCast);
6537 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6538 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6539 "larger than the bit size of the return type",
6540 *VPCast);
6541 break;
6542 case Intrinsic::vp_zext:
6543 case Intrinsic::vp_sext:
6544 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6545 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6546 "element type must be integer",
6547 *VPCast);
6548 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6549 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6550 "argument must be smaller than the bit size of the return type",
6551 *VPCast);
6552 break;
6553 case Intrinsic::vp_fptoui:
6554 case Intrinsic::vp_fptosi:
6555 case Intrinsic::vp_lrint:
6556 case Intrinsic::vp_llrint:
6557 Check(
6558 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6559 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6560 "type must be floating-point and result element type must be integer",
6561 *VPCast);
6562 break;
6563 case Intrinsic::vp_uitofp:
6564 case Intrinsic::vp_sitofp:
6565 Check(
6566 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6567 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6568 "type must be integer and result element type must be floating-point",
6569 *VPCast);
6570 break;
6571 case Intrinsic::vp_fptrunc:
6572 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6573 "llvm.vp.fptrunc intrinsic first argument and result element type "
6574 "must be floating-point",
6575 *VPCast);
6576 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6577 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6578 "larger than the bit size of the return type",
6579 *VPCast);
6580 break;
6581 case Intrinsic::vp_fpext:
6582 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6583 "llvm.vp.fpext intrinsic first argument and result element type "
6584 "must be floating-point",
6585 *VPCast);
6586 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6587 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6588 "smaller than the bit size of the return type",
6589 *VPCast);
6590 break;
6591 case Intrinsic::vp_ptrtoint:
6592 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6593 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6594 "pointer and result element type must be integer",
6595 *VPCast);
6596 break;
6597 case Intrinsic::vp_inttoptr:
6598 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6599 "llvm.vp.inttoptr intrinsic first argument element type must be "
6600 "integer and result element type must be pointer",
6601 *VPCast);
6602 break;
6603 }
6604 }
6605 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6606 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6608 "invalid predicate for VP FP comparison intrinsic", &VPI);
6609 }
6610 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6611 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6613 "invalid predicate for VP integer comparison intrinsic", &VPI);
6614 }
6615 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6616 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6617 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6618 "unsupported bits for llvm.vp.is.fpclass test mask");
6619 }
6620}
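// For example, a VP truncation that satisfies the element-count and bit-width
// rules above looks like
//   %t = call <4 x i16> @llvm.vp.trunc.v4i16.v4i32(<4 x i32> %x, <4 x i1> %m, i32 %evl)
// (matching 4-element vectors, with the 16-bit result narrower than the 32-bit
// source).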
6621
6622void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6623 unsigned NumOperands = FPI.getNonMetadataArgCount();
6624 bool HasRoundingMD =
6625 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
6626
6627 // Add the expected number of metadata operands.
6628 NumOperands += (1 + HasRoundingMD);
6629
6630 // Compare intrinsics carry an extra predicate metadata operand.
6631 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6632 NumOperands += 1;
6633 Check((FPI.arg_size() == NumOperands),
6634 "invalid arguments for constrained FP intrinsic", &FPI);
6635
6636 switch (FPI.getIntrinsicID()) {
6637 case Intrinsic::experimental_constrained_lrint:
6638 case Intrinsic::experimental_constrained_llrint: {
6639 Type *ValTy = FPI.getArgOperand(0)->getType();
6640 Type *ResultTy = FPI.getType();
6641 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6642 "Intrinsic does not support vectors", &FPI);
6643 break;
6644 }
6645
6646 case Intrinsic::experimental_constrained_lround:
6647 case Intrinsic::experimental_constrained_llround: {
6648 Type *ValTy = FPI.getArgOperand(0)->getType();
6649 Type *ResultTy = FPI.getType();
6650 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6651 "Intrinsic does not support vectors", &FPI);
6652 break;
6653 }
6654
6655 case Intrinsic::experimental_constrained_fcmp:
6656 case Intrinsic::experimental_constrained_fcmps: {
6657 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6659 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6660 break;
6661 }
6662
6663 case Intrinsic::experimental_constrained_fptosi:
6664 case Intrinsic::experimental_constrained_fptoui: {
6665 Value *Operand = FPI.getArgOperand(0);
6666 ElementCount SrcEC;
6667 Check(Operand->getType()->isFPOrFPVectorTy(),
6668 "Intrinsic first argument must be floating point", &FPI);
6669 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6670 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6671 }
6672
6673 Operand = &FPI;
6674 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6675 "Intrinsic first argument and result disagree on vector use", &FPI);
6676 Check(Operand->getType()->isIntOrIntVectorTy(),
6677 "Intrinsic result must be an integer", &FPI);
6678 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6679 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6680 "Intrinsic first argument and result vector lengths must be equal",
6681 &FPI);
6682 }
6683 break;
6684 }
6685
6686 case Intrinsic::experimental_constrained_sitofp:
6687 case Intrinsic::experimental_constrained_uitofp: {
6688 Value *Operand = FPI.getArgOperand(0);
6689 ElementCount SrcEC;
6690 Check(Operand->getType()->isIntOrIntVectorTy(),
6691 "Intrinsic first argument must be integer", &FPI);
6692 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6693 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6694 }
6695
6696 Operand = &FPI;
6697 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6698 "Intrinsic first argument and result disagree on vector use", &FPI);
6699 Check(Operand->getType()->isFPOrFPVectorTy(),
6700 "Intrinsic result must be a floating point", &FPI);
6701 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6702 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6703 "Intrinsic first argument and result vector lengths must be equal",
6704 &FPI);
6705 }
6706 break;
6707 }
6708
6709 case Intrinsic::experimental_constrained_fptrunc:
6710 case Intrinsic::experimental_constrained_fpext: {
6711 Value *Operand = FPI.getArgOperand(0);
6712 Type *OperandTy = Operand->getType();
6713 Value *Result = &FPI;
6714 Type *ResultTy = Result->getType();
6715 Check(OperandTy->isFPOrFPVectorTy(),
6716 "Intrinsic first argument must be FP or FP vector", &FPI);
6717 Check(ResultTy->isFPOrFPVectorTy(),
6718 "Intrinsic result must be FP or FP vector", &FPI);
6719 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6720 "Intrinsic first argument and result disagree on vector use", &FPI);
6721 if (OperandTy->isVectorTy()) {
6722 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6723 cast<VectorType>(ResultTy)->getElementCount(),
6724 "Intrinsic first argument and result vector lengths must be equal",
6725 &FPI);
6726 }
6727 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6728 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6729 "Intrinsic first argument's type must be larger than result type",
6730 &FPI);
6731 } else {
6732 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6733 "Intrinsic first argument's type must be smaller than result type",
6734 &FPI);
6735 }
6736 break;
6737 }
6738
6739 default:
6740 break;
6741 }
6742
6743 // If a non-metadata argument is passed in a metadata slot then the
6744 // error will be caught earlier when the incorrect argument doesn't
6745 // match the specification in the intrinsic call table. Thus, no
6746 // argument type check is needed here.
6747
6748 Check(FPI.getExceptionBehavior().has_value(),
6749 "invalid exception behavior argument", &FPI);
6750 if (HasRoundingMD) {
6751 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6752 &FPI);
6753 }
6754}
6755
6756void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6757 auto *MD = DII.getRawLocation();
6758 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6759 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6760 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6761 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6762 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6763 DII.getRawVariable());
6764 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6765 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6766 DII.getRawExpression());
6767
6768 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6769 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6770 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6771 DAI->getRawAssignID());
6772 const auto *RawAddr = DAI->getRawAddress();
6773 CheckDI(
6774 isa<ValueAsMetadata>(RawAddr) ||
6775 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6776 "invalid llvm.dbg.assign intrinsic address", &DII,
6777 DAI->getRawAddress());
6778 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6779 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6780 DAI->getRawAddressExpression());
6781 // All of the linked instructions should be in the same function as DII.
6782 for (Instruction *I : at::getAssignmentInsts(DAI))
6783 CheckDI(DAI->getFunction() == I->getFunction(),
6784 "inst not in same function as dbg.assign", I, DAI);
6785 }
6786
6787 // Ignore broken !dbg attachments; they're checked elsewhere.
6788 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6789 if (!isa<DILocation>(N))
6790 return;
6791
6792 BasicBlock *BB = DII.getParent();
6793 Function *F = BB ? BB->getParent() : nullptr;
6794
6795 // The scopes for variables and !dbg attachments must agree.
6796 DILocalVariable *Var = DII.getVariable();
6797 DILocation *Loc = DII.getDebugLoc();
6798 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6799 &DII, BB, F);
6800
6801 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6802 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6803 if (!VarSP || !LocSP)
6804 return; // Broken scope chains are checked elsewhere.
6805
6806 CheckDI(VarSP == LocSP,
6807 "mismatched subprogram between llvm.dbg." + Kind +
6808 " variable and !dbg attachment",
6809 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6810 Loc->getScope()->getSubprogram());
6811
6812 // This check is redundant with one in visitLocalVariable().
6813 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6814 Var->getRawType());
6815 verifyFnArgs(DII);
6816}
6817
6818void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6819 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6820 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6821 DLI.getRawLabel());
6822
6823 // Ignore broken !dbg attachments; they're checked elsewhere.
6824 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6825 if (!isa<DILocation>(N))
6826 return;
6827
6828 BasicBlock *BB = DLI.getParent();
6829 Function *F = BB ? BB->getParent() : nullptr;
6830
6831 // The scopes for variables and !dbg attachments must agree.
6832 DILabel *Label = DLI.getLabel();
6833 DILocation *Loc = DLI.getDebugLoc();
6834 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6835 BB, F);
6836
6837 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6838 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6839 if (!LabelSP || !LocSP)
6840 return;
6841
6842 CheckDI(LabelSP == LocSP,
6843 "mismatched subprogram between llvm.dbg." + Kind +
6844 " label and !dbg attachment",
6845 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6846 Loc->getScope()->getSubprogram());
6847}
6848
6849void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6850 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6851 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6852
6853 // We don't know whether this intrinsic verified correctly.
6854 if (!V || !E || !E->isValid())
6855 return;
6856
6857 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6858 auto Fragment = E->getFragmentInfo();
6859 if (!Fragment)
6860 return;
6861
6862 // The frontend helps out GDB by emitting the members of local anonymous
6863 // unions as artificial local variables with shared storage. When SROA splits
6864 // the storage for artificial local variables that are smaller than the entire
6865 // union, the overhang piece will be outside of the allotted space for the
6866 // variable and this check fails.
6867 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6868 if (V->isArtificial())
6869 return;
6870
6871 verifyFragmentExpression(*V, *Fragment, &I);
6872}
6873void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6874 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6875 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6876
6877 // We don't know whether this intrinsic verified correctly.
6878 if (!V || !E || !E->isValid())
6879 return;
6880
6881 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6882 auto Fragment = E->getFragmentInfo();
6883 if (!Fragment)
6884 return;
6885
6886 // The frontend helps out GDB by emitting the members of local anonymous
6887 // unions as artificial local variables with shared storage. When SROA splits
6888 // the storage for artificial local variables that are smaller than the entire
6889 // union, the overhang piece will be outside of the allotted space for the
6890 // variable and this check fails.
6891 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6892 if (V->isArtificial())
6893 return;
6894
6895 verifyFragmentExpression(*V, *Fragment, &DVR);
6896}
6897
6898template <typename ValueOrMetadata>
6899void Verifier::verifyFragmentExpression(const DIVariable &V,
6900 DIExpression::FragmentInfo Fragment,
6901 ValueOrMetadata *Desc) {
6902 // If there's no size, the type is broken, but that should be checked
6903 // elsewhere.
6904 auto VarSize = V.getSizeInBits();
6905 if (!VarSize)
6906 return;
6907
6908 unsigned FragSize = Fragment.SizeInBits;
6909 unsigned FragOffset = Fragment.OffsetInBits;
6910 CheckDI(FragSize + FragOffset <= *VarSize,
6911 "fragment is larger than or outside of variable", Desc, &V);
6912 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6913}
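// For example, for a 64-bit variable a fragment expression such as
//   !DIExpression(DW_OP_LLVM_fragment, 0, 32)
// passes both checks above, whereas (DW_OP_LLVM_fragment, 32, 64) would be
// flagged as extending past the variable and (DW_OP_LLVM_fragment, 0, 64) as
// covering the entire variable.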
6914
6915void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6916 // This function does not take the scope of noninlined function arguments into
6917 // account. Don't run it if current function is nodebug, because it may
6918 // contain inlined debug intrinsics.
6919 if (!HasDebugInfo)
6920 return;
6921
6922 // For performance reasons only check non-inlined ones.
6923 if (I.getDebugLoc()->getInlinedAt())
6924 return;
6925
6926 DILocalVariable *Var = I.getVariable();
6927 CheckDI(Var, "dbg intrinsic without variable");
6928
6929 unsigned ArgNo = Var->getArg();
6930 if (!ArgNo)
6931 return;
6932
6933 // Verify there are no duplicate function argument debug info entries.
6934 // These will cause hard-to-debug assertions in the DWARF backend.
6935 if (DebugFnArgs.size() < ArgNo)
6936 DebugFnArgs.resize(ArgNo, nullptr);
6937
6938 auto *Prev = DebugFnArgs[ArgNo - 1];
6939 DebugFnArgs[ArgNo - 1] = Var;
6940 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6941 Prev, Var);
6942}
6943void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6944 // This function does not take the scope of noninlined function arguments into
6945 // account. Don't run it if current function is nodebug, because it may
6946 // contain inlined debug intrinsics.
6947 if (!HasDebugInfo)
6948 return;
6949
6950 // For performance reasons only check non-inlined ones.
6951 if (DVR.getDebugLoc()->getInlinedAt())
6952 return;
6953
6954 DILocalVariable *Var = DVR.getVariable();
6955 CheckDI(Var, "#dbg record without variable");
6956
6957 unsigned ArgNo = Var->getArg();
6958 if (!ArgNo)
6959 return;
6960
6961 // Verify there are no duplicate function argument debug info entries.
6962 // These will cause hard-to-debug assertions in the DWARF backend.
6963 if (DebugFnArgs.size() < ArgNo)
6964 DebugFnArgs.resize(ArgNo, nullptr);
6965
6966 auto *Prev = DebugFnArgs[ArgNo - 1];
6967 DebugFnArgs[ArgNo - 1] = Var;
6968 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6969 Prev, Var);
6970}
6971
6972void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6973 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6974
6975 // We don't know whether this intrinsic verified correctly.
6976 if (!E || !E->isValid())
6977 return;
6978
6979 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6980 Value *VarValue = I.getVariableLocationOp(0);
6981 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6982 return;
6983 // We allow EntryValues for swift async arguments, as they have an
6984 // ABI-guarantee to be turned into a specific register.
6985 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6986 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6987 return;
6988 }
6989
6990 CheckDI(!E->isEntryValue(),
6991 "Entry values are only allowed in MIR unless they target a "
6992 "swiftasync Argument",
6993 &I);
6994}
6995void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6996 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6997
6998 // We don't know whether this intrinsic verified correctly.
6999 if (!E || !E->isValid())
7000 return;
7001
7002 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7003 Value *VarValue = DVR.getVariableLocationOp(0);
7004 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7005 return;
7006 // We allow EntryValues for swift async arguments, as they have an
7007 // ABI-guarantee to be turned into a specific register.
7008 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7009 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7010 return;
7011 }
7012
7013 CheckDI(!E->isEntryValue(),
7014 "Entry values are only allowed in MIR unless they target a "
7015 "swiftasync Argument",
7016 &DVR);
7017}
7018
7019void Verifier::verifyCompileUnits() {
7020 // When more than one Module is imported into the same context, such as during
7021 // an LTO build before linking the modules, ODR type uniquing may cause types
7022 // to point to a different CU. This check does not make sense in this case.
7023 if (M.getContext().isODRUniquingDebugTypes())
7024 return;
7025 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7026 SmallPtrSet<const Metadata *, 2> Listed;
7027 if (CUs)
7028 Listed.insert(CUs->op_begin(), CUs->op_end());
7029 for (const auto *CU : CUVisited)
7030 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7031 CUVisited.clear();
7032}
7033
7034void Verifier::verifyDeoptimizeCallingConvs() {
7035 if (DeoptimizeDeclarations.empty())
7036 return;
7037
7038 const Function *First = DeoptimizeDeclarations[0];
7039 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7040 Check(First->getCallingConv() == F->getCallingConv(),
7041 "All llvm.experimental.deoptimize declarations must have the same "
7042 "calling convention",
7043 First, F);
7044 }
7045}
7046
7047void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7048 const OperandBundleUse &BU) {
7049 FunctionType *FTy = Call.getFunctionType();
7050
7051 Check((FTy->getReturnType()->isPointerTy() ||
7052 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7053 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7054 "function returning a pointer or a non-returning function that has a "
7055 "void return type",
7056 Call);
7057
7058 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7059 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7060 "an argument",
7061 Call);
7062
7063 auto *Fn = cast<Function>(BU.Inputs.front());
7064 Intrinsic::ID IID = Fn->getIntrinsicID();
7065
7066 if (IID) {
7067 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7068 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7069 "invalid function argument", Call);
7070 } else {
7071 StringRef FnName = Fn->getName();
7072 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7073 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7074 "invalid function argument", Call);
7075 }
7076}
7077
7078void Verifier::verifyNoAliasScopeDecl() {
7079 if (NoAliasScopeDecls.empty())
7080 return;
7081
7082 // only a single scope must be declared at a time.
7083 for (auto *II : NoAliasScopeDecls) {
7084 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7085 "Not a llvm.experimental.noalias.scope.decl ?");
7086 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7087 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7088 Check(ScopeListMV != nullptr,
7089 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7090 "argument",
7091 II);
7092
7093 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7094 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7095 Check(ScopeListMD->getNumOperands() == 1,
7096 "!id.scope.list must point to a list with a single scope", II);
7097 visitAliasScopeListMetadata(ScopeListMD);
7098 }
7099
7100 // Only check the domination rule when requested. Once all passes have been
7101 // adapted this option can go away.
7102 if (!VerifyNoAliasScopeDomination)
7103 return;
7104
7105 // Now sort the intrinsics based on the scope MDNode so that declarations of
7106 // the same scopes are next to each other.
7107 auto GetScope = [](IntrinsicInst *II) {
7108 const auto *ScopeListMV = cast<MetadataAsValue>(
7109 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7110 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7111 };
7112
7113 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7114 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7115 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7116 return GetScope(Lhs) < GetScope(Rhs);
7117 };
7118
7119 llvm::sort(NoAliasScopeDecls, Compare);
7120
7121 // Go over the intrinsics and check that for the same scope, they are not
7122 // dominating each other.
7123 auto ItCurrent = NoAliasScopeDecls.begin();
7124 while (ItCurrent != NoAliasScopeDecls.end()) {
7125 auto CurScope = GetScope(*ItCurrent);
7126 auto ItNext = ItCurrent;
7127 do {
7128 ++ItNext;
7129 } while (ItNext != NoAliasScopeDecls.end() &&
7130 GetScope(*ItNext) == CurScope);
7131
7132 // [ItCurrent, ItNext) represents the declarations for the same scope.
7132 // Ensure they are not dominating each other, but only if it is not too
7134 // expensive.
7135 if (ItNext - ItCurrent < 32)
7136 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7137 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7138 if (I != J)
7139 Check(!DT.dominates(I, J),
7140 "llvm.experimental.noalias.scope.decl dominates another one "
7141 "with the same scope",
7142 I);
7143 ItCurrent = ItNext;
7144 }
7145}
7146
7147//===----------------------------------------------------------------------===//
7148// Implement the public interfaces to this file...
7149//===----------------------------------------------------------------------===//
7150
7151 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7152 Function &F = const_cast<Function &>(f);
7153
7154 // Don't use a raw_null_ostream. Printing IR is expensive.
7155 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7156
7157 // Note that this function's return value is inverted from what you would
7158 // expect of a function called "verify".
7159 return !V.verify(F);
7160}
7161
7162 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7163 bool *BrokenDebugInfo) {
7164 // Don't use a raw_null_ostream. Printing IR is expensive.
7165 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7166
7167 bool Broken = false;
7168 for (const Function &F : M)
7169 Broken |= !V.verify(F);
7170
7171 Broken |= !V.verify();
7172 if (BrokenDebugInfo)
7173 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7174 // Note that this function's return value is inverted from what you would
7175 // expect of a function called "verify".
7176 return Broken;
7177}
7178
7179namespace {
7180
7181struct VerifierLegacyPass : public FunctionPass {
7182 static char ID;
7183
7184 std::unique_ptr<Verifier> V;
7185 bool FatalErrors = true;
7186
7187 VerifierLegacyPass() : FunctionPass(ID) {
7188 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7189 }
7190 explicit VerifierLegacyPass(bool FatalErrors)
7191 : FunctionPass(ID),
7192 FatalErrors(FatalErrors) {
7193 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7194 }
7195
7196 bool doInitialization(Module &M) override {
7197 V = std::make_unique<Verifier>(
7198 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7199 return false;
7200 }
7201
7202 bool runOnFunction(Function &F) override {
7203 if (!V->verify(F) && FatalErrors) {
7204 errs() << "in function " << F.getName() << '\n';
7205 report_fatal_error("Broken function found, compilation aborted!");
7206 }
7207 return false;
7208 }
7209
7210 bool doFinalization(Module &M) override {
7211 bool HasErrors = false;
7212 for (Function &F : M)
7213 if (F.isDeclaration())
7214 HasErrors |= !V->verify(F);
7215
7216 HasErrors |= !V->verify();
7217 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7218 report_fatal_error("Broken module found, compilation aborted!");
7219 return false;
7220 }
7221
7222 void getAnalysisUsage(AnalysisUsage &AU) const override {
7223 AU.setPreservesAll();
7224 }
7225};
7226
7227} // end anonymous namespace
7228
7229/// Helper to issue failure from the TBAA verification
7230template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7231 if (Diagnostic)
7232 return Diagnostic->CheckFailed(Args...);
7233}
7234
7235#define CheckTBAA(C, ...) \
7236 do { \
7237 if (!(C)) { \
7238 CheckFailed(__VA_ARGS__); \
7239 return false; \
7240 } \
7241 } while (false)
7242
7243/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7244/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7245/// struct-type node describing an aggregate data structure (like a struct).
7246TBAAVerifier::TBAABaseNodeSummary
7247TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7248 bool IsNewFormat) {
7249 if (BaseNode->getNumOperands() < 2) {
7250 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7251 return {true, ~0u};
7252 }
7253
7254 auto Itr = TBAABaseNodes.find(BaseNode);
7255 if (Itr != TBAABaseNodes.end())
7256 return Itr->second;
7257
7258 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7259 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7260 (void)InsertResult;
7261 assert(InsertResult.second && "We just checked!");
7262 return Result;
7263}
7264
7265TBAAVerifier::TBAABaseNodeSummary
7266TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7267 bool IsNewFormat) {
7268 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7269
7270 if (BaseNode->getNumOperands() == 2) {
7271 // Scalar nodes can only be accessed at offset 0.
7272 return isValidScalarTBAANode(BaseNode)
7273 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7274 : InvalidNode;
7275 }
7276
7277 if (IsNewFormat) {
7278 if (BaseNode->getNumOperands() % 3 != 0) {
7279 CheckFailed("Access tag nodes must have the number of operands that is a "
7280 "multiple of 3!", BaseNode);
7281 return InvalidNode;
7282 }
7283 } else {
7284 if (BaseNode->getNumOperands() % 2 != 1) {
7285 CheckFailed("Struct tag nodes must have an odd number of operands!",
7286 BaseNode);
7287 return InvalidNode;
7288 }
7289 }
7290
7291 // Check the type size field.
7292 if (IsNewFormat) {
7293 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7294 BaseNode->getOperand(1));
7295 if (!TypeSizeNode) {
7296 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7297 return InvalidNode;
7298 }
7299 }
7300
7301 // Check the type name field. In the new format it can be anything.
7302 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7303 CheckFailed("Struct tag nodes have a string as their first operand",
7304 BaseNode);
7305 return InvalidNode;
7306 }
7307
7308 bool Failed = false;
7309
7310 std::optional<APInt> PrevOffset;
7311 unsigned BitWidth = ~0u;
7312
7313 // We've already checked that BaseNode is not a degenerate root node with one
7314 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7315 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7316 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7317 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7318 Idx += NumOpsPerField) {
7319 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7320 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7321 if (!isa<MDNode>(FieldTy)) {
7322 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7323 Failed = true;
7324 continue;
7325 }
7326
7327 auto *OffsetEntryCI =
7328 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7329 if (!OffsetEntryCI) {
7330 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7331 Failed = true;
7332 continue;
7333 }
7334
7335 if (BitWidth == ~0u)
7336 BitWidth = OffsetEntryCI->getBitWidth();
7337
7338 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7339 CheckFailed(
7340 "Bitwidth between the offsets and struct type entries must match", &I,
7341 BaseNode);
7342 Failed = true;
7343 continue;
7344 }
7345
7346 // NB! As far as I can tell, we generate a non-strictly increasing offset
7347 // sequence only from structs that have zero size bit fields. When
7348 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7349 // pick the field lexically the latest in struct type metadata node. This
7350 // mirrors the actual behavior of the alias analysis implementation.
7351 bool IsAscending =
7352 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7353
7354 if (!IsAscending) {
7355 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7356 Failed = true;
7357 }
7358
7359 PrevOffset = OffsetEntryCI->getValue();
7360
7361 if (IsNewFormat) {
7362 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7363 BaseNode->getOperand(Idx + 2));
7364 if (!MemberSizeNode) {
7365 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7366 Failed = true;
7367 continue;
7368 }
7369 }
7370 }
7371
7372 return Failed ? InvalidNode
7373 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7374}
7375
7376static bool IsRootTBAANode(const MDNode *MD) {
7377 return MD->getNumOperands() < 2;
7378}
7379
7380static bool IsScalarTBAANodeImpl(const MDNode *MD,
7381 SmallPtrSetImpl<const MDNode *> &Visited) {
7382 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7383 return false;
7384
7385 if (!isa<MDString>(MD->getOperand(0)))
7386 return false;
7387
7388 if (MD->getNumOperands() == 3) {
7389 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7390 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7391 return false;
7392 }
7393
7394 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7395 return Parent && Visited.insert(Parent).second &&
7396 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7397}
7398
7399bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7400 auto ResultIt = TBAAScalarNodes.find(MD);
7401 if (ResultIt != TBAAScalarNodes.end())
7402 return ResultIt->second;
7403
7404 SmallPtrSet<const MDNode *, 4> Visited;
7405 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7406 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7407 (void)InsertResult;
7408 assert(InsertResult.second && "Just checked!");
7409
7410 return Result;
7411}
7412
7413/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7414/// Offset in place to be the offset within the field node returned.
7415///
7416/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7417MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7418 const MDNode *BaseNode,
7419 APInt &Offset,
7420 bool IsNewFormat) {
7421 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7422
7423 // Scalar nodes have only one possible "field" -- their parent in the access
7424 // hierarchy. Offset must be zero at this point, but our caller is supposed
7425 // to check that.
7426 if (BaseNode->getNumOperands() == 2)
7427 return cast<MDNode>(BaseNode->getOperand(1));
7428
7429 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7430 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7431 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7432 Idx += NumOpsPerField) {
7433 auto *OffsetEntryCI =
7434 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7435 if (OffsetEntryCI->getValue().ugt(Offset)) {
7436 if (Idx == FirstFieldOpNo) {
7437 CheckFailed("Could not find TBAA parent in struct type node", &I,
7438 BaseNode, &Offset);
7439 return nullptr;
7440 }
7441
7442 unsigned PrevIdx = Idx - NumOpsPerField;
7443 auto *PrevOffsetEntryCI =
7444 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7445 Offset -= PrevOffsetEntryCI->getValue();
7446 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7447 }
7448 }
7449
7450 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7451 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7452 BaseNode->getOperand(LastIdx + 1));
7453 Offset -= LastOffsetEntryCI->getValue();
7454 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7455}
7456
7457 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7458 if (!Type || Type->getNumOperands() < 3)
7459 return false;
7460
7461 // In the new format type nodes shall have a reference to the parent type as
7462 // its first operand.
7463 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7464}
7465
7466 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7467 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7468 &I, MD);
7469
7470 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7471 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7472 isa<AtomicCmpXchgInst>(I),
7473 "This instruction shall not have a TBAA access tag!", &I);
7474
7475 bool IsStructPathTBAA =
7476 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7477
7478 CheckTBAA(IsStructPathTBAA,
7479 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7480 &I);
7481
7482 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7483 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7484
7485 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7486
7487 if (IsNewFormat) {
7488 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7489 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7490 } else {
7491 CheckTBAA(MD->getNumOperands() < 5,
7492 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7493 }
7494
7495 // Check the access size field.
7496 if (IsNewFormat) {
7497 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7498 MD->getOperand(3));
7499 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7500 }
7501
7502 // Check the immutability flag.
7503 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7504 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7505 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7506 MD->getOperand(ImmutabilityFlagOpNo));
7507 CheckTBAA(IsImmutableCI,
7508 "Immutability tag on struct tag metadata must be a constant", &I,
7509 MD);
7510 CheckTBAA(
7511 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7512 "Immutability part of the struct tag metadata must be either 0 or 1",
7513 &I, MD);
7514 }
7515
7516 CheckTBAA(BaseNode && AccessType,
7517 "Malformed struct tag metadata: base and access-type "
7518 "should be non-null and point to Metadata nodes",
7519 &I, MD, BaseNode, AccessType);
7520
7521 if (!IsNewFormat) {
7522 CheckTBAA(isValidScalarTBAANode(AccessType),
7523 "Access type node must be a valid scalar type", &I, MD,
7524 AccessType);
7525 }
7526
7527 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7528 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7529
7530 APInt Offset = OffsetCI->getValue();
7531 bool SeenAccessTypeInPath = false;
7532
7533 SmallPtrSet<MDNode *, 4> StructPath;
7534
7535 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7536 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7537 IsNewFormat)) {
7538 if (!StructPath.insert(BaseNode).second) {
7539 CheckFailed("Cycle detected in struct path", &I, MD);
7540 return false;
7541 }
7542
7543 bool Invalid;
7544 unsigned BaseNodeBitWidth;
7545 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7546 IsNewFormat);
7547
7548 // If the base node is invalid in itself, then we've already printed all the
7549 // errors we wanted to print.
7550 if (Invalid)
7551 return false;
7552
7553 SeenAccessTypeInPath |= BaseNode == AccessType;
7554
7555 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7556 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7557 &I, MD, &Offset);
7558
7559 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7560 (BaseNodeBitWidth == 0 && Offset == 0) ||
7561 (IsNewFormat && BaseNodeBitWidth == ~0u),
7562 "Access bit-width not the same as description bit-width", &I, MD,
7563 BaseNodeBitWidth, Offset.getBitWidth());
7564
7565 if (IsNewFormat && SeenAccessTypeInPath)
7566 break;
7567 }
7568
7569 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7570 MD);
7571 return true;
7572}
7573
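For orientation, a sketch (not code from this file) of a minimal access tag that satisfies visitTBAAMetadata above: the common scalar case where the base node equals the access type and the offset is zero, i.e. !{!int, !int, i64 0} with !int a scalar type node chained to a root. The node strings mirror what clang typically emits and the helper name is made up; only the generic MDNode/MDString/ConstantAsMetadata APIs are used.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static void attachScalarIntTBAA(LoadInst *LI) {
  LLVMContext &Ctx = LI->getContext();
  Metadata *Zero = ConstantAsMetadata::get(
      ConstantInt::get(Type::getInt64Ty(Ctx), 0));
  MDNode *Root = MDNode::get(Ctx, {MDString::get(Ctx, "Simple C/C++ TBAA")});
  MDNode *Char = MDNode::get(Ctx, {MDString::get(Ctx, "omnipotent char"), Root});
  MDNode *Int  = MDNode::get(Ctx, {MDString::get(Ctx, "int"), Char});
  // Old-format access tag !{base, access-type, offset}: base == access type
  // and offset == 0, so the struct-path walk ends at a valid scalar node.
  MDNode *Tag  = MDNode::get(Ctx, {Int, Int, Zero});
  LI->setMetadata(LLVMContext::MD_tbaa, Tag);
}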
7574char VerifierLegacyPass::ID = 0;
7575INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7576
7577FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7578 return new VerifierLegacyPass(FatalErrors);
7579}
7580
7581AnalysisKey VerifierAnalysis::Key;
7582VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7583 ModuleAnalysisManager &) {
7584 Result Res;
7585 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7586 return Res;
7587}
7588
7589VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7590 FunctionAnalysisManager &) {
7591 return { llvm::verifyFunction(F, &dbgs()), false };
7592}
7593
7594PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7595 auto Res = AM.getResult<VerifierAnalysis>(M);
7596 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7597 report_fatal_error("Broken module found, compilation aborted!");
7598
7599 return PreservedAnalyses::all();
7600}
7601
7602PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7603 auto res = AM.getResult<VerifierAnalysis>(F);
7604 if (res.IRBroken && FatalErrors)
7605 report_fatal_error("Broken function found, compilation aborted!");
7606
7607 return PreservedAnalyses::all();
7608}
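The definitions above are the verifier's entry points for the legacy and new pass managers; the free functions verifyFunction and verifyModule (listed with their signatures further below) are the library interface. A minimal usage sketch, not part of this file, assuming an already-populated Module M and Function F:

#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Both functions return true when the IR is broken; diagnostics go to errs().
static bool reportBrokenIR(Module &M, Function &F) {
  bool BrokenDebugInfo = false;
  bool ModuleBroken = verifyModule(M, &errs(), &BrokenDebugInfo);
  if (BrokenDebugInfo)
    errs() << "note: broken debug info (recoverable by stripping it)\n";
  bool FunctionBroken = verifyFunction(F, &errs());
  return ModuleBroken || FunctionBroken;
}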
This file defines the StringMap class.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
Definition: Attributes.cpp:729
@ FnAttr
Definition: Attributes.cpp:727
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
return RetTy
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
std::string Name
uint64_t Size
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T1
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file contains the declarations for profiling metadata utility functions.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
Definition: Verifier.cpp:7380
static bool isType(const Metadata *MD)
Definition: Verifier.cpp:1123
static Instruction * getSuccPad(Instruction *Terminator)
Definition: Verifier.cpp:2680
#define Check(C,...)
We know that cond should be true; if not, print an error message.
Definition: Verifier.cpp:658
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
Definition: Verifier.cpp:7457
#define CheckDI(C,...)
We know that a debug info condition should be true; if not, print an error message.
Definition: Verifier.cpp:668
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition: Verifier.cpp:709
static bool isDINode(const Metadata *MD)
Definition: Verifier.cpp:1125
static bool isScope(const Metadata *MD)
Definition: Verifier.cpp:1124
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6396
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3798
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7235
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7376
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4126
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4372
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1268
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3808
bool isFiniteNonZero() const
Definition: APFloat.h:1370
bool isNegative() const
Definition: APFloat.h:1360
const fltSemantics & getSemantics() const
Definition: APFloat.h:1368
Class for arbitrary precision integers.
Definition: APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1179
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:395
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1128
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:377
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:791
BinOp getOperation() const
Definition: Instructions.h:787
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:829
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:910
std::string getAsString(bool InAttrGrp=false) const
Definition: Attributes.cpp:997
static Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:304
static bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:750
static bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:327
static bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:742
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
@ None
No attributes have been set.
Definition: Attributes.h:88
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:102
static bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:746
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:517
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
const Instruction & front() const
Definition: BasicBlock.h:471
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:571
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:467
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
This class represents a no-op cast from one type to another.
static BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1889
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
unsigned arg_size() const
Definition: InstrTypes.h:1408
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:864
bool isIntPredicate() const
Definition: InstrTypes.h:865
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:858
ConstantArray - Constant Array Declarations.
Definition: Constants.h:424
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1097
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:218
bool isNegative() const
Definition: Constants.h:201
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:206
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:149
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1012
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition: Constants.h:1050
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition: Constants.h:1037
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1040
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1043
static bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
Definition: ConstantRange.h:47
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1500
This is an important base class in LLVM.
Definition: Constant.h:42
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
unsigned getNonMetadataArgCount() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This represents the llvm.dbg.label instruction.
Metadata * getRawLabel() const
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
const BasicBlock * getParent() const
This is the common base class for debug info intrinsics for variables.
Metadata * getRawLocation() const
DILocalVariable * getVariable() const
Metadata * getRawVariable() const
Metadata * getRawExpression() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition: DebugLoc.h:106
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
bool empty() const
Definition: DenseMap.h:98
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
This instruction extracts a single (scalar) element from a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:420
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:443
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2449
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:249
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:254
const std::string & getGC() const
Definition: Function.cpp:838
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:95
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
const Function * getResolverFunction() const
Definition: Globals.cpp:617
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:83
const Constant * getResolver() const
Definition: GlobalIFunc.h:70
bool hasComdat() const
Definition: GlobalObject.h:128
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:565
bool hasExternalLinkage() const
Definition: GlobalValue.h:511
bool isDSOLocal() const
Definition: GlobalValue.h:305
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:298
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:290
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:533
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasDefaultVisibility() const
Definition: GlobalValue.h:249
bool hasPrivateLinkage() const
Definition: GlobalValue.h:527
bool hasHiddenVisibility() const
Definition: GlobalValue.h:250
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:281
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
bool hasComdat() const
Definition: GlobalValue.h:241
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:215
bool hasAppendingLinkage() const
Definition: GlobalValue.h:525
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:512
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field or array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:253
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:197
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:280
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:174
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:218
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:228
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:209
Metadata node.
Definition: Metadata.h:1069
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1430
bool isTemporary() const
Definition: Metadata.h:1253
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1428
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
bool isDistinct() const
Definition: Metadata.h:1252
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1249
LLVMContext & getContext() const
Definition: Metadata.h:1233
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:891
Metadata * get() const
Definition: Metadata.h:920
A single uniqued string.
Definition: Metadata.h:720
StringRef getString() const
Definition: Metadata.cpp:616
Typed, array-like tuple of metadata.
Definition: Metadata.h:1628
Tuple of metadata.
Definition: Metadata.h:1472
static bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:193
Root of the metadata hierarchy.
Definition: Metadata.h:62
void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5222
unsigned getMetadataID() const
Definition: Metadata.h:102
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:144
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:122
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:139
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:131
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:265
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:290
A tuple of MDNodes.
Definition: Metadata.h:1730
StringRef getName() const
Definition: Metadata.cpp:1405
void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:4883
iterator_range< op_iterator > operands()
Definition: Metadata.h:1826
op_range incoming_values()
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2212
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:346
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:435
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:455
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:409
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
bool containsScalableVectorType(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:388
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:600
Multiway switch.
Verify that the TBAA Metadatas are valid.
Definition: Verifier.h:39
bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
Definition: Verifier.cpp:7466
@ CanBeGlobal
This type may be used as the value type of a global variable.
Definition: DerivedTypes.h:771
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:248
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:215
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:230
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:258
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:239
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:221
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:212
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:343
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:218
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
Value * getValue() const
Definition: Metadata.h:490
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > materialized_users()
Definition: Value.h:415
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static constexpr uint64_t MaximumAlignment
Definition: Value.h:807
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:698
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:786
iterator_range< user_iterator > users()
Definition: Value.h:421
bool materialized_use_empty() const
Definition: Value.h:349
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:107
Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7582
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7594
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:158
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Entry
Definition: COFF.h:826
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
Definition: Function.cpp:1794
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Function.cpp:1382
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:218
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:219
bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics" that take ...
Definition: Function.cpp:1571
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1096
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:37
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
Definition: Function.cpp:1820
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1796
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ DW_MACINFO_undef
Definition: Dwarf.h:790
@ DW_MACINFO_start_file
Definition: Dwarf.h:791
@ DW_MACINFO_define
Definition: Dwarf.h:789
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
bool canInstructionHaveMMRAs(const Instruction &I)
@ Write
Definition: CodeGenData.h:103
unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2431
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7151
AllocFnKind
Definition: Attributes.h:49
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
void initializeVerifierLegacyPassPass(PassRegistry &)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2098
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7577
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7162
#define N
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:281
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:28
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1131
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1159
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1132
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:150
raw_ostream * OS
Definition: Verifier.cpp:142
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:152
LLVMContext & Context
Definition: Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition: Verifier.cpp:286
const Module & M
Definition: Verifier.cpp:143
const DataLayout & DL
Definition: Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:313
ModuleSlotTracker MST
Definition: Verifier.cpp:144