LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
123#include <algorithm>
124#include <cassert>
125#include <cstdint>
126#include <memory>
127#include <optional>
128#include <string>
129#include <utility>
130
131using namespace llvm;
132
134 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
135 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
136 "scopes are not dominating"));
137
138namespace llvm {
139
142 const Module &M;
144 const Triple &TT;
147
148 /// Track the brokenness of the module while recursively visiting.
149 bool Broken = false;
150 /// Broken debug info can be "recovered" from by stripping the debug info.
151 bool BrokenDebugInfo = false;
152 /// Whether to treat broken debug info as an error.
154
156 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
157 Context(M.getContext()) {}
158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
187 switch (Type) {
189 *OS << "value";
190 break;
192 *OS << "declare";
193 break;
195 *OS << "assign";
196 break;
198 *OS << "end";
199 break;
201 *OS << "any";
202 break;
203 };
204 }
205
206 void Write(const Metadata *MD) {
207 if (!MD)
208 return;
209 MD->print(*OS, MST, &M);
210 *OS << '\n';
211 }
212
  /// Unwrap a typed MD tuple wrapper and print the underlying node.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
216
217 void Write(const NamedMDNode *NMD) {
218 if (!NMD)
219 return;
220 NMD->print(*OS, MST);
221 *OS << '\n';
222 }
223
224 void Write(Type *T) {
225 if (!T)
226 return;
227 *OS << ' ' << *T;
228 }
229
230 void Write(const Comdat *C) {
231 if (!C)
232 return;
233 *OS << *C;
234 }
235
236 void Write(const APInt *AI) {
237 if (!AI)
238 return;
239 *OS << *AI << '\n';
240 }
241
  /// Print an unsigned integer followed by a newline.
  void Write(const unsigned i) { *OS << i << '\n'; }
243
244 // NOLINTNEXTLINE(readability-identifier-naming)
245 void Write(const Attribute *A) {
246 if (!A)
247 return;
248 *OS << A->getAsString() << '\n';
249 }
250
251 // NOLINTNEXTLINE(readability-identifier-naming)
252 void Write(const AttributeSet *AS) {
253 if (!AS)
254 return;
255 *OS << AS->getAsString() << '\n';
256 }
257
258 // NOLINTNEXTLINE(readability-identifier-naming)
259 void Write(const AttributeList *AL) {
260 if (!AL)
261 return;
262 AL->print(*OS);
263 }
264
  /// Print a lazily-formatted Printable followed by a newline.
  void Write(Printable P) { *OS << P << '\n'; }
266
267 template <typename T> void Write(ArrayRef<T> Vs) {
268 for (const T &V : Vs)
269 Write(V);
270 }
271
272 template <typename T1, typename... Ts>
273 void WriteTs(const T1 &V1, const Ts &... Vs) {
274 Write(V1);
275 WriteTs(Vs...);
276 }
277
278 template <typename... Ts> void WriteTs() {}
279
280public:
281 /// A check failed, so printout out the condition and the message.
282 ///
283 /// This provides a nice place to put a breakpoint if you want to see why
284 /// something is not correct.
  void CheckFailed(const Twine &Message) {
    // Only emit text when a diagnostic stream was provided.
    if (OS)
      *OS << Message << '\n';
    // Any failed check marks the whole module as broken.
    Broken = true;
  }
290
291 /// A check failed (with values to print).
292 ///
293 /// This calls the Message-only version so that the above is easier to set a
294 /// breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    // Delegate to the message-only overload first (nice breakpoint spot),
    // then dump the offending values if a stream is available.
    CheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
301
  /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // Broken debug info is tracked separately from general brokenness:
    // it can be "recovered" from by stripping the debug info.
    BrokenDebugInfo = true;
  }
309
310 /// A debug info check failed (with values to print).
  template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                            const Ts &... Vs) {
    // Report the message first, then dump the offending values.
    DebugInfoCheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
318};
319
320} // namespace llvm
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
  /// Construct a verifier bound to module \p M. \p OS may be null to
  /// suppress diagnostic output (failure paths guard every write on it).
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
398
  /// Whether any (recoverable) debug-info verification failure was seen.
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
  /// Verify a single function. \p F must belong to the module this
  /// instance was constructed with. Returns true when no (non-debug-info)
  /// problem was found.
  bool verify(const Function &F) {
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Bail out early if any block lacks a terminator; the rest of the
    // verifier assumes well-formed block structure.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can verify further
    // functions of the same module.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
450
  /// Verify the module that this instance of \c Verifier was initialized
  /// with. Covers module-level entities (globals, aliases, ifuncs, named
  /// metadata, comdats, module flags) and cross-function invariants;
  /// returns true when no (non-debug-info) problem was found.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
488
489private:
490 /// Whether a metadata node is allowed to be, or contain, a DILocation.
491 enum class AreDebugLocsAllowed { No, Yes };
492
493 /// Metadata that should be treated as a range, with slightly different
494 /// requirements.
495 enum class RangeLikeMetadataKind {
496 Range, // MD_range
497 AbsoluteSymbol, // MD_absolute_symbol
498 NoaliasAddrspace // MD_noalias_addrspace
499 };
500
501 // Verification methods...
502 void visitGlobalValue(const GlobalValue &GV);
503 void visitGlobalVariable(const GlobalVariable &GV);
504 void visitGlobalAlias(const GlobalAlias &GA);
505 void visitGlobalIFunc(const GlobalIFunc &GI);
506 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
507 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
508 const GlobalAlias &A, const Constant &C);
509 void visitNamedMDNode(const NamedMDNode &NMD);
510 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
511 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
512 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
513 void visitDIArgList(const DIArgList &AL, Function *F);
514 void visitComdat(const Comdat &C);
515 void visitModuleIdents();
516 void visitModuleCommandLines();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallStackMetadata(MDNode *MD);
531 void visitMemProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
533 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
534 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
535 void visitMMRAMetadata(Instruction &I, MDNode *MD);
536 void visitAnnotationMetadata(MDNode *Annotation);
537 void visitAliasScopeMetadata(const MDNode *MD);
538 void visitAliasScopeListMetadata(const MDNode *MD);
539 void visitAccessGroupMetadata(const MDNode *MD);
540
541 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
542#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
543#include "llvm/IR/Metadata.def"
544 void visitDIScope(const DIScope &N);
545 void visitDIVariable(const DIVariable &N);
546 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
547 void visitDITemplateParameter(const DITemplateParameter &N);
548
549 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
550
551 void visit(DbgLabelRecord &DLR);
552 void visit(DbgVariableRecord &DVR);
553 // InstVisitor overrides...
555 void visitDbgRecords(Instruction &I);
556 void visit(Instruction &I);
557
558 void visitTruncInst(TruncInst &I);
559 void visitZExtInst(ZExtInst &I);
560 void visitSExtInst(SExtInst &I);
561 void visitFPTruncInst(FPTruncInst &I);
562 void visitFPExtInst(FPExtInst &I);
563 void visitFPToUIInst(FPToUIInst &I);
564 void visitFPToSIInst(FPToSIInst &I);
565 void visitUIToFPInst(UIToFPInst &I);
566 void visitSIToFPInst(SIToFPInst &I);
567 void visitIntToPtrInst(IntToPtrInst &I);
568 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
569 void visitPtrToAddrInst(PtrToAddrInst &I);
570 void visitPtrToIntInst(PtrToIntInst &I);
571 void visitBitCastInst(BitCastInst &I);
572 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
573 void visitPHINode(PHINode &PN);
574 void visitCallBase(CallBase &Call);
575 void visitUnaryOperator(UnaryOperator &U);
576 void visitBinaryOperator(BinaryOperator &B);
577 void visitICmpInst(ICmpInst &IC);
578 void visitFCmpInst(FCmpInst &FC);
579 void visitExtractElementInst(ExtractElementInst &EI);
580 void visitInsertElementInst(InsertElementInst &EI);
581 void visitShuffleVectorInst(ShuffleVectorInst &EI);
582 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
583 void visitCallInst(CallInst &CI);
584 void visitInvokeInst(InvokeInst &II);
585 void visitGetElementPtrInst(GetElementPtrInst &GEP);
586 void visitLoadInst(LoadInst &LI);
587 void visitStoreInst(StoreInst &SI);
588 void verifyDominatesUse(Instruction &I, unsigned i);
589 void visitInstruction(Instruction &I);
590 void visitTerminator(Instruction &I);
591 void visitBranchInst(BranchInst &BI);
592 void visitReturnInst(ReturnInst &RI);
593 void visitSwitchInst(SwitchInst &SI);
594 void visitIndirectBrInst(IndirectBrInst &BI);
595 void visitCallBrInst(CallBrInst &CBI);
596 void visitSelectInst(SelectInst &SI);
597 void visitUserOp1(Instruction &I);
598 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
599 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
600 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
601 void visitVPIntrinsic(VPIntrinsic &VPI);
602 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
603 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
604 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
605 void visitFenceInst(FenceInst &FI);
606 void visitAllocaInst(AllocaInst &AI);
607 void visitExtractValueInst(ExtractValueInst &EVI);
608 void visitInsertValueInst(InsertValueInst &IVI);
609 void visitEHPadPredecessors(Instruction &I);
610 void visitLandingPadInst(LandingPadInst &LPI);
611 void visitResumeInst(ResumeInst &RI);
612 void visitCatchPadInst(CatchPadInst &CPI);
613 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
614 void visitCleanupPadInst(CleanupPadInst &CPI);
615 void visitFuncletPadInst(FuncletPadInst &FPI);
616 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
617 void visitCleanupReturnInst(CleanupReturnInst &CRI);
618
619 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
620 void verifySwiftErrorValue(const Value *SwiftErrorVal);
621 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
622 void verifyMustTailCall(CallInst &CI);
623 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
624 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
625 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
626 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
627 const Value *V);
628 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
629 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
630 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
631
632 void visitConstantExprsRecursively(const Constant *EntryC);
633 void visitConstantExpr(const ConstantExpr *CE);
634 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
635 void verifyInlineAsmCall(const CallBase &Call);
636 void verifyStatepoint(const CallBase &Call);
637 void verifyFrameRecoverIndices();
638 void verifySiblingFuncletUnwinds();
639
640 void verifyFragmentExpression(const DbgVariableRecord &I);
641 template <typename ValueOrMetadata>
642 void verifyFragmentExpression(const DIVariable &V,
644 ValueOrMetadata *Desc);
645 void verifyFnArgs(const DbgVariableRecord &DVR);
646 void verifyNotEntryValue(const DbgVariableRecord &I);
647
648 /// Module-level debug info verification...
649 void verifyCompileUnits();
650
651 /// Module-level verification that all @llvm.experimental.deoptimize
652 /// declarations share the same calling convention.
653 void verifyDeoptimizeCallingConvs();
654
655 void verifyAttachedCallBundle(const CallBase &Call,
656 const OperandBundleUse &BU);
657
658 /// Verify the llvm.experimental.noalias.scope.decl declarations
659 void verifyNoAliasScopeDecl();
660};
661
662} // end anonymous namespace
663
/// We know that cond should be true, if not print an error message.
///
/// NOTE: on failure this reports through CheckFailed(...) and then executes
/// `return;` from the *enclosing* function, so it may only be used inside
/// functions returning void.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
672
/// We know that a debug info condition should be true, if not print
/// an error message.
///
/// Like Check, but routes through DebugInfoCheckFailed(...) so the failure
/// is recorded as (recoverable) broken debug info rather than a broken
/// module; it also returns from the enclosing void function on failure.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
682
// Verify the DbgRecords attached to \p I, if any: the marker's back-pointer
// must reference I, PHI nodes may not carry records, every record must point
// back at I's marker, and attached debug locations / variable records are
// verified recursively. (CheckDI returns early from this function on the
// first failure.)
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    if (auto *Loc =
            dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
      visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
707
// Per-instruction entry point: check the attached debug records first,
// then require that no operand slot is null before any visitor logic
// dereferences it.
void Verifier::visit(Instruction &I) {
  visitDbgRecords(I);
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Check(I.getOperand(i) != nullptr, "Operand is null", &I);
}
714
715// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
716static void forEachUser(const Value *User,
718 llvm::function_ref<bool(const Value *)> Callback) {
719 if (!Visited.insert(User).second)
720 return;
721
723 while (!WorkList.empty()) {
724 const Value *Cur = WorkList.pop_back_val();
725 if (!Visited.insert(Cur).second)
726 continue;
727 if (Callback(Cur))
728 append_range(WorkList, Cur->materialized_users());
729 }
730}
731
732void Verifier::visitGlobalValue(const GlobalValue &GV) {
734 "Global is external, but doesn't have external or weak linkage!", &GV);
735
736 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
737 if (const MDNode *Associated =
738 GO->getMetadata(LLVMContext::MD_associated)) {
739 Check(Associated->getNumOperands() == 1,
740 "associated metadata must have one operand", &GV, Associated);
741 const Metadata *Op = Associated->getOperand(0).get();
742 Check(Op, "associated metadata must have a global value", GO, Associated);
743
744 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
745 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
746 if (VM) {
747 Check(isa<PointerType>(VM->getValue()->getType()),
748 "associated value must be pointer typed", GV, Associated);
749
750 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
751 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
752 "associated metadata must point to a GlobalObject", GO, Stripped);
753 Check(Stripped != GO,
754 "global values should not associate to themselves", GO,
755 Associated);
756 }
757 }
758
759 // FIXME: Why is getMetadata on GlobalValue protected?
760 if (const MDNode *AbsoluteSymbol =
761 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
762 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
763 DL.getIntPtrType(GO->getType()),
764 RangeLikeMetadataKind::AbsoluteSymbol);
765 }
766 }
767
768 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
769 "Only global variables can have appending linkage!", &GV);
770
771 if (GV.hasAppendingLinkage()) {
772 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
773 Check(GVar && GVar->getValueType()->isArrayTy(),
774 "Only global arrays can have appending linkage!", GVar);
775 }
776
777 if (GV.isDeclarationForLinker())
778 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
779
780 if (GV.hasDLLExportStorageClass()) {
782 "dllexport GlobalValue must have default or protected visibility",
783 &GV);
784 }
785 if (GV.hasDLLImportStorageClass()) {
787 "dllimport GlobalValue must have default visibility", &GV);
788 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
789 &GV);
790
791 Check((GV.isDeclaration() &&
794 "Global is marked as dllimport, but not external", &GV);
795 }
796
797 if (GV.isImplicitDSOLocal())
798 Check(GV.isDSOLocal(),
799 "GlobalValue with local linkage or non-default "
800 "visibility must be dso_local!",
801 &GV);
802
803 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
804 if (const Instruction *I = dyn_cast<Instruction>(V)) {
805 if (!I->getParent() || !I->getParent()->getParent())
806 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
807 I);
808 else if (I->getParent()->getParent()->getParent() != &M)
809 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
810 I->getParent()->getParent(),
811 I->getParent()->getParent()->getParent());
812 return false;
813 } else if (const Function *F = dyn_cast<Function>(V)) {
814 if (F->getParent() != &M)
815 CheckFailed("Global is used by function in a different module", &GV, &M,
816 F, F->getParent());
817 return false;
818 }
819 return true;
820 });
821}
822
823void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
824 Type *GVType = GV.getValueType();
825
826 if (MaybeAlign A = GV.getAlign()) {
827 Check(A->value() <= Value::MaximumAlignment,
828 "huge alignment values are unsupported", &GV);
829 }
830
831 if (GV.hasInitializer()) {
832 Check(GV.getInitializer()->getType() == GVType,
833 "Global variable initializer type does not match global "
834 "variable type!",
835 &GV);
837 "Global variable initializer must be sized", &GV);
838 visitConstantExprsRecursively(GV.getInitializer());
839 // If the global has common linkage, it must have a zero initializer and
840 // cannot be constant.
841 if (GV.hasCommonLinkage()) {
843 "'common' global must have a zero initializer!", &GV);
844 Check(!GV.isConstant(), "'common' global may not be marked constant!",
845 &GV);
846 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
847 }
848 }
849
850 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
851 GV.getName() == "llvm.global_dtors")) {
853 "invalid linkage for intrinsic global variable", &GV);
855 "invalid uses of intrinsic global variable", &GV);
856
857 // Don't worry about emitting an error for it not being an array,
858 // visitGlobalValue will complain on appending non-array.
859 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
860 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
861 PointerType *FuncPtrTy =
862 PointerType::get(Context, DL.getProgramAddressSpace());
863 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
864 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
865 STy->getTypeAtIndex(1) == FuncPtrTy,
866 "wrong type for intrinsic global variable", &GV);
867 Check(STy->getNumElements() == 3,
868 "the third field of the element type is mandatory, "
869 "specify ptr null to migrate from the obsoleted 2-field form");
870 Type *ETy = STy->getTypeAtIndex(2);
871 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
872 &GV);
873 }
874 }
875
876 if (GV.hasName() && (GV.getName() == "llvm.used" ||
877 GV.getName() == "llvm.compiler.used")) {
879 "invalid linkage for intrinsic global variable", &GV);
881 "invalid uses of intrinsic global variable", &GV);
882
883 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
884 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
885 Check(PTy, "wrong type for intrinsic global variable", &GV);
886 if (GV.hasInitializer()) {
887 const Constant *Init = GV.getInitializer();
888 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
889 Check(InitArray, "wrong initalizer for intrinsic global variable",
890 Init);
891 for (Value *Op : InitArray->operands()) {
892 Value *V = Op->stripPointerCasts();
893 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
894 isa<GlobalAlias>(V),
895 Twine("invalid ") + GV.getName() + " member", V);
896 Check(V->hasName(),
897 Twine("members of ") + GV.getName() + " must be named", V);
898 }
899 }
900 }
901 }
902
903 // Visit any debug info attachments.
905 GV.getMetadata(LLVMContext::MD_dbg, MDs);
906 for (auto *MD : MDs) {
907 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
908 visitDIGlobalVariableExpression(*GVE);
909 else
910 CheckDI(false, "!dbg attachment of global variable must be a "
911 "DIGlobalVariableExpression");
912 }
913
914 // Scalable vectors cannot be global variables, since we don't know
915 // the runtime size.
916 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
917
918 // Check if it is or contains a target extension type that disallows being
919 // used as a global.
921 "Global @" + GV.getName() + " has illegal target extension type",
922 GVType);
923
924 if (!GV.hasInitializer()) {
925 visitGlobalValue(GV);
926 return;
927 }
928
929 // Walk any aggregate initializers looking for bitcasts between address spaces
930 visitConstantExprsRecursively(GV.getInitializer());
931
932 visitGlobalValue(GV);
933}
934
935void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
937 Visited.insert(&GA);
938 visitAliaseeSubExpr(Visited, GA, C);
939}
940
// Recursively verify the aliasee expression of GA: cycles through other
// aliases, interposable targets, and declaration-only targets are rejected.
// Visited carries the aliases already seen on this walk.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
    Check(isa<GlobalValue>(C) &&
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
  }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Re-inserting an alias already on this path means the alias graph
      // contains a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse into operands so aliases nested anywhere in the constant
  // expression tree are also checked.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
979
// Verify a global alias: acceptable linkage, a non-null aliasee whose type
// matches the alias, and a well-formed aliasee expression tree.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
      "Alias should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, external, or available_externally linkage!",
      &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression for cycles / bad targets.
  visitAliaseeSubExpr(GA, *Aliasee);

  // Checks common to every global value.
  visitGlobalValue(GA);
}
997
// Verify a global ifunc: acceptable linkage and a resolver that is a defined
// function returning a pointer of the ifunc's own pointer type.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
        "IFunc resolver must return a pointer", &GI);

  // The resolver value must be a pointer in the ifunc's address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1020
1021void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1022 // There used to be various other llvm.dbg.* nodes, but we don't support
1023 // upgrading them and we want to reserve the namespace for future uses.
1024 if (NMD.getName().starts_with("llvm.dbg."))
1025 CheckDI(NMD.getName() == "llvm.dbg.cu",
1026 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1027 for (const MDNode *MD : NMD.operands()) {
1028 if (NMD.getName() == "llvm.dbg.cu")
1029 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1030
1031 if (!MD)
1032 continue;
1033
1034 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1035 }
1036}
1037
// Verify a metadata node and, transitively, everything reachable from its
// operands. AllowLocs controls whether DILocation operands are permitted.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each concrete MDNode subclass;
  // the case list below is generated from Metadata.def.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands: nested nodes inherit the DILocation policy, and
  // wrapped values get the value-as-metadata checks (with no function
  // context, so function-local metadata is rejected).
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1080
1081void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1082 Check(MD.getValue(), "Expected valid value", &MD);
1083 Check(!MD.getValue()->getType()->isMetadataTy(),
1084 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1085
1086 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1087 if (!L)
1088 return;
1089
1090 Check(F, "function-local metadata used outside a function", L);
1091
1092 // If this was an instruction, bb, or argument, verify that it is in the
1093 // function that we expect.
1094 Function *ActualF = nullptr;
1095 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1096 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1097 ActualF = I->getParent()->getParent();
1098 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1099 ActualF = BB->getParent();
1100 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1101 ActualF = A->getParent();
1102 assert(ActualF && "Unimplemented function local metadata case!");
1103
1104 Check(ActualF == F, "function-local metadata used in wrong function", L);
1105}
1106
1107void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1108 for (const ValueAsMetadata *VAM : AL.getArgs())
1109 visitValueAsMetadata(*VAM, F);
1110}
1111
1112void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1113 Metadata *MD = MDV.getMetadata();
1114 if (auto *N = dyn_cast<MDNode>(MD)) {
1115 visitMDNode(*N, AreDebugLocsAllowed::No);
1116 return;
1117 }
1118
1119 // Only visit each node once. Metadata can be mutually recursive, so this
1120 // avoids infinite recursion here, as well as being an optimization.
1121 if (!MDNodes.insert(MD).second)
1122 return;
1123
1124 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1125 visitValueAsMetadata(*V, F);
1126
1127 if (auto *AL = dyn_cast<DIArgList>(MD))
1128 visitDIArgList(*AL, F);
1129}
1130
1131static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1132static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1133static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1134
1135void Verifier::visitDILocation(const DILocation &N) {
1136 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1137 "location requires a valid scope", &N, N.getRawScope());
1138 if (auto *IA = N.getRawInlinedAt())
1139 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1140 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1141 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1142}
1143
// A GenericDINode only requires a nonzero DWARF tag.
void Verifier::visitGenericDINode(const GenericDINode &N) {
  CheckDI(N.getTag(), "invalid tag", &N);
}
1147
1148void Verifier::visitDIScope(const DIScope &N) {
1149 if (auto *F = N.getRawFile())
1150 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1151}
1152
1153void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1154 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1155 auto *BaseType = N.getRawBaseType();
1156 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1157 auto *LBound = N.getRawLowerBound();
1158 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1159 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1160 "LowerBound must be signed constant or DIVariable or DIExpression",
1161 &N);
1162 auto *UBound = N.getRawUpperBound();
1163 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1164 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1165 "UpperBound must be signed constant or DIVariable or DIExpression",
1166 &N);
1167 auto *Stride = N.getRawStride();
1168 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1169 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1170 "Stride must be signed constant or DIVariable or DIExpression", &N);
1171 auto *Bias = N.getRawBias();
1172 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1173 isa<DIExpression>(Bias),
1174 "Bias must be signed constant or DIVariable or DIExpression", &N);
1175 // Subrange types currently only support constant size.
1176 auto *Size = N.getRawSizeInBits();
1177 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1178 "SizeInBits must be a constant");
1179}
1180
1181void Verifier::visitDISubrange(const DISubrange &N) {
1182 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1183 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1184 "Subrange can have any one of count or upperBound", &N);
1185 auto *CBound = N.getRawCountNode();
1186 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1187 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1188 "Count must be signed constant or DIVariable or DIExpression", &N);
1189 auto Count = N.getCount();
1190 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1191 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1192 "invalid subrange count", &N);
1193 auto *LBound = N.getRawLowerBound();
1194 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1195 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1196 "LowerBound must be signed constant or DIVariable or DIExpression",
1197 &N);
1198 auto *UBound = N.getRawUpperBound();
1199 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1200 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1201 "UpperBound must be signed constant or DIVariable or DIExpression",
1202 &N);
1203 auto *Stride = N.getRawStride();
1204 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1205 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1206 "Stride must be signed constant or DIVariable or DIExpression", &N);
1207}
1208
1209void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1210 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1211 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1212 "GenericSubrange can have any one of count or upperBound", &N);
1213 auto *CBound = N.getRawCountNode();
1214 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1215 "Count must be signed constant or DIVariable or DIExpression", &N);
1216 auto *LBound = N.getRawLowerBound();
1217 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1218 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1219 "LowerBound must be signed constant or DIVariable or DIExpression",
1220 &N);
1221 auto *UBound = N.getRawUpperBound();
1222 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1223 "UpperBound must be signed constant or DIVariable or DIExpression",
1224 &N);
1225 auto *Stride = N.getRawStride();
1226 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1227 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1228 "Stride must be signed constant or DIVariable or DIExpression", &N);
1229}
1230
// An enumerator node only needs the DW_TAG_enumerator tag.
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
}
1234
1235void Verifier::visitDIBasicType(const DIBasicType &N) {
1236 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1237 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1238 N.getTag() == dwarf::DW_TAG_string_type,
1239 "invalid tag", &N);
1240 // Basic types currently only support constant size.
1241 auto *Size = N.getRawSizeInBits();
1242 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1243 "SizeInBits must be a constant");
1244}
1245
// Verify a DIFixedPointType: runs the common basic-type checks, then requires
// a base-type tag, a fixed-point encoding, and kind-consistent factor /
// numerator / denominator fields.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types refine basic types; apply those checks first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
        N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
        (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1264
1265void Verifier::visitDIStringType(const DIStringType &N) {
1266 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1267 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1268 &N);
1269}
1270
1271void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1272 // Common scope checks.
1273 visitDIScope(N);
1274
1275 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1276 N.getTag() == dwarf::DW_TAG_pointer_type ||
1277 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1278 N.getTag() == dwarf::DW_TAG_reference_type ||
1279 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1280 N.getTag() == dwarf::DW_TAG_const_type ||
1281 N.getTag() == dwarf::DW_TAG_immutable_type ||
1282 N.getTag() == dwarf::DW_TAG_volatile_type ||
1283 N.getTag() == dwarf::DW_TAG_restrict_type ||
1284 N.getTag() == dwarf::DW_TAG_atomic_type ||
1285 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1286 N.getTag() == dwarf::DW_TAG_member ||
1287 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1288 N.getTag() == dwarf::DW_TAG_inheritance ||
1289 N.getTag() == dwarf::DW_TAG_friend ||
1290 N.getTag() == dwarf::DW_TAG_set_type ||
1291 N.getTag() == dwarf::DW_TAG_template_alias,
1292 "invalid tag", &N);
1293 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1294 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1295 N.getRawExtraData());
1296 }
1297
1298 if (N.getTag() == dwarf::DW_TAG_set_type) {
1299 if (auto *T = N.getRawBaseType()) {
1300 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1301 auto *Subrange = dyn_cast_or_null<DISubrangeType>(T);
1302 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1303 CheckDI(
1304 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1305 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1306 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1307 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1308 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1309 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1310 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1311 "invalid set base type", &N, T);
1312 }
1313 }
1314
1315 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1316 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1317 N.getRawBaseType());
1318
1319 if (N.getDWARFAddressSpace()) {
1320 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1321 N.getTag() == dwarf::DW_TAG_reference_type ||
1322 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1323 "DWARF address space only applies to pointer or reference types",
1324 &N);
1325 }
1326
1327 auto *Size = N.getRawSizeInBits();
1328 CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
1329 isa<DIExpression>(Size),
1330 "SizeInBits must be a constant or DIVariable or DIExpression");
1331}
1332
1333/// Detect mutually exclusive flags.
1334static bool hasConflictingReferenceFlags(unsigned Flags) {
1335 return ((Flags & DINode::FlagLValueReference) &&
1336 (Flags & DINode::FlagRValueReference)) ||
1337 ((Flags & DINode::FlagTypePassByValue) &&
1338 (Flags & DINode::FlagTypePassByReference));
1339}
1340
1341void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1342 auto *Params = dyn_cast<MDTuple>(&RawParams);
1343 CheckDI(Params, "invalid template params", &N, &RawParams);
1344 for (Metadata *Op : Params->operands()) {
1345 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1346 &N, Params, Op);
1347 }
1348}
1349
// Verify a DICompositeType: one of the composite tags, valid scope/base
// type/elements/vtable holder, no stale flags, and array-only attributes
// (dataLocation/associated/allocated/rank) restricted to array types.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // DIBlockByRefStruct was retired; reject the stale flag bit explicitly.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): the message below names "DISubprogram" although this check
  // runs on DICompositeType — looks like a copy/paste artifact; confirm.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    // A vector must be described by exactly one subrange element.
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The following attributes are only meaningful on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  // The size may be constant or computed (variable/expression).
  auto *Size = N.getRawSizeInBits();
  CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
              isa<DIExpression>(Size),
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1424
// Verify a DISubroutineType: correct tag and a type array whose entries are
// all types (null entries are accepted by isType, e.g. a void return).
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1436
1437void Verifier::visitDIFile(const DIFile &N) {
1438 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1439 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1440 if (Checksum) {
1441 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1442 "invalid checksum kind", &N);
1443 size_t Size;
1444 switch (Checksum->Kind) {
1445 case DIFile::CSK_MD5:
1446 Size = 32;
1447 break;
1448 case DIFile::CSK_SHA1:
1449 Size = 40;
1450 break;
1451 case DIFile::CSK_SHA256:
1452 Size = 64;
1453 break;
1454 }
1455 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1456 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1457 "invalid checksum", &N);
1458 }
1459}
1460
1461void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1462 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1463 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1464
1465 // Don't bother verifying the compilation directory or producer string
1466 // as those could be empty.
1467 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1468 N.getRawFile());
1469 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1470 N.getFile());
1471
1472 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1473 "invalid emission kind", &N);
1474
1475 if (auto *Array = N.getRawEnumTypes()) {
1476 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1477 for (Metadata *Op : N.getEnumTypes()->operands()) {
1478 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1479 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1480 "invalid enum type", &N, N.getEnumTypes(), Op);
1481 }
1482 }
1483 if (auto *Array = N.getRawRetainedTypes()) {
1484 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1485 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1486 CheckDI(
1487 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1488 !cast<DISubprogram>(Op)->isDefinition())),
1489 "invalid retained type", &N, Op);
1490 }
1491 }
1492 if (auto *Array = N.getRawGlobalVariables()) {
1493 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1494 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1495 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1496 "invalid global variable ref", &N, Op);
1497 }
1498 }
1499 if (auto *Array = N.getRawImportedEntities()) {
1500 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1501 for (Metadata *Op : N.getImportedEntities()->operands()) {
1502 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1503 &N, Op);
1504 }
1505 }
1506 if (auto *Array = N.getRawMacros()) {
1507 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1508 for (Metadata *Op : N.getMacros()->operands()) {
1509 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1510 }
1511 }
1512 CUVisited.insert(&N);
1513}
1514
// Verify a DISubprogram, distinguishing definitions (distinct, attached to a
// compile unit) from declarations (part of the type hierarchy, no unit).
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A referenced declaration must itself be a declaration, not a definition.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
                     isa<DIImportedEntity>(Op)),
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  // DIFlagAllCallsDescribed only makes sense on a definition.
  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1579
1580void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1581 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1582 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1583 "invalid local scope", &N, N.getRawScope());
1584 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1585 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1586}
1587
1588void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1589 visitDILexicalBlockBase(N);
1590
1591 CheckDI(N.getLine() || !N.getColumn(),
1592 "cannot have column info without line info", &N);
1593}
1594
// A DILexicalBlockFile has no constraints beyond the common lexical-block
// checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1598
1599void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1600 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1601 if (auto *S = N.getRawScope())
1602 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1603 if (auto *S = N.getRawDecl())
1604 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1605}
1606
1607void Verifier::visitDINamespace(const DINamespace &N) {
1608 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1609 if (auto *S = N.getRawScope())
1610 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1611}
1612
1613void Verifier::visitDIMacro(const DIMacro &N) {
1614 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1615 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1616 "invalid macinfo type", &N);
1617 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1618 if (!N.getValue().empty()) {
1619 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1620 }
1621}
1622
1623void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1624 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1625 "invalid macinfo type", &N);
1626 if (auto *F = N.getRawFile())
1627 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1628
1629 if (auto *Array = N.getRawElements()) {
1630 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1631 for (Metadata *Op : N.getElements()->operands()) {
1632 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1633 }
1634 }
1635}
1636
// A module node needs the DW_TAG_module tag and a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1641
// Common check for all template parameters: the type is null or a DIType.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1645
// A type template parameter: common parameter checks plus the specific tag.
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
1652
1653void Verifier::visitDITemplateValueParameter(
1654 const DITemplateValueParameter &N) {
1655 visitDITemplateParameter(N);
1656
1657 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1658 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1659 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1660 "invalid tag", &N);
1661}
1662
1663void Verifier::visitDIVariable(const DIVariable &N) {
1664 if (auto *S = N.getRawScope())
1665 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1666 if (auto *F = N.getRawFile())
1667 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1668}
1669
1670void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1671 // Checks common to all variables.
1672 visitDIVariable(N);
1673
1674 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1675 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1676 // Check only if the global variable is not an extern
1677 if (N.isDefinition())
1678 CheckDI(N.getType(), "missing global variable type", &N);
1679 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1680 CheckDI(isa<DIDerivedType>(Member),
1681 "invalid static data member declaration", &N, Member);
1682 }
1683}
1684
1685void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1686 // Checks common to all variables.
1687 visitDIVariable(N);
1688
1689 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1690 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1691 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1692 "local variable requires a valid scope", &N, N.getRawScope());
1693 if (auto Ty = N.getType())
1694 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1695}
1696
// DIAssignID nodes are bare distinct markers: distinct, with no operands.
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1701
1702void Verifier::visitDILabel(const DILabel &N) {
1703 if (auto *S = N.getRawScope())
1704 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1705 if (auto *F = N.getRawFile())
1706 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1707
1708 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1709 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1710 "label requires a valid scope", &N, N.getRawScope());
1711}
1712
// Defer to DIExpression's own validation of its operand stream.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1716
1717void Verifier::visitDIGlobalVariableExpression(
1718 const DIGlobalVariableExpression &GVE) {
1719 CheckDI(GVE.getVariable(), "missing variable");
1720 if (auto *Var = GVE.getVariable())
1721 visitDIGlobalVariable(*Var);
1722 if (auto *Expr = GVE.getExpression()) {
1723 visitDIExpression(*Expr);
1724 if (auto Fragment = Expr->getFragmentInfo())
1725 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1726 }
1727}
1728
1729void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1730 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1731 if (auto *T = N.getRawType())
1732 CheckDI(isType(T), "invalid type ref", &N, T);
1733 if (auto *F = N.getRawFile())
1734 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1735}
1736
1737void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1738 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1739 N.getTag() == dwarf::DW_TAG_imported_declaration,
1740 "invalid tag", &N);
1741 if (auto *S = N.getRawScope())
1742 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1743 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1744 N.getRawEntity());
1745}
1746
1747void Verifier::visitComdat(const Comdat &C) {
1748 // In COFF the Module is invalid if the GlobalValue has private linkage.
1749 // Entities with private linkage don't have entries in the symbol table.
1750 if (TT.isOSBinFormatCOFF())
1751 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1752 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1753 GV);
1754}
1755
1756void Verifier::visitModuleIdents() {
1757 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1758 if (!Idents)
1759 return;
1760
1761 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1762 // Scan each llvm.ident entry and make sure that this requirement is met.
1763 for (const MDNode *N : Idents->operands()) {
1764 Check(N->getNumOperands() == 1,
1765 "incorrect number of operands in llvm.ident metadata", N);
1766 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1767 ("invalid value for llvm.ident metadata entry operand"
1768 "(the operand should be a string)"),
1769 N->getOperand(0));
1770 }
1771}
1772
1773void Verifier::visitModuleCommandLines() {
1774 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1775 if (!CommandLines)
1776 return;
1777
1778 // llvm.commandline takes a list of metadata entry. Each entry has only one
1779 // string. Scan each llvm.commandline entry and make sure that this
1780 // requirement is met.
1781 for (const MDNode *N : CommandLines->operands()) {
1782 Check(N->getNumOperands() == 1,
1783 "incorrect number of operands in llvm.commandline metadata", N);
1784 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1785 ("invalid value for llvm.commandline metadata entry operand"
1786 "(the operand should be a string)"),
1787 N->getOperand(0));
1788 }
1789}
1790
// Validate the llvm.module.flags named metadata: visit every flag node,
// enforce the pairing of the AArch64 PAuth ABI platform/version flags, and
// then cross-check each recorded 'require' flag against the flags that were
// actually seen.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  // NOTE(review): the declaration of the SeenIDs map used below appears to be
  // elided from this excerpt — confirm against the full source.
  SmallVector<const MDNode*, 16> Requirements;
  // uint64_t(-1) acts as a "flag not present" sentinel for both PAuth values.
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(MDN, SeenIDs, Requirements);
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        if (const auto *PAP =
                mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        if (const auto *PAV =
                mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // The two PAuth ABI flags must be present together or absent together.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
1841
// Verify one entry of llvm.module.flags: the node shape (behavior, ID,
// value), behavior-specific constraints on the value, ID uniqueness, and a
// handful of flags with special semantics ("wchar_size", "Linker Options",
// "SemanticInterposition", "CG Profile"). 'require' flags are pushed onto
// Requirements for later validation by visitModuleFlags().
// NOTE(review): a middle parameter of the signature (the SeenIDs map) appears
// to be elided from this excerpt — confirm against the full source.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  // NOTE(review): the declaration of MFB (the parsed merge behavior) appears
  // to be elided from this excerpt.
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // Distinguish "not an integer at all" from "an integer with an
    // unrecognized behavior value" in the diagnostics.
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
          "invalid behavior operand in module flag (expected constant integer)",
          Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
          "invalid value for 'max' module flag (expected constant integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  if (ID->getString() == "wchar_size") {
    // NOTE(review): the declaration of 'Value' appears to be elided from this
    // excerpt (only the initializer's continuation line is visible).
        = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    // NOTE(review): the declaration of 'Value' appears to be elided from this
    // excerpt (only the initializer expression is visible).
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the "CG Profile" node is a (caller, callee, count)
    // triple, validated individually.
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
1949
1950void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1951 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1952 if (!FuncMDO)
1953 return;
1954 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1955 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1956 "expected a Function or null", FuncMDO);
1957 };
1958 auto Node = dyn_cast_or_null<MDNode>(MDO);
1959 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1960 CheckFunction(Node->getOperand(0));
1961 CheckFunction(Node->getOperand(1));
1962 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1963 Check(Count && Count->getType()->isIntegerTy(),
1964 "expected an integer constant", Node->getOperand(2));
1965}
1966
// Verify that each attribute in Attrs is well-formed in itself: known
// string-boolean attributes must carry "", "true" or "false", and enum/int
// attributes must have an argument exactly when their kind requires one.
// V is the value printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// The generated Attributes.inc expands ATTRIBUTE_STRBOOL once per known
// string-boolean attribute, producing a value check for each.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // An int attribute kind must carry an argument; a plain enum must not.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
1992
1993// VerifyParameterAttrs - Check the given attributes for an argument or return
1994// value of the specified type. The value V is printed in error messages.
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Enforces: attributes are legal in parameter position, mutually exclusive
// attribute combinations are rejected, type-incompatible attributes are
// rejected, and size/shape constraints on pointer/range attributes hold.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' may only be combined with 'range' (the bool subtraction drops
  // 'range' from the count before requiring exactly one attribute).
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  // Pairwise-incompatible attribute combinations.
  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject any non-string attribute the target type cannot carry.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
      // NOTE(review): the Check(...) condition line for this diagnostic
      // appears to be elided from this excerpt.
          "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
    // NOTE(review): the Check(...) condition line for the ordered-ranges
    // diagnostic appears to be elided from this excerpt.
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
    // NOTE(review): the Check(...) condition line comparing the range width
    // against the type width appears to be elided from this excerpt.
        "Range bit width must match type bit width!", V);
  }
}
2159
2160void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2161 const Value *V) {
2162 if (Attrs.hasFnAttr(Attr)) {
2163 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2164 unsigned N;
2165 if (S.getAsInteger(10, N))
2166 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2167 }
2168}
2169
2170// Check parameter attributes against a function type.
2171// The value V is printed in error messages.
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Verifies, in order: attribute-list/context consistency, return-value
// attributes, per-parameter attributes (including one-per-function
// attributes like nest/sret/swiftself), and finally function-level
// attributes and their mutual-exclusion and value-format rules.
// IsIntrinsic gates the 'immarg' restriction; IsInlineAsm gates
// 'elementtype'.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Only validate context ownership once per distinct attribute list.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  // Track attributes that may appear on at most one parameter.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  // Track the widest fixed-vector parameter/return width (used for the
  // X86 EVEX512 check below).
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Verify function-level attributes.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!", V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
        V);

  if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!", V);
  }

  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!",
        V);

  if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!",
          V);
  }

  // At most one ZA-state attribute may be present (bools sum as integers).
  Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
         Attrs.hasFnAttr("aarch64_inout_za") +
         Attrs.hasFnAttr("aarch64_out_za") +
         Attrs.hasFnAttr("aarch64_preserves_za") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  Check((Attrs.hasFnAttr("aarch64_new_zt0") +
         Attrs.hasFnAttr("aarch64_in_zt0") +
         Attrs.hasFnAttr("aarch64_inout_zt0") +
         Attrs.hasFnAttr("aarch64_out_zt0") +
         Attrs.hasFnAttr("aarch64_preserves_zt0") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
    // NOTE(review): the Check(...) condition line for this diagnostic appears
    // to be elided from this excerpt.
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    // Validate that an allocsize index refers to an integer parameter.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args->first))
      return;

    if (Args->second && !CheckParam("number of elements", *Args->second))
      return;
  }

  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
    // NOTE(review): several lines of this allockind validation (the masking
    // of the base kind and parts of the conditions) appear to be elided from
    // this excerpt.
    if (!is_contained(
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
    Function *Variant = M.getFunction(S);
    if (Variant) {
      // The named variant must share the alloc-family, be a zeroed
      // allocator, and have an identical signature.
      Attribute Family = Attrs.getFnAttr("alloc-family");
      Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'");

      // NOTE(review): the closing line of this condition appears to be elided
      // from this excerpt.
      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'");

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature");
    }
  }

  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);
    else if (!isPowerOf2_32(VScaleMin))
      CheckFailed("'vscale_range' minimum must be power-of-two value", V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
    else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
      CheckFailed("'vscale_range' maximum must be power-of-two value", V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  // Check EVEX512 feature.
  if (TT.isX86() && MaxParameterWidth >= 512) {
    Attribute TargetFeaturesAttr = Attrs.getFnAttr("target-features");
    if (TargetFeaturesAttr.isValid()) {
      StringRef TF = TargetFeaturesAttr.getValueAsString();
      Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
            "512-bit vector arguments require 'evex512' for AVX512", V);
    }
  }

  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  if (Attrs.hasFnAttr("patchable-function-entry-section"))
    Check(!Attrs.getFnAttr("patchable-function-entry-section")
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty");
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);

  if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf")
      CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key")
      CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
                  V);
    if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
      CheckFailed(
          "'sign-return-address-key' present without `sign-return-address`");
    }
  }

  if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-target-enforcement' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
                  V);
  }

  if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
    StringRef S = A.getValueAsString();
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
    if (!Info)
      CheckFailed("invalid name for a VFABI variant: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
    StringRef S = A.getValueAsString();
    // NOTE(review): the condition guarding this diagnostic appears to be
    // elided from this excerpt.
      CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
    StringRef S = A.getValueAsString();
    // NOTE(review): the condition guarding this diagnostic appears to be
    // elided from this excerpt.
      CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
                  V);
  }
}
2525
// Verify function-level metadata attachments. Currently checks the shape of
// !prof nodes (name string plus an integer entry count) and !kcfi_type nodes
// (a single 32-bit integer constant).
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // NOTE(review): the condition guarding this diagnostic (and the opening
      // of its block) appears to be elided from this excerpt.
        CheckFailed("'unknown' !prof metadata should appear only on "
                    "instructions supporting the 'branch_weights' metadata",
                    MD);
        continue;
      }
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
      Check(isa<MDString>(MD->getOperand(0)),
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      // NOTE(review): the Check(...) call comparing ProfName against the
      // accepted names appears to be partially elided from this excerpt.
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
      Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
      Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
      Check(cast<ConstantInt>(C)->getBitWidth() == 32,
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2574
// Iteratively walk the operand graph rooted at EntryC, visiting each constant
// expression and ptrauth constant exactly once. ConstantExprVisited memoizes
// across calls, so shared subtrees are only checked the first time they are
// reached.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  // NOTE(review): the declaration of the Stack worklist appears to be elided
  // from this excerpt.
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2611
2612void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2613 if (CE->getOpcode() == Instruction::BitCast)
2614 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2615 CE->getType()),
2616 "Invalid bitcast", CE);
2617 else if (CE->getOpcode() == Instruction::PtrToAddr)
2618 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2619}
2620
// Verify the structural invariants of a signed pointer-authentication
// constant: pointer-typed base, matching result type, i32 key, and i64
// discriminator.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

  // NOTE(review): the Check( call for the address discriminator appears
  // truncated in this excerpt; only its message string remains — confirm
  // against upstream before editing.
        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");
}
2637
2638bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2639 // There shouldn't be more attribute sets than there are parameters plus the
2640 // function and return value.
2641 return Attrs.getNumAttrSets() <= Params + 2;
2642}
2643
2644void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2645 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2646 unsigned ArgNo = 0;
2647 unsigned LabelNo = 0;
2648 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2649 if (CI.Type == InlineAsm::isLabel) {
2650 ++LabelNo;
2651 continue;
2652 }
2653
2654 // Only deal with constraints that correspond to call arguments.
2655 if (!CI.hasArg())
2656 continue;
2657
2658 if (CI.isIndirect) {
2659 const Value *Arg = Call.getArgOperand(ArgNo);
2660 Check(Arg->getType()->isPointerTy(),
2661 "Operand for indirect constraint must have pointer type", &Call);
2662
2663 Check(Call.getParamElementType(ArgNo),
2664 "Operand for indirect constraint must have elementtype attribute",
2665 &Call);
2666 } else {
2667 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2668 "Elementtype attribute can only be applied for indirect "
2669 "constraints",
2670 &Call);
2671 }
2672
2673 ArgNo++;
2674 }
2675
2676 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2677 Check(LabelNo == CallBr->getNumIndirectDests(),
2678 "Number of label constraints does not match number of callbr dests",
2679 &Call);
2680 } else {
2681 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2682 &Call);
2683 }
2684}
2685
2686/// Verify that statepoint intrinsic is well formed.
2687void Verifier::verifyStatepoint(const CallBase &Call) {
2688 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2689
2690 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2691 !Call.onlyAccessesArgMemory(),
2692 "gc.statepoint must read and write all memory to preserve "
2693 "reordering restrictions required by safepoint semantics",
2694 Call);
2695
2696 const int64_t NumPatchBytes =
2697 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2698 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2699 Check(NumPatchBytes >= 0,
2700 "gc.statepoint number of patchable bytes must be "
2701 "positive",
2702 Call);
2703
2704 Type *TargetElemType = Call.getParamElementType(2);
2705 Check(TargetElemType,
2706 "gc.statepoint callee argument must have elementtype attribute", Call);
2707 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2708 Check(TargetFuncType,
2709 "gc.statepoint callee elementtype must be function type", Call);
2710
2711 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2712 Check(NumCallArgs >= 0,
2713 "gc.statepoint number of arguments to underlying call "
2714 "must be positive",
2715 Call);
2716 const int NumParams = (int)TargetFuncType->getNumParams();
2717 if (TargetFuncType->isVarArg()) {
2718 Check(NumCallArgs >= NumParams,
2719 "gc.statepoint mismatch in number of vararg call args", Call);
2720
2721 // TODO: Remove this limitation
2722 Check(TargetFuncType->getReturnType()->isVoidTy(),
2723 "gc.statepoint doesn't support wrapping non-void "
2724 "vararg functions yet",
2725 Call);
2726 } else
2727 Check(NumCallArgs == NumParams,
2728 "gc.statepoint mismatch in number of call args", Call);
2729
2730 const uint64_t Flags
2731 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2732 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2733 "unknown flag used in gc.statepoint flags argument", Call);
2734
2735 // Verify that the types of the call parameter arguments match
2736 // the type of the wrapped callee.
2737 AttributeList Attrs = Call.getAttributes();
2738 for (int i = 0; i < NumParams; i++) {
2739 Type *ParamType = TargetFuncType->getParamType(i);
2740 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2741 Check(ArgType == ParamType,
2742 "gc.statepoint call argument does not match wrapped "
2743 "function type",
2744 Call);
2745
2746 if (TargetFuncType->isVarArg()) {
2747 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2748 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2749 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2750 }
2751 }
2752
2753 const int EndCallArgsInx = 4 + NumCallArgs;
2754
2755 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2756 Check(isa<ConstantInt>(NumTransitionArgsV),
2757 "gc.statepoint number of transition arguments "
2758 "must be constant integer",
2759 Call);
2760 const int NumTransitionArgs =
2761 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2762 Check(NumTransitionArgs == 0,
2763 "gc.statepoint w/inline transition bundle is deprecated", Call);
2764 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2765
2766 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2767 Check(isa<ConstantInt>(NumDeoptArgsV),
2768 "gc.statepoint number of deoptimization arguments "
2769 "must be constant integer",
2770 Call);
2771 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2772 Check(NumDeoptArgs == 0,
2773 "gc.statepoint w/inline deopt operands is deprecated", Call);
2774
2775 const int ExpectedNumArgs = 7 + NumCallArgs;
2776 Check(ExpectedNumArgs == (int)Call.arg_size(),
2777 "gc.statepoint too many arguments", Call);
2778
2779 // Check that the only uses of this gc.statepoint are gc.result or
2780 // gc.relocate calls which are tied to this statepoint and thus part
2781 // of the same statepoint sequence
2782 for (const User *U : Call.users()) {
2783 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2784 Check(UserCall, "illegal use of statepoint token", Call, U);
2785 if (!UserCall)
2786 continue;
2787 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2788 "gc.result or gc.relocate are the only value uses "
2789 "of a gc.statepoint",
2790 Call, U);
2791 if (isa<GCResultInst>(UserCall)) {
2792 Check(UserCall->getArgOperand(0) == &Call,
2793 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2794 } else if (isa<GCRelocateInst>(Call)) {
2795 Check(UserCall->getArgOperand(0) == &Call,
2796 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2797 }
2798 }
2799
2800 // Note: It is legal for a single derived pointer to be listed multiple
2801 // times. It's non-optimal, but it is legal. It can also happen after
2802 // insertion if we strip a bitcast away.
2803 // Note: It is really tempting to check that each base is relocated and
2804 // that a derived pointer is never reused as a base pointer. This turns
2805 // out to be problematic since optimizations run after safepoint insertion
2806 // can recognize equality properties that the insertion logic doesn't know
2807 // about. See example statepoint.ll in the verifier subdirectory
2808}
2809
2810void Verifier::verifyFrameRecoverIndices() {
2811 for (auto &Counts : FrameEscapeInfo) {
2812 Function *F = Counts.first;
2813 unsigned EscapedObjectCount = Counts.second.first;
2814 unsigned MaxRecoveredIndex = Counts.second.second;
2815 Check(MaxRecoveredIndex <= EscapedObjectCount,
2816 "all indices passed to llvm.localrecover must be less than the "
2817 "number of arguments passed to llvm.localescape in the parent "
2818 "function",
2819 F);
2820 }
2821}
2822
2823static Instruction *getSuccPad(Instruction *Terminator) {
2824 BasicBlock *UnwindDest;
2825 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2826 UnwindDest = II->getUnwindDest();
2827 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2828 UnwindDest = CSI->getUnwindDest();
2829 else
2830 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2831 return &*UnwindDest->getFirstNonPHIIt();
2832}
2833
// Detect illegal cycles among sibling EH funclets: walk each pad's chain of
// unwind successors and report an error if a pad can (transitively) unwind
// to itself.
// NOTE(review): the declarations of Visited/Active (and CycleNodes below)
// are not visible in this excerpt.
void Verifier::verifySiblingFuncletInfo() {
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2875
// visitFunction - Verify that a function is ok.
//
// Checks signature/argument agreement, attribute placement, calling
// convention restrictions, metadata attachments, the personality function,
// intrinsic usage, and that every !dbg location chains back to this
// function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions may not reference their formal arguments.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  // NOTE(review): the case labels for the calling conventions that lead
  // into the following checks are elided in this excerpt — confirm against
  // upstream before editing this switch.
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Only intrinsics may traffic in metadata/token/x86_amx values.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): the declaration of MDs is elided in this excerpt.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // Each DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  // NOTE(review): the declaration of Seen is elided in this excerpt.
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if (SP && ((Scope != SP) && !Seen.insert(SP).second))
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3213
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks terminator presence, PHI-node/predecessor agreement, instruction
// parent pointers, and absence of trailing debug records.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  // NOTE(review): the declarations of Preds and Values are elided in this
  // excerpt; Preds presumably holds BB's predecessor blocks — confirm.
  if (isa<PHINode>(BB.front())) {
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      // Each predecessor contributes exactly one incoming entry.
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3273
// Common checks for all terminator instructions.
void Verifier::visitTerminator(Instruction &I) {
  // Ensure that terminators only exist at the end of the basic block.
  Check(&I == I.getParent()->getTerminator(),
        "Terminator found in the middle of a basic block!", I.getParent());
  // NOTE(review): a trailing statement (presumably visitInstruction(I)) is
  // elided in this excerpt — confirm against upstream.
}
3280
// Verify a branch: a conditional branch's condition must be i1.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional()) {
    // NOTE(review): the Check( line verifying the condition type lost its
    // first line in this excerpt; only the message string remains — confirm
    // against upstream before editing.
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  }
  visitTerminator(BI);
}
3288
3289void Verifier::visitReturnInst(ReturnInst &RI) {
3290 Function *F = RI.getParent()->getParent();
3291 unsigned N = RI.getNumOperands();
3292 if (F->getReturnType()->isVoidTy())
3293 Check(N == 0,
3294 "Found return instr that returns non-void in Function of void "
3295 "return type!",
3296 &RI, F->getReturnType());
3297 else
3298 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3299 "Function return type does not match operand "
3300 "type of return inst!",
3301 &RI, F->getReturnType());
3302
3303 // Check to make sure that the return value has necessary properties for
3304 // terminators...
3305 visitTerminator(RI);
3306}
3307
// Verify a switch: void result, constant-integer case values of the
// condition's type, and no duplicate cases.
// NOTE(review): the declaration of Constants (a set used for duplicate
// detection) is elided in this excerpt.
void Verifier::visitSwitchInst(SwitchInst &SI) {
  Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
  // Check to make sure that all of the constants in the switch instruction
  // have the same type as the switched-on value.
  Type *SwitchTy = SI.getCondition()->getType();
  for (auto &Case : SI.cases()) {
    // Operand layout: operand 2*i+2 is the i-th case value.
    Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
          "Case value is not a constant integer.", &SI);
    Check(Case.getCaseValue()->getType() == SwitchTy,
          "Switch constants must all be same type as switch value!", &SI);
    Check(Constants.insert(Case.getCaseValue()).second,
          "Duplicate integer as switch case", &SI, Case.getCaseValue());
  }

  visitTerminator(SI);
}
3325
// Verify an indirectbr: address operand and all destinations must have
// pointer type.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
  // NOTE(review): the two Check( calls below lost their first lines in this
  // excerpt; only the message strings remain — confirm against upstream
  // before editing.
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3335
3336void Verifier::visitCallBrInst(CallBrInst &CBI) {
3337 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3338 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3339 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3340
3341 verifyInlineAsmCall(CBI);
3342 visitTerminator(CBI);
3343}
3344
3345void Verifier::visitSelectInst(SelectInst &SI) {
3346 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3347 SI.getOperand(2)),
3348 "Invalid operands for select instruction!", &SI);
3349
3350 Check(SI.getTrueValue()->getType() == SI.getType(),
3351 "Select values must have same type as select instruction!", &SI);
3352 visitInstruction(SI);
3353}
3354
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1/UserOp2 opcodes are reserved for transient, pass-internal use;
  // one surviving into verification means a pass failed to clean up.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3361
// Verify a trunc: integer (or integer-vector) to integer, matching
// vector-ness, and strictly narrowing scalar width.
void Verifier::visitTruncInst(TruncInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
  Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "trunc source and destination must both be a vector or neither", &I);
  Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3379
// Verify a zext: integer (or integer-vector) to integer, matching
// vector-ness, and strictly widening scalar width.
void Verifier::visitZExtInst(ZExtInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // Get the size of the types in bits, we'll need this later
  Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
  Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "zext source and destination must both be a vector or neither", &I);
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3397
// Verify a sext: integer (or integer-vector) to integer, matching
// vector-ness, and strictly widening scalar width.
void Verifier::visitSExtInst(SExtInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
  Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "sext source and destination must both be a vector or neither", &I);
  Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3415
// Verify an fptrunc: FP (or FP-vector) to FP, matching vector-ness, and
// strictly narrowing scalar width.
void Verifier::visitFPTruncInst(FPTruncInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();
  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
  Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "fptrunc source and destination must both be a vector or neither", &I);
  Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3432
// Verify an fpext: FP (or FP-vector) to FP, matching vector-ness, and
// strictly widening scalar width.
void Verifier::visitFPExtInst(FPExtInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
  Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "fpext source and destination must both be a vector or neither", &I);
  Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3450
// Verify a uitofp: integer source, FP result, matching vector-ness and
// (for vectors) matching element counts.
void Verifier::visitUIToFPInst(UIToFPInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Check(SrcVec == DstVec,
        "UIToFP source and dest must both be vector or scalar", &I);
  Check(SrcTy->isIntOrIntVectorTy(),
        "UIToFP source must be integer or integer vector", &I);
  Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
        &I);

  if (SrcVec && DstVec)
    Check(cast<VectorType>(SrcTy)->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "UIToFP source and dest vector length mismatch", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3473
// Verify a sitofp: integer source, FP result, matching vector-ness and
// (for vectors) matching element counts.
void Verifier::visitSIToFPInst(SIToFPInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Check(SrcVec == DstVec,
        "SIToFP source and dest must both be vector or scalar", &I);
  Check(SrcTy->isIntOrIntVectorTy(),
        "SIToFP source must be integer or integer vector", &I);
  Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
        &I);

  if (SrcVec && DstVec)
    Check(cast<VectorType>(SrcTy)->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "SIToFP source and dest vector length mismatch", &I);

  // NOTE(review): a trailing visitInstruction(I) call appears elided here.
}
3496
3497void Verifier::visitFPToUIInst(FPToUIInst &I) {
3498 // Get the source and destination types
3499 Type *SrcTy = I.getOperand(0)->getType();
3500 Type *DestTy = I.getType();
3501
3502 bool SrcVec = SrcTy->isVectorTy();
3503 bool DstVec = DestTy->isVectorTy();
3504
3505 Check(SrcVec == DstVec,
3506 "FPToUI source and dest must both be vector or scalar", &I);
3507 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3508 Check(DestTy->isIntOrIntVectorTy(),
3509 "FPToUI result must be integer or integer vector", &I);
3510
3511 if (SrcVec && DstVec)
3512 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3513 cast<VectorType>(DestTy)->getElementCount(),
3514 "FPToUI source and dest vector length mismatch", &I);
3515
3517}
3518
3519void Verifier::visitFPToSIInst(FPToSIInst &I) {
3520 // Get the source and destination types
3521 Type *SrcTy = I.getOperand(0)->getType();
3522 Type *DestTy = I.getType();
3523
3524 bool SrcVec = SrcTy->isVectorTy();
3525 bool DstVec = DestTy->isVectorTy();
3526
3527 Check(SrcVec == DstVec,
3528 "FPToSI source and dest must both be vector or scalar", &I);
3529 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3530 Check(DestTy->isIntOrIntVectorTy(),
3531 "FPToSI result must be integer or integer vector", &I);
3532
3533 if (SrcVec && DstVec)
3534 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3535 cast<VectorType>(DestTy)->getElementCount(),
3536 "FPToSI source and dest vector length mismatch", &I);
3537
3539}
3540
3541void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3542 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3543 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3544 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3545 V);
3546
3547 if (SrcTy->isVectorTy()) {
3548 auto *VSrc = cast<VectorType>(SrcTy);
3549 auto *VDest = cast<VectorType>(DestTy);
3550 Check(VSrc->getElementCount() == VDest->getElementCount(),
3551 "PtrToAddr vector length mismatch", V);
3552 }
3553
3554 Type *AddrTy = DL.getAddressType(SrcTy);
3555 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3556}
3557
3558void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3559 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3561}
3562
3563void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3564 // Get the source and destination types
3565 Type *SrcTy = I.getOperand(0)->getType();
3566 Type *DestTy = I.getType();
3567
3568 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3569
3570 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3571 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3572 &I);
3573
3574 if (SrcTy->isVectorTy()) {
3575 auto *VSrc = cast<VectorType>(SrcTy);
3576 auto *VDest = cast<VectorType>(DestTy);
3577 Check(VSrc->getElementCount() == VDest->getElementCount(),
3578 "PtrToInt Vector length mismatch", &I);
3579 }
3580
3582}
3583
3584void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3585 // Get the source and destination types
3586 Type *SrcTy = I.getOperand(0)->getType();
3587 Type *DestTy = I.getType();
3588
3589 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3590 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3591
3592 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3593 &I);
3594 if (SrcTy->isVectorTy()) {
3595 auto *VSrc = cast<VectorType>(SrcTy);
3596 auto *VDest = cast<VectorType>(DestTy);
3597 Check(VSrc->getElementCount() == VDest->getElementCount(),
3598 "IntToPtr Vector length mismatch", &I);
3599 }
3601}
3602
3603void Verifier::visitBitCastInst(BitCastInst &I) {
3604 Check(
3605 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3606 "Invalid bitcast", &I);
3608}
3609
3610void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3611 Type *SrcTy = I.getOperand(0)->getType();
3612 Type *DestTy = I.getType();
3613
3614 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3615 &I);
3616 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3617 &I);
3619 "AddrSpaceCast must be between different address spaces", &I);
3620 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3621 Check(SrcVTy->getElementCount() ==
3622 cast<VectorType>(DestTy)->getElementCount(),
3623 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3625}
3626
/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
            isa<PHINode>(--BasicBlock::iterator(&PN)),
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  // Run the checks shared by all instructions.
  visitInstruction(PN);
}
3652
3653void Verifier::visitCallBase(CallBase &Call) {
3654 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3655 "Called function must be a pointer!", Call);
3656 FunctionType *FTy = Call.getFunctionType();
3657
3658 // Verify that the correct number of arguments are being passed
3659 if (FTy->isVarArg())
3660 Check(Call.arg_size() >= FTy->getNumParams(),
3661 "Called function requires more parameters than were provided!", Call);
3662 else
3663 Check(Call.arg_size() == FTy->getNumParams(),
3664 "Incorrect number of arguments passed to called function!", Call);
3665
3666 // Verify that all arguments to the call match the function type.
3667 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3668 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3669 "Call parameter type does not match function signature!",
3670 Call.getArgOperand(i), FTy->getParamType(i), Call);
3671
3672 AttributeList Attrs = Call.getAttributes();
3673
3674 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3675 "Attribute after last parameter!", Call);
3676
3677 Function *Callee =
3678 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3679 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3680 if (IsIntrinsic)
3681 Check(Callee->getValueType() == FTy,
3682 "Intrinsic called with incompatible signature", Call);
3683
3684 // Verify if the calling convention of the callee is callable.
3685 Check(isCallableCC(Call.getCallingConv()),
3686 "calling convention does not permit calls", Call);
3687
3688 // Disallow passing/returning values with alignment higher than we can
3689 // represent.
3690 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3691 // necessary.
3692 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3693 if (!Ty->isSized())
3694 return;
3695 Align ABIAlign = DL.getABITypeAlign(Ty);
3696 Check(ABIAlign.value() <= Value::MaximumAlignment,
3697 "Incorrect alignment of " + Message + " to called function!", Call);
3698 };
3699
3700 if (!IsIntrinsic) {
3701 VerifyTypeAlign(FTy->getReturnType(), "return type");
3702 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3703 Type *Ty = FTy->getParamType(i);
3704 VerifyTypeAlign(Ty, "argument passed");
3705 }
3706 }
3707
3708 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3709 // Don't allow speculatable on call sites, unless the underlying function
3710 // declaration is also speculatable.
3711 Check(Callee && Callee->isSpeculatable(),
3712 "speculatable attribute may not apply to call sites", Call);
3713 }
3714
3715 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3716 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3717 "preallocated as a call site attribute can only be on "
3718 "llvm.call.preallocated.arg");
3719 }
3720
3721 // Verify call attributes.
3722 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3723
3724 // Conservatively check the inalloca argument.
3725 // We have a bug if we can find that there is an underlying alloca without
3726 // inalloca.
3727 if (Call.hasInAllocaArgument()) {
3728 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3729 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3730 Check(AI->isUsedWithInAlloca(),
3731 "inalloca argument for call has mismatched alloca", AI, Call);
3732 }
3733
3734 // For each argument of the callsite, if it has the swifterror argument,
3735 // make sure the underlying alloca/parameter it comes from has a swifterror as
3736 // well.
3737 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3738 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3739 Value *SwiftErrorArg = Call.getArgOperand(i);
3740 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3741 Check(AI->isSwiftError(),
3742 "swifterror argument for call has mismatched alloca", AI, Call);
3743 continue;
3744 }
3745 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3746 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3747 SwiftErrorArg, Call);
3748 Check(ArgI->hasSwiftErrorAttr(),
3749 "swifterror argument for call has mismatched parameter", ArgI,
3750 Call);
3751 }
3752
3753 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3754 // Don't allow immarg on call sites, unless the underlying declaration
3755 // also has the matching immarg.
3756 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3757 "immarg may not apply only to call sites", Call.getArgOperand(i),
3758 Call);
3759 }
3760
3761 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3762 Value *ArgVal = Call.getArgOperand(i);
3763 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3764 "immarg operand has non-immediate parameter", ArgVal, Call);
3765
3766 // If the imm-arg is an integer and also has a range attached,
3767 // check if the given value is within the range.
3768 if (Call.paramHasAttr(i, Attribute::Range)) {
3769 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3770 const ConstantRange &CR =
3771 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3772 Check(CR.contains(CI->getValue()),
3773 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3774 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3775 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3776 Call);
3777 }
3778 }
3779 }
3780
3781 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3782 Value *ArgVal = Call.getArgOperand(i);
3783 bool hasOB =
3784 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3785 bool isMustTail = Call.isMustTailCall();
3786 Check(hasOB != isMustTail,
3787 "preallocated operand either requires a preallocated bundle or "
3788 "the call to be musttail (but not both)",
3789 ArgVal, Call);
3790 }
3791 }
3792
3793 if (FTy->isVarArg()) {
3794 // FIXME? is 'nest' even legal here?
3795 bool SawNest = false;
3796 bool SawReturned = false;
3797
3798 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3799 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3800 SawNest = true;
3801 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3802 SawReturned = true;
3803 }
3804
3805 // Check attributes on the varargs part.
3806 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3807 Type *Ty = Call.getArgOperand(Idx)->getType();
3808 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3809 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3810
3811 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3812 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3813 SawNest = true;
3814 }
3815
3816 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3817 Check(!SawReturned, "More than one parameter has attribute returned!",
3818 Call);
3819 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3820 "Incompatible argument and return types for 'returned' "
3821 "attribute",
3822 Call);
3823 SawReturned = true;
3824 }
3825
3826 // Statepoint intrinsic is vararg but the wrapped function may be not.
3827 // Allow sret here and check the wrapped function in verifyStatepoint.
3828 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3829 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3830 "Attribute 'sret' cannot be used for vararg call arguments!",
3831 Call);
3832
3833 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3834 Check(Idx == Call.arg_size() - 1,
3835 "inalloca isn't on the last argument!", Call);
3836 }
3837 }
3838
3839 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3840 if (!IsIntrinsic) {
3841 for (Type *ParamTy : FTy->params()) {
3842 Check(!ParamTy->isMetadataTy(),
3843 "Function has metadata parameter but isn't an intrinsic", Call);
3844 Check(!ParamTy->isTokenLikeTy(),
3845 "Function has token parameter but isn't an intrinsic", Call);
3846 }
3847 }
3848
3849 // Verify that indirect calls don't return tokens.
3850 if (!Call.getCalledFunction()) {
3851 Check(!FTy->getReturnType()->isTokenLikeTy(),
3852 "Return type cannot be token for indirect call!");
3853 Check(!FTy->getReturnType()->isX86_AMXTy(),
3854 "Return type cannot be x86_amx for indirect call!");
3855 }
3856
3857 if (Intrinsic::ID ID = Call.getIntrinsicID())
3858 visitIntrinsicCall(ID, Call);
3859
3860 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3861 // most one "gc-transition", at most one "cfguardtarget", at most one
3862 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3863 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3864 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3865 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3866 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3867 FoundAttachedCallBundle = false;
3868 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3869 OperandBundleUse BU = Call.getOperandBundleAt(i);
3870 uint32_t Tag = BU.getTagID();
3871 if (Tag == LLVMContext::OB_deopt) {
3872 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3873 FoundDeoptBundle = true;
3874 } else if (Tag == LLVMContext::OB_gc_transition) {
3875 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3876 Call);
3877 FoundGCTransitionBundle = true;
3878 } else if (Tag == LLVMContext::OB_funclet) {
3879 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3880 FoundFuncletBundle = true;
3881 Check(BU.Inputs.size() == 1,
3882 "Expected exactly one funclet bundle operand", Call);
3883 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3884 "Funclet bundle operands should correspond to a FuncletPadInst",
3885 Call);
3886 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3887 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3888 Call);
3889 FoundCFGuardTargetBundle = true;
3890 Check(BU.Inputs.size() == 1,
3891 "Expected exactly one cfguardtarget bundle operand", Call);
3892 } else if (Tag == LLVMContext::OB_ptrauth) {
3893 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3894 FoundPtrauthBundle = true;
3895 Check(BU.Inputs.size() == 2,
3896 "Expected exactly two ptrauth bundle operands", Call);
3897 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3898 BU.Inputs[0]->getType()->isIntegerTy(32),
3899 "Ptrauth bundle key operand must be an i32 constant", Call);
3900 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3901 "Ptrauth bundle discriminator operand must be an i64", Call);
3902 } else if (Tag == LLVMContext::OB_kcfi) {
3903 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3904 FoundKCFIBundle = true;
3905 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3906 Call);
3907 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3908 BU.Inputs[0]->getType()->isIntegerTy(32),
3909 "Kcfi bundle operand must be an i32 constant", Call);
3910 } else if (Tag == LLVMContext::OB_preallocated) {
3911 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3912 Call);
3913 FoundPreallocatedBundle = true;
3914 Check(BU.Inputs.size() == 1,
3915 "Expected exactly one preallocated bundle operand", Call);
3916 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3917 Check(Input &&
3918 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3919 "\"preallocated\" argument must be a token from "
3920 "llvm.call.preallocated.setup",
3921 Call);
3922 } else if (Tag == LLVMContext::OB_gc_live) {
3923 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3924 FoundGCLiveBundle = true;
3926 Check(!FoundAttachedCallBundle,
3927 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3928 FoundAttachedCallBundle = true;
3929 verifyAttachedCallBundle(Call, BU);
3930 }
3931 }
3932
3933 // Verify that callee and callsite agree on whether to use pointer auth.
3934 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3935 "Direct call cannot have a ptrauth bundle", Call);
3936
3937 // Verify that each inlinable callsite of a debug-info-bearing function in a
3938 // debug-info-bearing function has a debug location attached to it. Failure to
3939 // do so causes assertion failures when the inliner sets up inline scope info
3940 // (Interposable functions are not inlinable, neither are functions without
3941 // definitions.)
3942 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3943 !Call.getCalledFunction()->isInterposable() &&
3944 !Call.getCalledFunction()->isDeclaration() &&
3945 Call.getCalledFunction()->getSubprogram())
3946 CheckDI(Call.getDebugLoc(),
3947 "inlinable function call in a function with "
3948 "debug info must have a !dbg location",
3949 Call);
3950
3951 if (Call.isInlineAsm())
3952 verifyInlineAsmCall(Call);
3953
3954 ConvergenceVerifyHelper.visit(Call);
3955
3956 visitInstruction(Call);
3957}
3958
3959void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3960 StringRef Context) {
3961 Check(!Attrs.contains(Attribute::InAlloca),
3962 Twine("inalloca attribute not allowed in ") + Context);
3963 Check(!Attrs.contains(Attribute::InReg),
3964 Twine("inreg attribute not allowed in ") + Context);
3965 Check(!Attrs.contains(Attribute::SwiftError),
3966 Twine("swifterror attribute not allowed in ") + Context);
3967 Check(!Attrs.contains(Attribute::Preallocated),
3968 Twine("preallocated attribute not allowed in ") + Context);
3969 Check(!Attrs.contains(Attribute::ByRef),
3970 Twine("byref attribute not allowed in ") + Context);
3971}
3972
3973/// Two types are "congruent" if they are identical, or if they are both pointer
3974/// types with different pointee types and the same address space.
3975static bool isTypeCongruent(Type *L, Type *R) {
3976 if (L == R)
3977 return true;
3978 PointerType *PL = dyn_cast<PointerType>(L);
3979 PointerType *PR = dyn_cast<PointerType>(R);
3980 if (!PL || !PR)
3981 return false;
3982 return PL->getAddressSpace() == PR->getAddressSpace();
3983}
3984
3986 static const Attribute::AttrKind ABIAttrs[] = {
3987 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3988 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3989 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3990 Attribute::ByRef};
3991 AttrBuilder Copy(C);
3992 for (auto AK : ABIAttrs) {
3993 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3994 if (Attr.isValid())
3995 Copy.addAttribute(Attr);
3996 }
3997
3998 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3999 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4000 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4001 Attrs.hasParamAttr(I, Attribute::ByRef)))
4002 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4003 return Copy;
4004}
4005
4006void Verifier::verifyMustTailCall(CallInst &CI) {
4007 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4008
4009 Function *F = CI.getParent()->getParent();
4010 FunctionType *CallerTy = F->getFunctionType();
4011 FunctionType *CalleeTy = CI.getFunctionType();
4012 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4013 "cannot guarantee tail call due to mismatched varargs", &CI);
4014 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4015 "cannot guarantee tail call due to mismatched return types", &CI);
4016
4017 // - The calling conventions of the caller and callee must match.
4018 Check(F->getCallingConv() == CI.getCallingConv(),
4019 "cannot guarantee tail call due to mismatched calling conv", &CI);
4020
4021 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4022 // or a pointer bitcast followed by a ret instruction.
4023 // - The ret instruction must return the (possibly bitcasted) value
4024 // produced by the call or void.
4025 Value *RetVal = &CI;
4026 Instruction *Next = CI.getNextNode();
4027
4028 // Handle the optional bitcast.
4029 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4030 Check(BI->getOperand(0) == RetVal,
4031 "bitcast following musttail call must use the call", BI);
4032 RetVal = BI;
4033 Next = BI->getNextNode();
4034 }
4035
4036 // Check the return.
4037 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4038 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4039 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4040 isa<UndefValue>(Ret->getReturnValue()),
4041 "musttail call result must be returned", Ret);
4042
4043 AttributeList CallerAttrs = F->getAttributes();
4044 AttributeList CalleeAttrs = CI.getAttributes();
4047 StringRef CCName =
4048 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4049
4050 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4051 // are allowed in swifttailcc call
4052 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4053 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4054 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4055 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4056 }
4057 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4058 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4059 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4060 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4061 }
4062 // - Varargs functions are not allowed
4063 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4064 " tail call for varargs function");
4065 return;
4066 }
4067
4068 // - The caller and callee prototypes must match. Pointer types of
4069 // parameters or return types may differ in pointee type, but not
4070 // address space.
4071 if (!CI.getIntrinsicID()) {
4072 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4073 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4074 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4075 Check(
4076 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4077 "cannot guarantee tail call due to mismatched parameter types", &CI);
4078 }
4079 }
4080
4081 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4082 // returned, preallocated, and inalloca, must match.
4083 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4084 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4085 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4086 Check(CallerABIAttrs == CalleeABIAttrs,
4087 "cannot guarantee tail call due to mismatched ABI impacting "
4088 "function attributes",
4089 &CI, CI.getOperand(I));
4090 }
4091}
4092
void Verifier::visitCallInst(CallInst &CI) {
  // Shared call-site checks (signature, attributes, operand bundles, ...).
  visitCallBase(CI);

  // musttail imposes additional structural requirements, checked separately.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4099
4100void Verifier::visitInvokeInst(InvokeInst &II) {
4102
4103 // Verify that the first non-PHI instruction of the unwind destination is an
4104 // exception handling instruction.
4105 Check(
4106 II.getUnwindDest()->isEHPad(),
4107 "The unwind destination does not have an exception handling instruction!",
4108 &II);
4109
4111}
4112
4113/// visitUnaryOperator - Check the argument to the unary operator.
4114///
4115void Verifier::visitUnaryOperator(UnaryOperator &U) {
4116 Check(U.getType() == U.getOperand(0)->getType(),
4117 "Unary operators must have same type for"
4118 "operands and result!",
4119 &U);
4120
4121 switch (U.getOpcode()) {
4122 // Check that floating-point arithmetic operators are only used with
4123 // floating-point operands.
4124 case Instruction::FNeg:
4125 Check(U.getType()->isFPOrFPVectorTy(),
4126 "FNeg operator only works with float types!", &U);
4127 break;
4128 default:
4129 llvm_unreachable("Unknown UnaryOperator opcode!");
4130 }
4131
4133}
4134
4135/// visitBinaryOperator - Check that both arguments to the binary operator are
4136/// of the same type!
4137///
4138void Verifier::visitBinaryOperator(BinaryOperator &B) {
4139 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4140 "Both operands to a binary operator are not of the same type!", &B);
4141
4142 switch (B.getOpcode()) {
4143 // Check that integer arithmetic operators are only used with
4144 // integral operands.
4145 case Instruction::Add:
4146 case Instruction::Sub:
4147 case Instruction::Mul:
4148 case Instruction::SDiv:
4149 case Instruction::UDiv:
4150 case Instruction::SRem:
4151 case Instruction::URem:
4152 Check(B.getType()->isIntOrIntVectorTy(),
4153 "Integer arithmetic operators only work with integral types!", &B);
4154 Check(B.getType() == B.getOperand(0)->getType(),
4155 "Integer arithmetic operators must have same type "
4156 "for operands and result!",
4157 &B);
4158 break;
4159 // Check that floating-point arithmetic operators are only used with
4160 // floating-point operands.
4161 case Instruction::FAdd:
4162 case Instruction::FSub:
4163 case Instruction::FMul:
4164 case Instruction::FDiv:
4165 case Instruction::FRem:
4166 Check(B.getType()->isFPOrFPVectorTy(),
4167 "Floating-point arithmetic operators only work with "
4168 "floating-point types!",
4169 &B);
4170 Check(B.getType() == B.getOperand(0)->getType(),
4171 "Floating-point arithmetic operators must have same type "
4172 "for operands and result!",
4173 &B);
4174 break;
4175 // Check that logical operators are only used with integral operands.
4176 case Instruction::And:
4177 case Instruction::Or:
4178 case Instruction::Xor:
4179 Check(B.getType()->isIntOrIntVectorTy(),
4180 "Logical operators only work with integral types!", &B);
4181 Check(B.getType() == B.getOperand(0)->getType(),
4182 "Logical operators must have same type for operands and result!", &B);
4183 break;
4184 case Instruction::Shl:
4185 case Instruction::LShr:
4186 case Instruction::AShr:
4187 Check(B.getType()->isIntOrIntVectorTy(),
4188 "Shifts only work with integral types!", &B);
4189 Check(B.getType() == B.getOperand(0)->getType(),
4190 "Shift return type must be same as operands!", &B);
4191 break;
4192 default:
4193 llvm_unreachable("Unknown BinaryOperator opcode!");
4194 }
4195
4197}
4198
4199void Verifier::visitICmpInst(ICmpInst &IC) {
4200 // Check that the operands are the same type
4201 Type *Op0Ty = IC.getOperand(0)->getType();
4202 Type *Op1Ty = IC.getOperand(1)->getType();
4203 Check(Op0Ty == Op1Ty,
4204 "Both operands to ICmp instruction are not of the same type!", &IC);
4205 // Check that the operands are the right type
4206 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4207 "Invalid operand types for ICmp instruction", &IC);
4208 // Check that the predicate is valid.
4209 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4210
4211 visitInstruction(IC);
4212}
4213
4214void Verifier::visitFCmpInst(FCmpInst &FC) {
4215 // Check that the operands are the same type
4216 Type *Op0Ty = FC.getOperand(0)->getType();
4217 Type *Op1Ty = FC.getOperand(1)->getType();
4218 Check(Op0Ty == Op1Ty,
4219 "Both operands to FCmp instruction are not of the same type!", &FC);
4220 // Check that the operands are the right type
4221 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4222 &FC);
4223 // Check that the predicate is valid.
4224 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4225
4226 visitInstruction(FC);
4227}
4228
4229void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4231 "Invalid extractelement operands!", &EI);
4232 visitInstruction(EI);
4233}
4234
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  // Delegate operand validation to InsertElementInst::isValidOperands.
  Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                           IE.getOperand(2)),
        "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
4241
4242void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4244 SV.getShuffleMask()),
4245 "Invalid shufflevector operands!", &SV);
4246 visitInstruction(SV);
4247}
4248
4249void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4250 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4251
4252 Check(isa<PointerType>(TargetTy),
4253 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4254 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4255
4256 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4257 Check(!STy->isScalableTy(),
4258 "getelementptr cannot target structure that contains scalable vector"
4259 "type",
4260 &GEP);
4261 }
4262
4263 SmallVector<Value *, 16> Idxs(GEP.indices());
4264 Check(
4265 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4266 "GEP indexes must be integers", &GEP);
4267 Type *ElTy =
4268 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4269 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4270
4271 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4272
4273 Check(PtrTy && GEP.getResultElementType() == ElTy,
4274 "GEP is not of right type for indices!", &GEP, ElTy);
4275
4276 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4277 // Additional checks for vector GEPs.
4278 ElementCount GEPWidth = GEPVTy->getElementCount();
4279 if (GEP.getPointerOperandType()->isVectorTy())
4280 Check(
4281 GEPWidth ==
4282 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4283 "Vector GEP result width doesn't match operand's", &GEP);
4284 for (Value *Idx : Idxs) {
4285 Type *IndexTy = Idx->getType();
4286 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4287 ElementCount IndexWidth = IndexVTy->getElementCount();
4288 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4289 }
4290 Check(IndexTy->isIntOrIntVectorTy(),
4291 "All GEP indices should be of integer type");
4292 }
4293 }
4294
4295 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4296 "GEP address space doesn't match type", &GEP);
4297
4299}
4300
4301static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4302 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4303}
4304
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs, so an odd count means a half-written
  // range.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Both bounds of each pair must be ConstantInt metadata.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address-space numbers are always expressed as i32, independent of Ty.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol is allowed to encode the full set [min, min).
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive ranges must be disjoint, strictly increasing, and not
      // adjacent (adjacent ranges should have been merged).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // The range list may wrap around, so with 3+ entries the first and last
    // ranges must also be checked against each other.
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4369
/// Verify a !range attachment on \p I; the caller guarantees \p Range is the
/// node actually attached as MD_range.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4375
/// Verify a !noalias.addrspace attachment on \p I; the caller guarantees
/// \p Range is the node actually attached as MD_noalias_addrspace.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4383
4384void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4385 unsigned Size = DL.getTypeSizeInBits(Ty);
4386 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4387 Check(!(Size & (Size - 1)),
4388 "atomic memory access' operand must have a power-of-two size", Ty, I);
4389}
4390
4391void Verifier::visitLoadInst(LoadInst &LI) {
4392 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4393 Check(PTy, "Load operand must be a pointer.", &LI);
4394 Type *ElTy = LI.getType();
4395 if (MaybeAlign A = LI.getAlign()) {
4396 Check(A->value() <= Value::MaximumAlignment,
4397 "huge alignment values are unsupported", &LI);
4398 }
4399 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4400 if (LI.isAtomic()) {
4403 "Load cannot have Release ordering", &LI);
4404 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4405 "atomic load operand must have integer, pointer, or floating point "
4406 "type!",
4407 ElTy, &LI);
4408 checkAtomicMemAccessSize(ElTy, &LI);
4409 } else {
4411 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4412 }
4413
4414 visitInstruction(LI);
4415}
4416
4417void Verifier::visitStoreInst(StoreInst &SI) {
4418 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4419 Check(PTy, "Store operand must be a pointer.", &SI);
4420 Type *ElTy = SI.getOperand(0)->getType();
4421 if (MaybeAlign A = SI.getAlign()) {
4422 Check(A->value() <= Value::MaximumAlignment,
4423 "huge alignment values are unsupported", &SI);
4424 }
4425 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4426 if (SI.isAtomic()) {
4427 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4428 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4429 "Store cannot have Acquire ordering", &SI);
4430 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4431 "atomic store operand must have integer, pointer, or floating point "
4432 "type!",
4433 ElTy, &SI);
4434 checkAtomicMemAccessSize(ElTy, &SI);
4435 } else {
4436 Check(SI.getSyncScopeID() == SyncScope::System,
4437 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4438 }
4439 visitInstruction(SI);
4440}
4441
4442/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4443void Verifier::verifySwiftErrorCall(CallBase &Call,
4444 const Value *SwiftErrorVal) {
4445 for (const auto &I : llvm::enumerate(Call.args())) {
4446 if (I.value() == SwiftErrorVal) {
4447 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4448 "swifterror value when used in a callsite should be marked "
4449 "with swifterror attribute",
4450 SwiftErrorVal, Call);
4451 }
4452 }
4453}
4454
4455void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4456 // Check that swifterror value is only used by loads, stores, or as
4457 // a swifterror argument.
4458 for (const User *U : SwiftErrorVal->users()) {
4459 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4460 isa<InvokeInst>(U),
4461 "swifterror value can only be loaded and stored from, or "
4462 "as a swifterror argument!",
4463 SwiftErrorVal, U);
4464 // If it is used by a store, check it is the second operand.
4465 if (auto StoreI = dyn_cast<StoreInst>(U))
4466 Check(StoreI->getOperand(1) == SwiftErrorVal,
4467 "swifterror value should be the second operand when used "
4468 "by stores",
4469 SwiftErrorVal, U);
4470 if (auto *Call = dyn_cast<CallBase>(U))
4471 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4472 }
4473}
4474
4475void Verifier::visitAllocaInst(AllocaInst &AI) {
4476 Type *Ty = AI.getAllocatedType();
4477 SmallPtrSet<Type*, 4> Visited;
4478 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4479 // Check if it's a target extension type that disallows being used on the
4480 // stack.
4482 "Alloca has illegal target extension type", &AI);
4484 "Alloca array size must have integer type", &AI);
4485 if (MaybeAlign A = AI.getAlign()) {
4486 Check(A->value() <= Value::MaximumAlignment,
4487 "huge alignment values are unsupported", &AI);
4488 }
4489
4490 if (AI.isSwiftError()) {
4491 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4493 "swifterror alloca must not be array allocation", &AI);
4494 verifySwiftErrorValue(&AI);
4495 }
4496
4497 if (TT.isAMDGPU()) {
4499 "alloca on amdgpu must be in addrspace(5)", &AI);
4500 }
4501
4502 visitInstruction(AI);
4503}
4504
4505void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4506 Type *ElTy = CXI.getOperand(1)->getType();
4507 Check(ElTy->isIntOrPtrTy(),
4508 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4509 checkAtomicMemAccessSize(ElTy, &CXI);
4510 visitInstruction(CXI);
4511}
4512
4513void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4515 "atomicrmw instructions cannot be unordered.", &RMWI);
4516 auto Op = RMWI.getOperation();
4517 Type *ElTy = RMWI.getOperand(1)->getType();
4518 if (Op == AtomicRMWInst::Xchg) {
4519 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4520 ElTy->isPointerTy(),
4521 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4522 " operand must have integer or floating point type!",
4523 &RMWI, ElTy);
4524 } else if (AtomicRMWInst::isFPOperation(Op)) {
4525 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4526 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4527 " operand must have floating-point or fixed vector of floating-point "
4528 "type!",
4529 &RMWI, ElTy);
4530 } else {
4531 Check(ElTy->isIntegerTy(),
4532 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4533 " operand must have integer type!",
4534 &RMWI, ElTy);
4535 }
4536 checkAtomicMemAccessSize(ElTy, &RMWI);
4538 "Invalid binary operation!", &RMWI);
4539 visitInstruction(RMWI);
4540}
4541
4542void Verifier::visitFenceInst(FenceInst &FI) {
4543 const AtomicOrdering Ordering = FI.getOrdering();
4544 Check(Ordering == AtomicOrdering::Acquire ||
4545 Ordering == AtomicOrdering::Release ||
4546 Ordering == AtomicOrdering::AcquireRelease ||
4548 "fence instructions may only have acquire, release, acq_rel, or "
4549 "seq_cst ordering.",
4550 &FI);
4551 visitInstruction(FI);
4552}
4553
4554void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4556 EVI.getIndices()) == EVI.getType(),
4557 "Invalid ExtractValueInst operands!", &EVI);
4558
4559 visitInstruction(EVI);
4560}
4561
4562void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4564 IVI.getIndices()) ==
4565 IVI.getOperand(1)->getType(),
4566 "Invalid InsertValueInst operands!", &IVI);
4567
4568 visitInstruction(IVI);
4569}
4570
4571static Value *getParentPad(Value *EHPad) {
4572 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4573 return FPI->getParentPad();
4574
4575 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4576}
4577
4578void Verifier::visitEHPadPredecessors(Instruction &I) {
4579 assert(I.isEHPad());
4580
4581 BasicBlock *BB = I.getParent();
4582 Function *F = BB->getParent();
4583
4584 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4585
4586 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4587 // The landingpad instruction defines its parent as a landing pad block. The
4588 // landing pad block may be branched to only by the unwind edge of an
4589 // invoke.
4590 for (BasicBlock *PredBB : predecessors(BB)) {
4591 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4592 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4593 "Block containing LandingPadInst must be jumped to "
4594 "only by the unwind edge of an invoke.",
4595 LPI);
4596 }
4597 return;
4598 }
4599 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4600 if (!pred_empty(BB))
4601 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4602 "Block containg CatchPadInst must be jumped to "
4603 "only by its catchswitch.",
4604 CPI);
4605 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4606 "Catchswitch cannot unwind to one of its catchpads",
4607 CPI->getCatchSwitch(), CPI);
4608 return;
4609 }
4610
4611 // Verify that each pred has a legal terminator with a legal to/from EH
4612 // pad relationship.
4613 Instruction *ToPad = &I;
4614 Value *ToPadParent = getParentPad(ToPad);
4615 for (BasicBlock *PredBB : predecessors(BB)) {
4616 Instruction *TI = PredBB->getTerminator();
4617 Value *FromPad;
4618 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4619 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4620 "EH pad must be jumped to via an unwind edge", ToPad, II);
4621 auto *CalledFn =
4622 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4623 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4624 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4625 continue;
4626 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4627 FromPad = Bundle->Inputs[0];
4628 else
4629 FromPad = ConstantTokenNone::get(II->getContext());
4630 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4631 FromPad = CRI->getOperand(0);
4632 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4633 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4634 FromPad = CSI;
4635 } else {
4636 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4637 }
4638
4639 // The edge may exit from zero or more nested pads.
4641 for (;; FromPad = getParentPad(FromPad)) {
4642 Check(FromPad != ToPad,
4643 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4644 if (FromPad == ToPadParent) {
4645 // This is a legal unwind edge.
4646 break;
4647 }
4648 Check(!isa<ConstantTokenNone>(FromPad),
4649 "A single unwind edge may only enter one EH pad", TI);
4650 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4651 FromPad);
4652
4653 // This will be diagnosed on the corresponding instruction already. We
4654 // need the extra check here to make sure getParentPad() works.
4655 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4656 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4657 }
4658 }
4659}
4660
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads within one function must agree on a single result type;
  // the first one seen establishes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  // Each clause is either a catch (pointer to a typeinfo) or a filter
  // (constant array of typeinfos).
  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
      Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4700
4701void Verifier::visitResumeInst(ResumeInst &RI) {
4703 "ResumeInst needs to be in a function with a personality.", &RI);
4704
4705 if (!LandingPadResultTy)
4706 LandingPadResultTy = RI.getValue()->getType();
4707 else
4708 Check(LandingPadResultTy == RI.getValue()->getType(),
4709 "The resume instruction should have a consistent result type "
4710 "inside a function.",
4711 &RI);
4712
4713 visitTerminator(RI);
4714}
4715
4716void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4717 BasicBlock *BB = CPI.getParent();
4718
4719 Function *F = BB->getParent();
4720 Check(F->hasPersonalityFn(),
4721 "CatchPadInst needs to be in a function with a personality.", &CPI);
4722
4723 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4724 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4725 CPI.getParentPad());
4726
4727 // The catchpad instruction must be the first non-PHI instruction in the
4728 // block.
4729 Check(&*BB->getFirstNonPHIIt() == &CPI,
4730 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4731
4732 visitEHPadPredecessors(CPI);
4734}
4735
4736void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4737 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4738 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4739 CatchReturn.getOperand(0));
4740
4741 visitTerminator(CatchReturn);
4742}
4743
4744void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4745 BasicBlock *BB = CPI.getParent();
4746
4747 Function *F = BB->getParent();
4748 Check(F->hasPersonalityFn(),
4749 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4750
4751 // The cleanuppad instruction must be the first non-PHI instruction in the
4752 // block.
4753 Check(&*BB->getFirstNonPHIIt() == &CPI,
4754 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4755
4756 auto *ParentPad = CPI.getParentPad();
4757 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4758 "CleanupPadInst has an invalid parent.", &CPI);
4759
4760 visitEHPadPredecessors(CPI);
4762}
4763
4764void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4765 User *FirstUser = nullptr;
4766 Value *FirstUnwindPad = nullptr;
4767 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4769
4770 while (!Worklist.empty()) {
4771 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4772 Check(Seen.insert(CurrentPad).second,
4773 "FuncletPadInst must not be nested within itself", CurrentPad);
4774 Value *UnresolvedAncestorPad = nullptr;
4775 for (User *U : CurrentPad->users()) {
4776 BasicBlock *UnwindDest;
4777 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4778 UnwindDest = CRI->getUnwindDest();
4779 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4780 // We allow catchswitch unwind to caller to nest
4781 // within an outer pad that unwinds somewhere else,
4782 // because catchswitch doesn't have a nounwind variant.
4783 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4784 if (CSI->unwindsToCaller())
4785 continue;
4786 UnwindDest = CSI->getUnwindDest();
4787 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4788 UnwindDest = II->getUnwindDest();
4789 } else if (isa<CallInst>(U)) {
4790 // Calls which don't unwind may be found inside funclet
4791 // pads that unwind somewhere else. We don't *require*
4792 // such calls to be annotated nounwind.
4793 continue;
4794 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4795 // The unwind dest for a cleanup can only be found by
4796 // recursive search. Add it to the worklist, and we'll
4797 // search for its first use that determines where it unwinds.
4798 Worklist.push_back(CPI);
4799 continue;
4800 } else {
4801 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4802 continue;
4803 }
4804
4805 Value *UnwindPad;
4806 bool ExitsFPI;
4807 if (UnwindDest) {
4808 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
4809 if (!cast<Instruction>(UnwindPad)->isEHPad())
4810 continue;
4811 Value *UnwindParent = getParentPad(UnwindPad);
4812 // Ignore unwind edges that don't exit CurrentPad.
4813 if (UnwindParent == CurrentPad)
4814 continue;
4815 // Determine whether the original funclet pad is exited,
4816 // and if we are scanning nested pads determine how many
4817 // of them are exited so we can stop searching their
4818 // children.
4819 Value *ExitedPad = CurrentPad;
4820 ExitsFPI = false;
4821 do {
4822 if (ExitedPad == &FPI) {
4823 ExitsFPI = true;
4824 // Now we can resolve any ancestors of CurrentPad up to
4825 // FPI, but not including FPI since we need to make sure
4826 // to check all direct users of FPI for consistency.
4827 UnresolvedAncestorPad = &FPI;
4828 break;
4829 }
4830 Value *ExitedParent = getParentPad(ExitedPad);
4831 if (ExitedParent == UnwindParent) {
4832 // ExitedPad is the ancestor-most pad which this unwind
4833 // edge exits, so we can resolve up to it, meaning that
4834 // ExitedParent is the first ancestor still unresolved.
4835 UnresolvedAncestorPad = ExitedParent;
4836 break;
4837 }
4838 ExitedPad = ExitedParent;
4839 } while (!isa<ConstantTokenNone>(ExitedPad));
4840 } else {
4841 // Unwinding to caller exits all pads.
4842 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4843 ExitsFPI = true;
4844 UnresolvedAncestorPad = &FPI;
4845 }
4846
4847 if (ExitsFPI) {
4848 // This unwind edge exits FPI. Make sure it agrees with other
4849 // such edges.
4850 if (FirstUser) {
4851 Check(UnwindPad == FirstUnwindPad,
4852 "Unwind edges out of a funclet "
4853 "pad must have the same unwind "
4854 "dest",
4855 &FPI, U, FirstUser);
4856 } else {
4857 FirstUser = U;
4858 FirstUnwindPad = UnwindPad;
4859 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4860 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4861 getParentPad(UnwindPad) == getParentPad(&FPI))
4862 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4863 }
4864 }
4865 // Make sure we visit all uses of FPI, but for nested pads stop as
4866 // soon as we know where they unwind to.
4867 if (CurrentPad != &FPI)
4868 break;
4869 }
4870 if (UnresolvedAncestorPad) {
4871 if (CurrentPad == UnresolvedAncestorPad) {
4872 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4873 // we've found an unwind edge that exits it, because we need to verify
4874 // all direct uses of FPI.
4875 assert(CurrentPad == &FPI);
4876 continue;
4877 }
4878 // Pop off the worklist any nested pads that we've found an unwind
4879 // destination for. The pads on the worklist are the uncles,
4880 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4881 // for all ancestors of CurrentPad up to but not including
4882 // UnresolvedAncestorPad.
4883 Value *ResolvedPad = CurrentPad;
4884 while (!Worklist.empty()) {
4885 Value *UnclePad = Worklist.back();
4886 Value *AncestorPad = getParentPad(UnclePad);
4887 // Walk ResolvedPad up the ancestor list until we either find the
4888 // uncle's parent or the last resolved ancestor.
4889 while (ResolvedPad != AncestorPad) {
4890 Value *ResolvedParent = getParentPad(ResolvedPad);
4891 if (ResolvedParent == UnresolvedAncestorPad) {
4892 break;
4893 }
4894 ResolvedPad = ResolvedParent;
4895 }
4896 // If the resolved ancestor search didn't find the uncle's parent,
4897 // then the uncle is not yet resolved.
4898 if (ResolvedPad != AncestorPad)
4899 break;
4900 // This uncle is resolved, so pop it from the worklist.
4901 Worklist.pop_back();
4902 }
4903 }
4904 }
4905
4906 if (FirstUnwindPad) {
4907 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4908 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4909 Value *SwitchUnwindPad;
4910 if (SwitchUnwindDest)
4911 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
4912 else
4913 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4914 Check(SwitchUnwindPad == FirstUnwindPad,
4915 "Unwind edges out of a catch must have the same unwind dest as "
4916 "the parent catchswitch",
4917 &FPI, FirstUser, CatchSwitch);
4918 }
4919 }
4920
4921 visitInstruction(FPI);
4922}
4923
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // A catchswitch nests either directly in the function (token none) or in
  // another funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    // The unwind successor must begin with a WinEH-style pad, never a
    // landingpad.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  // Every handler block must begin with a catchpad.
  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
4965
4966void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4967 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4968 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4969 CRI.getOperand(0));
4970
4971 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4972 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4973 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4974 "CleanupReturnInst must unwind to an EH block which is not a "
4975 "landingpad.",
4976 &CRI);
4977 }
4978
4979 visitTerminator(CRI);
4980}
4981
/// Verify that the definition of operand \p i of \p I dominates that use.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If the we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  // Fall back to the (more expensive) dominator-tree query.
  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
5005
5006void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5007 Check(I.getType()->isPointerTy(),
5008 "dereferenceable, dereferenceable_or_null "
5009 "apply only to pointer types",
5010 &I);
5011 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
5012 "dereferenceable, dereferenceable_or_null apply only to load"
5013 " and inttoptr instructions, use attributes for calls or invokes",
5014 &I);
5015 Check(MD->getNumOperands() == 1,
5016 "dereferenceable, dereferenceable_or_null "
5017 "take one operand!",
5018 &I);
5019 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5020 Check(CI && CI->getType()->isIntegerTy(64),
5021 "dereferenceable, "
5022 "dereferenceable_or_null metadata value must be an i64!",
5023 &I);
5024}
5025
5026void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5027 auto GetBranchingTerminatorNumOperands = [&]() {
5028 unsigned ExpectedNumOperands = 0;
5029 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5030 ExpectedNumOperands = BI->getNumSuccessors();
5031 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5032 ExpectedNumOperands = SI->getNumSuccessors();
5033 else if (isa<CallInst>(&I))
5034 ExpectedNumOperands = 1;
5035 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5036 ExpectedNumOperands = IBI->getNumDestinations();
5037 else if (isa<SelectInst>(&I))
5038 ExpectedNumOperands = 2;
5039 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5040 ExpectedNumOperands = CI->getNumSuccessors();
5041 return ExpectedNumOperands;
5042 };
5043 Check(MD->getNumOperands() >= 1,
5044 "!prof annotations should have at least 1 operand", MD);
5045 // Check first operand.
5046 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5047 Check(isa<MDString>(MD->getOperand(0)),
5048 "expected string with name of the !prof annotation", MD);
5049 MDString *MDS = cast<MDString>(MD->getOperand(0));
5050 StringRef ProfName = MDS->getString();
5051
5053 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5054 "'unknown' !prof should only appear on instructions on which "
5055 "'branch_weights' would",
5056 MD);
5057 Check(MD->getNumOperands() == 1,
5058 "'unknown' !prof should have no additional operands", MD);
5059 return;
5060 }
5061
5062 Check(MD->getNumOperands() >= 2,
5063 "!prof annotations should have no less than 2 operands", MD);
5064
5065 // Check consistency of !prof branch_weights metadata.
5066 if (ProfName == MDProfLabels::BranchWeights) {
5067 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5068 if (isa<InvokeInst>(&I)) {
5069 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5070 "Wrong number of InvokeInst branch_weights operands", MD);
5071 } else {
5072 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5073 if (ExpectedNumOperands == 0)
5074 CheckFailed("!prof branch_weights are not allowed for this instruction",
5075 MD);
5076
5077 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5078 MD);
5079 }
5080 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5081 ++i) {
5082 auto &MDO = MD->getOperand(i);
5083 Check(MDO, "second operand should not be null", MD);
5084 Check(mdconst::dyn_extract<ConstantInt>(MDO),
5085 "!prof brunch_weights operand is not a const int");
5086 }
5087 } else if (ProfName == MDProfLabels::ValueProfile) {
5088 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5089 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5090 Check(KindInt, "VP !prof missing kind argument", MD);
5091
5092 auto Kind = KindInt->getZExtValue();
5093 Check(Kind >= InstrProfValueKind::IPVK_First &&
5094 Kind <= InstrProfValueKind::IPVK_Last,
5095 "Invalid VP !prof kind", MD);
5096 Check(MD->getNumOperands() % 2 == 1,
5097 "VP !prof should have an even number "
5098 "of arguments after 'VP'",
5099 MD);
5100 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5101 Kind == InstrProfValueKind::IPVK_MemOPSize)
5102 Check(isa<CallBase>(I),
5103 "VP !prof indirect call or memop size expected to be applied to "
5104 "CallBase instructions only",
5105 MD);
5106 } else {
5107 CheckFailed("expected either branch_weights or VP profile name", MD);
5108 }
5109}
5110
/// Verify a !DIAssignID attachment on \p I and all debug-info users of the
/// ID node \p MD.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
      isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
      CheckDI(isa<DbgAssignIntrinsic>(User),
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Non-instruction debug records referencing the ID must be dbg.assign-style
  // and live in the same function as I.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5142
5143void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5145 "!mmra metadata attached to unexpected instruction kind", I, MD);
5146
5147 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5148 // list of tags such as !2 in the following example:
5149 // !0 = !{!"a", !"b"}
5150 // !1 = !{!"c", !"d"}
5151 // !2 = !{!0, !1}
5152 if (MMRAMetadata::isTagMD(MD))
5153 return;
5154
5155 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5156 for (const MDOperand &MDOp : MD->operands())
5157 Check(MMRAMetadata::isTagMD(MDOp.get()),
5158 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5159}
5160
5161void Verifier::visitCallStackMetadata(MDNode *MD) {
5162 // Call stack metadata should consist of a list of at least 1 constant int
5163 // (representing a hash of the location).
5164 Check(MD->getNumOperands() >= 1,
5165 "call stack metadata should have at least 1 operand", MD);
5166
5167 for (const auto &Op : MD->operands())
5168 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
5169 "call stack metadata operand should be constant integer", Op);
5170}
5171
5172void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5173 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5174 Check(MD->getNumOperands() >= 1,
5175 "!memprof annotations should have at least 1 metadata operand "
5176 "(MemInfoBlock)",
5177 MD);
5178
5179 // Check each MIB
5180 for (auto &MIBOp : MD->operands()) {
5181 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5182 // The first operand of an MIB should be the call stack metadata.
5183 // There rest of the operands should be MDString tags, and there should be
5184 // at least one.
5185 Check(MIB->getNumOperands() >= 2,
5186 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5187
5188 // Check call stack metadata (first operand).
5189 Check(MIB->getOperand(0) != nullptr,
5190 "!memprof MemInfoBlock first operand should not be null", MIB);
5191 Check(isa<MDNode>(MIB->getOperand(0)),
5192 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5193 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5194 visitCallStackMetadata(StackMD);
5195
5196 // The next set of 1 or more operands should be MDString.
5197 unsigned I = 1;
5198 for (; I < MIB->getNumOperands(); ++I) {
5199 if (!isa<MDString>(MIB->getOperand(I))) {
5200 Check(I > 1,
5201 "!memprof MemInfoBlock second operand should be an MDString",
5202 MIB);
5203 break;
5204 }
5205 }
5206
5207 // Any remaining should be MDNode that are pairs of integers
5208 for (; I < MIB->getNumOperands(); ++I) {
5209 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5210 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5211 MIB);
5212 Check(OpNode->getNumOperands() == 2,
5213 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5214 "operands",
5215 MIB);
5216 // Check that all of Op's operands are ConstantInt.
5217 Check(llvm::all_of(OpNode->operands(),
5218 [](const MDOperand &Op) {
5219 return mdconst::hasa<ConstantInt>(Op);
5220 }),
5221 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5222 "ConstantInt operands",
5223 MIB);
5224 }
5225 }
5226}
5227
5228void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5229 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5230 // Verify the partial callstack annotated from memprof profiles. This callsite
5231 // is a part of a profiled allocation callstack.
5232 visitCallStackMetadata(MD);
5233}
5234
5235static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5236 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5237 return isa<ConstantInt>(VAL->getValue());
5238 return false;
5239}
5240
5241void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5242 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5243 &I);
5244 for (Metadata *Op : MD->operands()) {
5245 Check(isa<MDNode>(Op),
5246 "The callee_type metadata must be a list of type metadata nodes", Op);
5247 auto *TypeMD = cast<MDNode>(Op);
5248 Check(TypeMD->getNumOperands() == 2,
5249 "Well-formed generalized type metadata must contain exactly two "
5250 "operands",
5251 Op);
5252 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5253 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5254 "The first operand of type metadata for functions must be zero", Op);
5255 Check(TypeMD->hasGeneralizedMDString(),
5256 "Only generalized type metadata can be part of the callee_type "
5257 "metadata list",
5258 Op);
5259 }
5260}
5261
5262void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5263 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5264 Check(Annotation->getNumOperands() >= 1,
5265 "annotation must have at least one operand");
5266 for (const MDOperand &Op : Annotation->operands()) {
5267 bool TupleOfStrings =
5268 isa<MDTuple>(Op.get()) &&
5269 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5270 return isa<MDString>(Annotation.get());
5271 });
5272 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5273 "operands must be a string or a tuple of strings");
5274 }
5275}
5276
5277void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5278 unsigned NumOps = MD->getNumOperands();
5279 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5280 MD);
5281 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5282 "first scope operand must be self-referential or string", MD);
5283 if (NumOps == 3)
5284 Check(isa<MDString>(MD->getOperand(2)),
5285 "third scope operand must be string (if used)", MD);
5286
5287 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5288 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5289
5290 unsigned NumDomainOps = Domain->getNumOperands();
5291 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5292 "domain must have one or two operands", Domain);
5293 Check(Domain->getOperand(0).get() == Domain ||
5294 isa<MDString>(Domain->getOperand(0)),
5295 "first domain operand must be self-referential or string", Domain);
5296 if (NumDomainOps == 2)
5297 Check(isa<MDString>(Domain->getOperand(1)),
5298 "second domain operand must be string (if used)", Domain);
5299}
5300
5301void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5302 for (const MDOperand &Op : MD->operands()) {
5303 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5304 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5305 visitAliasScopeMetadata(OpMD);
5306 }
5307}
5308
5309void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5310 auto IsValidAccessScope = [](const MDNode *MD) {
5311 return MD->getNumOperands() == 0 && MD->isDistinct();
5312 };
5313
5314 // It must be either an access scope itself...
5315 if (IsValidAccessScope(MD))
5316 return;
5317
5318 // ...or a list of access scopes.
5319 for (const MDOperand &Op : MD->operands()) {
5320 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5321 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5322 Check(IsValidAccessScope(OpMD),
5323 "Access scope list contains invalid access scope", MD);
5324 }
5325}
5326
5327/// verifyInstruction - Verify that an instruction is well formed.
5328///
5329void Verifier::visitInstruction(Instruction &I) {
5330 BasicBlock *BB = I.getParent();
5331 Check(BB, "Instruction not embedded in basic block!", &I);
5332
5333 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5334 for (User *U : I.users()) {
5335 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5336 "Only PHI nodes may reference their own value!", &I);
5337 }
5338 }
5339
5340 // Check that void typed values don't have names
5341 Check(!I.getType()->isVoidTy() || !I.hasName(),
5342 "Instruction has a name, but provides a void value!", &I);
5343
5344 // Check that the return value of the instruction is either void or a legal
5345 // value type.
5346 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5347 "Instruction returns a non-scalar type!", &I);
5348
5349 // Check that the instruction doesn't produce metadata. Calls are already
5350 // checked against the callee type.
5351 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5352 "Invalid use of metadata!", &I);
5353
5354 // Check that all uses of the instruction, if they are instructions
5355 // themselves, actually have parent basic blocks. If the use is not an
5356 // instruction, it is an error!
5357 for (Use &U : I.uses()) {
5358 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5359 Check(Used->getParent() != nullptr,
5360 "Instruction referencing"
5361 " instruction not embedded in a basic block!",
5362 &I, Used);
5363 else {
5364 CheckFailed("Use of instruction is not an instruction!", U);
5365 return;
5366 }
5367 }
5368
5369 // Get a pointer to the call base of the instruction if it is some form of
5370 // call.
5371 const CallBase *CBI = dyn_cast<CallBase>(&I);
5372
5373 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5374 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5375
5376 // Check to make sure that only first-class-values are operands to
5377 // instructions.
5378 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5379 Check(false, "Instruction operands must be first-class values!", &I);
5380 }
5381
5382 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5383 // This code checks whether the function is used as the operand of a
5384 // clang_arc_attachedcall operand bundle.
5385 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5386 int Idx) {
5387 return CBI && CBI->isOperandBundleOfType(
5389 };
5390
5391 // Check to make sure that the "address of" an intrinsic function is never
5392 // taken. Ignore cases where the address of the intrinsic function is used
5393 // as the argument of operand bundle "clang.arc.attachedcall" as those
5394 // cases are handled in verifyAttachedCallBundle.
5395 Check((!F->isIntrinsic() ||
5396 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5397 IsAttachedCallOperand(F, CBI, i)),
5398 "Cannot take the address of an intrinsic!", &I);
5399 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5400 F->getIntrinsicID() == Intrinsic::donothing ||
5401 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5402 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5403 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5404 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5405 F->getIntrinsicID() == Intrinsic::coro_resume ||
5406 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5407 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5408 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5409 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5410 F->getIntrinsicID() ==
5411 Intrinsic::experimental_patchpoint_void ||
5412 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5413 F->getIntrinsicID() == Intrinsic::fake_use ||
5414 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5415 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5416 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5417 IsAttachedCallOperand(F, CBI, i),
5418 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5419 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5420 "wasm.(re)throw",
5421 &I);
5422 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5423 &M, F, F->getParent());
5424 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5425 Check(OpBB->getParent() == BB->getParent(),
5426 "Referring to a basic block in another function!", &I);
5427 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5428 Check(OpArg->getParent() == BB->getParent(),
5429 "Referring to an argument in another function!", &I);
5430 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5431 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5432 &M, GV, GV->getParent());
5433 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5434 Check(OpInst->getFunction() == BB->getParent(),
5435 "Referring to an instruction in another function!", &I);
5436 verifyDominatesUse(I, i);
5437 } else if (isa<InlineAsm>(I.getOperand(i))) {
5438 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5439 "Cannot take the address of an inline asm!", &I);
5440 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5441 visitConstantExprsRecursively(CPA);
5442 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5443 if (CE->getType()->isPtrOrPtrVectorTy()) {
5444 // If we have a ConstantExpr pointer, we need to see if it came from an
5445 // illegal bitcast.
5446 visitConstantExprsRecursively(CE);
5447 }
5448 }
5449 }
5450
5451 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5452 Check(I.getType()->isFPOrFPVectorTy(),
5453 "fpmath requires a floating point result!", &I);
5454 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5455 if (ConstantFP *CFP0 =
5456 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5457 const APFloat &Accuracy = CFP0->getValueAPF();
5458 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5459 "fpmath accuracy must have float type", &I);
5460 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5461 "fpmath accuracy not a positive number!", &I);
5462 } else {
5463 Check(false, "invalid fpmath accuracy!", &I);
5464 }
5465 }
5466
5467 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5468 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5469 "Ranges are only for loads, calls and invokes!", &I);
5470 visitRangeMetadata(I, Range, I.getType());
5471 }
5472
5473 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5474 Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5475 isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5476 "noalias.addrspace are only for memory operations!", &I);
5477 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5478 }
5479
5480 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5481 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5482 "invariant.group metadata is only for loads and stores", &I);
5483 }
5484
5485 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5486 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5487 &I);
5488 Check(isa<LoadInst>(I),
5489 "nonnull applies only to load instructions, use attributes"
5490 " for calls or invokes",
5491 &I);
5492 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5493 }
5494
5495 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5496 visitDereferenceableMetadata(I, MD);
5497
5498 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5499 visitDereferenceableMetadata(I, MD);
5500
5501 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5502 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5503
5504 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5505 visitAliasScopeListMetadata(MD);
5506 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5507 visitAliasScopeListMetadata(MD);
5508
5509 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5510 visitAccessGroupMetadata(MD);
5511
5512 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5513 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5514 &I);
5515 Check(isa<LoadInst>(I),
5516 "align applies only to load instructions, "
5517 "use attributes for calls or invokes",
5518 &I);
5519 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5520 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5521 Check(CI && CI->getType()->isIntegerTy(64),
5522 "align metadata value must be an i64!", &I);
5523 uint64_t Align = CI->getZExtValue();
5524 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5525 &I);
5527 "alignment is larger that implementation defined limit", &I);
5528 }
5529
5530 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5531 visitProfMetadata(I, MD);
5532
5533 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5534 visitMemProfMetadata(I, MD);
5535
5536 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5537 visitCallsiteMetadata(I, MD);
5538
5539 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5540 visitCalleeTypeMetadata(I, MD);
5541
5542 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5543 visitDIAssignIDMetadata(I, MD);
5544
5545 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5546 visitMMRAMetadata(I, MMRA);
5547
5548 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5549 visitAnnotationMetadata(Annotation);
5550
5551 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5552 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5553 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5554
5555 if (auto *DL = dyn_cast<DILocation>(N)) {
5556 if (DL->getAtomGroup()) {
5557 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5558 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5559 "Instructions enabled",
5560 DL, DL->getScope()->getSubprogram());
5561 }
5562 }
5563 }
5564
5566 I.getAllMetadata(MDs);
5567 for (auto Attachment : MDs) {
5568 unsigned Kind = Attachment.first;
5569 auto AllowLocs =
5570 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5571 ? AreDebugLocsAllowed::Yes
5572 : AreDebugLocsAllowed::No;
5573 visitMDNode(*Attachment.second, AllowLocs);
5574 }
5575
5576 InstsInThisBlock.insert(&I);
5577}
5578
5579/// Allow intrinsics to be verified in different ways.
5580void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5581 Function *IF = Call.getCalledFunction();
5582 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5583 IF);
5584
5585 // Verify that the intrinsic prototype lines up with what the .td files
5586 // describe.
5587 FunctionType *IFTy = IF->getFunctionType();
5588 bool IsVarArg = IFTy->isVarArg();
5589
5593
5594 // Walk the descriptors to extract overloaded types.
5599 "Intrinsic has incorrect return type!", IF);
5601 "Intrinsic has incorrect argument type!", IF);
5602
5603 // Verify if the intrinsic call matches the vararg property.
5604 if (IsVarArg)
5606 "Intrinsic was not defined with variable arguments!", IF);
5607 else
5609 "Callsite was not defined with variable arguments!", IF);
5610
5611 // All descriptors should be absorbed by now.
5612 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5613
5614 // Now that we have the intrinsic ID and the actual argument types (and we
5615 // know they are legal for the intrinsic!) get the intrinsic name through the
5616 // usual means. This allows us to verify the mangling of argument types into
5617 // the name.
5618 const std::string ExpectedName =
5619 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5620 Check(ExpectedName == IF->getName(),
5621 "Intrinsic name not mangled correctly for type arguments! "
5622 "Should be: " +
5623 ExpectedName,
5624 IF);
5625
5626 // If the intrinsic takes MDNode arguments, verify that they are either global
5627 // or are local to *this* function.
5628 for (Value *V : Call.args()) {
5629 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5630 visitMetadataAsValue(*MD, Call.getCaller());
5631 if (auto *Const = dyn_cast<Constant>(V))
5632 Check(!Const->getType()->isX86_AMXTy(),
5633 "const x86_amx is not allowed in argument!");
5634 }
5635
5636 switch (ID) {
5637 default:
5638 break;
5639 case Intrinsic::assume: {
5640 for (auto &Elem : Call.bundle_op_infos()) {
5641 unsigned ArgCount = Elem.End - Elem.Begin;
5642 // Separate storage assumptions are special insofar as they're the only
5643 // operand bundles allowed on assumes that aren't parameter attributes.
5644 if (Elem.Tag->getKey() == "separate_storage") {
5645 Check(ArgCount == 2,
5646 "separate_storage assumptions should have 2 arguments", Call);
5647 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5648 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5649 "arguments to separate_storage assumptions should be pointers",
5650 Call);
5651 continue;
5652 }
5653 Check(Elem.Tag->getKey() == "ignore" ||
5654 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5655 "tags must be valid attribute names", Call);
5657 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5658 if (Kind == Attribute::Alignment) {
5659 Check(ArgCount <= 3 && ArgCount >= 2,
5660 "alignment assumptions should have 2 or 3 arguments", Call);
5661 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5662 "first argument should be a pointer", Call);
5663 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5664 "second argument should be an integer", Call);
5665 if (ArgCount == 3)
5666 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5667 "third argument should be an integer if present", Call);
5668 continue;
5669 }
5670 if (Kind == Attribute::Dereferenceable) {
5671 Check(ArgCount == 2,
5672 "dereferenceable assumptions should have 2 arguments", Call);
5673 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5674 "first argument should be a pointer", Call);
5675 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5676 "second argument should be an integer", Call);
5677 continue;
5678 }
5679 Check(ArgCount <= 2, "too many arguments", Call);
5680 if (Kind == Attribute::None)
5681 break;
5682 if (Attribute::isIntAttrKind(Kind)) {
5683 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5684 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5685 "the second argument should be a constant integral value", Call);
5686 } else if (Attribute::canUseAsParamAttr(Kind)) {
5687 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5688 } else if (Attribute::canUseAsFnAttr(Kind)) {
5689 Check((ArgCount) == 0, "this attribute has no argument", Call);
5690 }
5691 }
5692 break;
5693 }
5694 case Intrinsic::ucmp:
5695 case Intrinsic::scmp: {
5696 Type *SrcTy = Call.getOperand(0)->getType();
5697 Type *DestTy = Call.getType();
5698
5699 Check(DestTy->getScalarSizeInBits() >= 2,
5700 "result type must be at least 2 bits wide", Call);
5701
5702 bool IsDestTypeVector = DestTy->isVectorTy();
5703 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5704 "ucmp/scmp argument and result types must both be either vector or "
5705 "scalar types",
5706 Call);
5707 if (IsDestTypeVector) {
5708 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5709 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5710 Check(SrcVecLen == DestVecLen,
5711 "return type and arguments must have the same number of "
5712 "elements",
5713 Call);
5714 }
5715 break;
5716 }
5717 case Intrinsic::coro_id: {
5718 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5719 if (isa<ConstantPointerNull>(InfoArg))
5720 break;
5721 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5722 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5723 "info argument of llvm.coro.id must refer to an initialized "
5724 "constant");
5725 Constant *Init = GV->getInitializer();
5726 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5727 "info argument of llvm.coro.id must refer to either a struct or "
5728 "an array");
5729 break;
5730 }
5731 case Intrinsic::is_fpclass: {
5732 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5733 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5734 "unsupported bits for llvm.is.fpclass test mask");
5735 break;
5736 }
5737 case Intrinsic::fptrunc_round: {
5738 // Check the rounding mode
5739 Metadata *MD = nullptr;
5740 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5741 if (MAV)
5742 MD = MAV->getMetadata();
5743
5744 Check(MD != nullptr, "missing rounding mode argument", Call);
5745
5746 Check(isa<MDString>(MD),
5747 ("invalid value for llvm.fptrunc.round metadata operand"
5748 " (the operand should be a string)"),
5749 MD);
5750
5751 std::optional<RoundingMode> RoundMode =
5752 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5753 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5754 "unsupported rounding mode argument", Call);
5755 break;
5756 }
5757#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5758#include "llvm/IR/VPIntrinsics.def"
5759#undef BEGIN_REGISTER_VP_INTRINSIC
5760 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5761 break;
5762#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5763 case Intrinsic::INTRINSIC:
5764#include "llvm/IR/ConstrainedOps.def"
5765#undef INSTRUCTION
5766 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5767 break;
5768 case Intrinsic::dbg_declare: // llvm.dbg.declare
5769 case Intrinsic::dbg_value: // llvm.dbg.value
5770 case Intrinsic::dbg_assign: // llvm.dbg.assign
5771 case Intrinsic::dbg_label: // llvm.dbg.label
5772 // We no longer interpret debug intrinsics (the old variable-location
5773 // design). They're meaningless as far as LLVM is concerned we could make
5774 // it an error for them to appear, but it's possible we'll have users
5775 // converting back to intrinsics for the forseeable future (such as DXIL),
5776 // so tolerate their existance.
5777 break;
5778 case Intrinsic::memcpy:
5779 case Intrinsic::memcpy_inline:
5780 case Intrinsic::memmove:
5781 case Intrinsic::memset:
5782 case Intrinsic::memset_inline:
5783 break;
5784 case Intrinsic::experimental_memset_pattern: {
5785 const auto Memset = cast<MemSetPatternInst>(&Call);
5786 Check(Memset->getValue()->getType()->isSized(),
5787 "unsized types cannot be used as memset patterns", Call);
5788 break;
5789 }
5790 case Intrinsic::memcpy_element_unordered_atomic:
5791 case Intrinsic::memmove_element_unordered_atomic:
5792 case Intrinsic::memset_element_unordered_atomic: {
5793 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5794
5795 ConstantInt *ElementSizeCI =
5796 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5797 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5798 Check(ElementSizeVal.isPowerOf2(),
5799 "element size of the element-wise atomic memory intrinsic "
5800 "must be a power of 2",
5801 Call);
5802
5803 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5804 return Alignment && ElementSizeVal.ule(Alignment->value());
5805 };
5806 Check(IsValidAlignment(AMI->getDestAlign()),
5807 "incorrect alignment of the destination argument", Call);
5808 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5809 Check(IsValidAlignment(AMT->getSourceAlign()),
5810 "incorrect alignment of the source argument", Call);
5811 }
5812 break;
5813 }
5814 case Intrinsic::call_preallocated_setup: {
5815 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5816 Check(NumArgs != nullptr,
5817 "llvm.call.preallocated.setup argument must be a constant");
5818 bool FoundCall = false;
5819 for (User *U : Call.users()) {
5820 auto *UseCall = dyn_cast<CallBase>(U);
5821 Check(UseCall != nullptr,
5822 "Uses of llvm.call.preallocated.setup must be calls");
5823 Intrinsic::ID IID = UseCall->getIntrinsicID();
5824 if (IID == Intrinsic::call_preallocated_arg) {
5825 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5826 Check(AllocArgIndex != nullptr,
5827 "llvm.call.preallocated.alloc arg index must be a constant");
5828 auto AllocArgIndexInt = AllocArgIndex->getValue();
5829 Check(AllocArgIndexInt.sge(0) &&
5830 AllocArgIndexInt.slt(NumArgs->getValue()),
5831 "llvm.call.preallocated.alloc arg index must be between 0 and "
5832 "corresponding "
5833 "llvm.call.preallocated.setup's argument count");
5834 } else if (IID == Intrinsic::call_preallocated_teardown) {
5835 // nothing to do
5836 } else {
5837 Check(!FoundCall, "Can have at most one call corresponding to a "
5838 "llvm.call.preallocated.setup");
5839 FoundCall = true;
5840 size_t NumPreallocatedArgs = 0;
5841 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5842 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5843 ++NumPreallocatedArgs;
5844 }
5845 }
5846 Check(NumPreallocatedArgs != 0,
5847 "cannot use preallocated intrinsics on a call without "
5848 "preallocated arguments");
5849 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5850 "llvm.call.preallocated.setup arg size must be equal to number "
5851 "of preallocated arguments "
5852 "at call site",
5853 Call, *UseCall);
5854 // getOperandBundle() cannot be called if more than one of the operand
5855 // bundle exists. There is already a check elsewhere for this, so skip
5856 // here if we see more than one.
5857 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5858 1) {
5859 return;
5860 }
5861 auto PreallocatedBundle =
5862 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5863 Check(PreallocatedBundle,
5864 "Use of llvm.call.preallocated.setup outside intrinsics "
5865 "must be in \"preallocated\" operand bundle");
5866 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5867 "preallocated bundle must have token from corresponding "
5868 "llvm.call.preallocated.setup");
5869 }
5870 }
5871 break;
5872 }
5873 case Intrinsic::call_preallocated_arg: {
5874 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5875 Check(Token &&
5876 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5877 "llvm.call.preallocated.arg token argument must be a "
5878 "llvm.call.preallocated.setup");
5879 Check(Call.hasFnAttr(Attribute::Preallocated),
5880 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5881 "call site attribute");
5882 break;
5883 }
5884 case Intrinsic::call_preallocated_teardown: {
5885 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5886 Check(Token &&
5887 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5888 "llvm.call.preallocated.teardown token argument must be a "
5889 "llvm.call.preallocated.setup");
5890 break;
5891 }
5892 case Intrinsic::gcroot:
5893 case Intrinsic::gcwrite:
5894 case Intrinsic::gcread:
5895 if (ID == Intrinsic::gcroot) {
5896 AllocaInst *AI =
5897 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5898 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5899 Check(isa<Constant>(Call.getArgOperand(1)),
5900 "llvm.gcroot parameter #2 must be a constant.", Call);
5901 if (!AI->getAllocatedType()->isPointerTy()) {
5902 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5903 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5904 "or argument #2 must be a non-null constant.",
5905 Call);
5906 }
5907 }
5908
5909 Check(Call.getParent()->getParent()->hasGC(),
5910 "Enclosing function does not use GC.", Call);
5911 break;
5912 case Intrinsic::init_trampoline:
5913 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5914 "llvm.init_trampoline parameter #2 must resolve to a function.",
5915 Call);
5916 break;
5917 case Intrinsic::prefetch:
5918 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5919 "rw argument to llvm.prefetch must be 0-1", Call);
5920 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5921 "locality argument to llvm.prefetch must be 0-3", Call);
5922 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5923 "cache type argument to llvm.prefetch must be 0-1", Call);
5924 break;
5925 case Intrinsic::stackprotector:
5926 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5927 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5928 break;
5929 case Intrinsic::localescape: {
5930 BasicBlock *BB = Call.getParent();
5931 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5932 Call);
5933 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5934 Call);
5935 for (Value *Arg : Call.args()) {
5936 if (isa<ConstantPointerNull>(Arg))
5937 continue; // Null values are allowed as placeholders.
5938 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5939 Check(AI && AI->isStaticAlloca(),
5940 "llvm.localescape only accepts static allocas", Call);
5941 }
5942 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5943 SawFrameEscape = true;
5944 break;
5945 }
5946 case Intrinsic::localrecover: {
5947 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5948 Function *Fn = dyn_cast<Function>(FnArg);
5949 Check(Fn && !Fn->isDeclaration(),
5950 "llvm.localrecover first "
5951 "argument must be function defined in this module",
5952 Call);
5953 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5954 auto &Entry = FrameEscapeInfo[Fn];
5955 Entry.second = unsigned(
5956 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5957 break;
5958 }
5959
5960 case Intrinsic::experimental_gc_statepoint:
5961 if (auto *CI = dyn_cast<CallInst>(&Call))
5962 Check(!CI->isInlineAsm(),
5963 "gc.statepoint support for inline assembly unimplemented", CI);
5964 Check(Call.getParent()->getParent()->hasGC(),
5965 "Enclosing function does not use GC.", Call);
5966
5967 verifyStatepoint(Call);
5968 break;
5969 case Intrinsic::experimental_gc_result: {
5970 Check(Call.getParent()->getParent()->hasGC(),
5971 "Enclosing function does not use GC.", Call);
5972
5973 auto *Statepoint = Call.getArgOperand(0);
5974 if (isa<UndefValue>(Statepoint))
5975 break;
5976
5977 // Are we tied to a statepoint properly?
5978 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5979 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
5980 Intrinsic::experimental_gc_statepoint,
5981 "gc.result operand #1 must be from a statepoint", Call,
5982 Call.getArgOperand(0));
5983
5984 // Check that result type matches wrapped callee.
5985 auto *TargetFuncType =
5986 cast<FunctionType>(StatepointCall->getParamElementType(2));
5987 Check(Call.getType() == TargetFuncType->getReturnType(),
5988 "gc.result result type does not match wrapped callee", Call);
5989 break;
5990 }
5991 case Intrinsic::experimental_gc_relocate: {
5992 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5993
5994 Check(isa<PointerType>(Call.getType()->getScalarType()),
5995 "gc.relocate must return a pointer or a vector of pointers", Call);
5996
5997 // Check that this relocate is correctly tied to the statepoint
5998
5999 // This is case for relocate on the unwinding path of an invoke statepoint
6000 if (LandingPadInst *LandingPad =
6001 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
6002
6003 const BasicBlock *InvokeBB =
6004 LandingPad->getParent()->getUniquePredecessor();
6005
6006 // Landingpad relocates should have only one predecessor with invoke
6007 // statepoint terminator
6008 Check(InvokeBB, "safepoints should have unique landingpads",
6009 LandingPad->getParent());
6010 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6011 InvokeBB);
6012 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
6013 "gc relocate should be linked to a statepoint", InvokeBB);
6014 } else {
6015 // In all other cases relocate should be tied to the statepoint directly.
6016 // This covers relocates on a normal return path of invoke statepoint and
6017 // relocates of a call statepoint.
6018 auto *Token = Call.getArgOperand(0);
6019 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
6020 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6021 }
6022
6023 // Verify rest of the relocate arguments.
6024 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6025
6026 // Both the base and derived must be piped through the safepoint.
6027 Value *Base = Call.getArgOperand(1);
6028 Check(isa<ConstantInt>(Base),
6029 "gc.relocate operand #2 must be integer offset", Call);
6030
6031 Value *Derived = Call.getArgOperand(2);
6032 Check(isa<ConstantInt>(Derived),
6033 "gc.relocate operand #3 must be integer offset", Call);
6034
6035 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6036 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6037
6038 // Check the bounds
6039 if (isa<UndefValue>(StatepointCall))
6040 break;
6041 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6042 .getOperandBundle(LLVMContext::OB_gc_live)) {
6043 Check(BaseIndex < Opt->Inputs.size(),
6044 "gc.relocate: statepoint base index out of bounds", Call);
6045 Check(DerivedIndex < Opt->Inputs.size(),
6046 "gc.relocate: statepoint derived index out of bounds", Call);
6047 }
6048
6049 // Relocated value must be either a pointer type or vector-of-pointer type,
6050 // but gc_relocate does not need to return the same pointer type as the
6051 // relocated pointer. It can be casted to the correct type later if it's
6052 // desired. However, they must have the same address space and 'vectorness'
6053 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6054 auto *ResultType = Call.getType();
6055 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6056 auto *BaseType = Relocate.getBasePtr()->getType();
6057
6058 Check(BaseType->isPtrOrPtrVectorTy(),
6059 "gc.relocate: relocated value must be a pointer", Call);
6060 Check(DerivedType->isPtrOrPtrVectorTy(),
6061 "gc.relocate: relocated value must be a pointer", Call);
6062
6063 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6064 "gc.relocate: vector relocates to vector and pointer to pointer",
6065 Call);
6066 Check(
6067 ResultType->getPointerAddressSpace() ==
6068 DerivedType->getPointerAddressSpace(),
6069 "gc.relocate: relocating a pointer shouldn't change its address space",
6070 Call);
6071
6072 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6073 Check(GC, "gc.relocate: calling function must have GCStrategy",
6074 Call.getFunction());
6075 if (GC) {
6076 auto isGCPtr = [&GC](Type *PTy) {
6077 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6078 };
6079 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6080 Check(isGCPtr(BaseType),
6081 "gc.relocate: relocated value must be a gc pointer", Call);
6082 Check(isGCPtr(DerivedType),
6083 "gc.relocate: relocated value must be a gc pointer", Call);
6084 }
6085 break;
6086 }
6087 case Intrinsic::experimental_patchpoint: {
6088 if (Call.getCallingConv() == CallingConv::AnyReg) {
6089 Check(Call.getType()->isSingleValueType(),
6090 "patchpoint: invalid return type used with anyregcc", Call);
6091 }
6092 break;
6093 }
6094 case Intrinsic::eh_exceptioncode:
6095 case Intrinsic::eh_exceptionpointer: {
6096 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
6097 "eh.exceptionpointer argument must be a catchpad", Call);
6098 break;
6099 }
6100 case Intrinsic::get_active_lane_mask: {
6101 Check(Call.getType()->isVectorTy(),
6102 "get_active_lane_mask: must return a "
6103 "vector",
6104 Call);
6105 auto *ElemTy = Call.getType()->getScalarType();
6106 Check(ElemTy->isIntegerTy(1),
6107 "get_active_lane_mask: element type is not "
6108 "i1",
6109 Call);
6110 break;
6111 }
6112 case Intrinsic::experimental_get_vector_length: {
6113 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6114 Check(!VF->isNegative() && !VF->isZero(),
6115 "get_vector_length: VF must be positive", Call);
6116 break;
6117 }
6118 case Intrinsic::masked_load: {
6119 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6120 Call);
6121
6122 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6123 Value *Mask = Call.getArgOperand(2);
6124 Value *PassThru = Call.getArgOperand(3);
6125 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6126 Call);
6127 Check(Alignment->getValue().isPowerOf2(),
6128 "masked_load: alignment must be a power of 2", Call);
6129 Check(PassThru->getType() == Call.getType(),
6130 "masked_load: pass through and return type must match", Call);
6131 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6132 cast<VectorType>(Call.getType())->getElementCount(),
6133 "masked_load: vector mask must be same length as return", Call);
6134 break;
6135 }
6136 case Intrinsic::masked_store: {
6137 Value *Val = Call.getArgOperand(0);
6138 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6139 Value *Mask = Call.getArgOperand(3);
6140 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6141 Call);
6142 Check(Alignment->getValue().isPowerOf2(),
6143 "masked_store: alignment must be a power of 2", Call);
6144 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6145 cast<VectorType>(Val->getType())->getElementCount(),
6146 "masked_store: vector mask must be same length as value", Call);
6147 break;
6148 }
6149
6150 case Intrinsic::masked_gather: {
6151 const APInt &Alignment =
6152 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
6153 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6154 "masked_gather: alignment must be 0 or a power of 2", Call);
6155 break;
6156 }
6157 case Intrinsic::masked_scatter: {
6158 const APInt &Alignment =
6159 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6160 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6161 "masked_scatter: alignment must be 0 or a power of 2", Call);
6162 break;
6163 }
6164
6165 case Intrinsic::experimental_guard: {
6166 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6167 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6168 "experimental_guard must have exactly one "
6169 "\"deopt\" operand bundle");
6170 break;
6171 }
6172
6173 case Intrinsic::experimental_deoptimize: {
6174 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6175 Call);
6176 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6177 "experimental_deoptimize must have exactly one "
6178 "\"deopt\" operand bundle");
6179 Check(Call.getType() == Call.getFunction()->getReturnType(),
6180 "experimental_deoptimize return type must match caller return type");
6181
6182 if (isa<CallInst>(Call)) {
6183 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
6184 Check(RI,
6185 "calls to experimental_deoptimize must be followed by a return");
6186
6187 if (!Call.getType()->isVoidTy() && RI)
6188 Check(RI->getReturnValue() == &Call,
6189 "calls to experimental_deoptimize must be followed by a return "
6190 "of the value computed by experimental_deoptimize");
6191 }
6192
6193 break;
6194 }
6195 case Intrinsic::vastart: {
6196 Check(Call.getFunction()->isVarArg(),
6197 "va_start called in a non-varargs function");
6198 break;
6199 }
6200 case Intrinsic::get_dynamic_area_offset: {
6201 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6202 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6203 IntTy->getBitWidth(),
6204 "get_dynamic_area_offset result type must be scalar integer matching "
6205 "alloca address space width",
6206 Call);
6207 break;
6208 }
6209 case Intrinsic::vector_reduce_and:
6210 case Intrinsic::vector_reduce_or:
6211 case Intrinsic::vector_reduce_xor:
6212 case Intrinsic::vector_reduce_add:
6213 case Intrinsic::vector_reduce_mul:
6214 case Intrinsic::vector_reduce_smax:
6215 case Intrinsic::vector_reduce_smin:
6216 case Intrinsic::vector_reduce_umax:
6217 case Intrinsic::vector_reduce_umin: {
6218 Type *ArgTy = Call.getArgOperand(0)->getType();
6219 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6220 "Intrinsic has incorrect argument type!");
6221 break;
6222 }
6223 case Intrinsic::vector_reduce_fmax:
6224 case Intrinsic::vector_reduce_fmin: {
6225 Type *ArgTy = Call.getArgOperand(0)->getType();
6226 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6227 "Intrinsic has incorrect argument type!");
6228 break;
6229 }
6230 case Intrinsic::vector_reduce_fadd:
6231 case Intrinsic::vector_reduce_fmul: {
6232 // Unlike the other reductions, the first argument is a start value. The
6233 // second argument is the vector to be reduced.
6234 Type *ArgTy = Call.getArgOperand(1)->getType();
6235 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6236 "Intrinsic has incorrect argument type!");
6237 break;
6238 }
6239 case Intrinsic::smul_fix:
6240 case Intrinsic::smul_fix_sat:
6241 case Intrinsic::umul_fix:
6242 case Intrinsic::umul_fix_sat:
6243 case Intrinsic::sdiv_fix:
6244 case Intrinsic::sdiv_fix_sat:
6245 case Intrinsic::udiv_fix:
6246 case Intrinsic::udiv_fix_sat: {
6247 Value *Op1 = Call.getArgOperand(0);
6248 Value *Op2 = Call.getArgOperand(1);
6250 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6251 "vector of ints");
6253 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6254 "vector of ints");
6255
6256 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6257 Check(Op3->getType()->isIntegerTy(),
6258 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6259 Check(Op3->getBitWidth() <= 32,
6260 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6261
6262 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6263 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6264 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6265 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6266 "the operands");
6267 } else {
6268 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6269 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6270 "to the width of the operands");
6271 }
6272 break;
6273 }
6274 case Intrinsic::lrint:
6275 case Intrinsic::llrint:
6276 case Intrinsic::lround:
6277 case Intrinsic::llround: {
6278 Type *ValTy = Call.getArgOperand(0)->getType();
6279 Type *ResultTy = Call.getType();
6280 auto *VTy = dyn_cast<VectorType>(ValTy);
6281 auto *RTy = dyn_cast<VectorType>(ResultTy);
6282 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6283 ExpectedName + ": argument must be floating-point or vector "
6284 "of floating-points, and result must be integer or "
6285 "vector of integers",
6286 &Call);
6287 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6288 ExpectedName + ": argument and result disagree on vector use", &Call);
6289 if (VTy) {
6290 Check(VTy->getElementCount() == RTy->getElementCount(),
6291 ExpectedName + ": argument must be same length as result", &Call);
6292 }
6293 break;
6294 }
6295 case Intrinsic::bswap: {
6296 Type *Ty = Call.getType();
6297 unsigned Size = Ty->getScalarSizeInBits();
6298 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6299 break;
6300 }
6301 case Intrinsic::invariant_start: {
6302 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6303 Check(InvariantSize &&
6304 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6305 "invariant_start parameter must be -1, 0 or a positive number",
6306 &Call);
6307 break;
6308 }
6309 case Intrinsic::matrix_multiply:
6310 case Intrinsic::matrix_transpose:
6311 case Intrinsic::matrix_column_major_load:
6312 case Intrinsic::matrix_column_major_store: {
6313 Function *IF = Call.getCalledFunction();
6314 ConstantInt *Stride = nullptr;
6315 ConstantInt *NumRows;
6316 ConstantInt *NumColumns;
6317 VectorType *ResultTy;
6318 Type *Op0ElemTy = nullptr;
6319 Type *Op1ElemTy = nullptr;
6320 switch (ID) {
6321 case Intrinsic::matrix_multiply: {
6322 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6323 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6324 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6325 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6326 ->getNumElements() ==
6327 NumRows->getZExtValue() * N->getZExtValue(),
6328 "First argument of a matrix operation does not match specified "
6329 "shape!");
6330 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6331 ->getNumElements() ==
6332 N->getZExtValue() * NumColumns->getZExtValue(),
6333 "Second argument of a matrix operation does not match specified "
6334 "shape!");
6335
6336 ResultTy = cast<VectorType>(Call.getType());
6337 Op0ElemTy =
6338 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6339 Op1ElemTy =
6340 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6341 break;
6342 }
6343 case Intrinsic::matrix_transpose:
6344 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6345 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6346 ResultTy = cast<VectorType>(Call.getType());
6347 Op0ElemTy =
6348 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6349 break;
6350 case Intrinsic::matrix_column_major_load: {
6351 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6352 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6353 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6354 ResultTy = cast<VectorType>(Call.getType());
6355 break;
6356 }
6357 case Intrinsic::matrix_column_major_store: {
6358 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6359 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6360 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6361 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6362 Op0ElemTy =
6363 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6364 break;
6365 }
6366 default:
6367 llvm_unreachable("unexpected intrinsic");
6368 }
6369
6370 Check(ResultTy->getElementType()->isIntegerTy() ||
6371 ResultTy->getElementType()->isFloatingPointTy(),
6372 "Result type must be an integer or floating-point type!", IF);
6373
6374 if (Op0ElemTy)
6375 Check(ResultTy->getElementType() == Op0ElemTy,
6376 "Vector element type mismatch of the result and first operand "
6377 "vector!",
6378 IF);
6379
6380 if (Op1ElemTy)
6381 Check(ResultTy->getElementType() == Op1ElemTy,
6382 "Vector element type mismatch of the result and second operand "
6383 "vector!",
6384 IF);
6385
6386 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6387 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6388 "Result of a matrix operation does not fit in the returned vector!");
6389
6390 if (Stride)
6391 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6392 "Stride must be greater or equal than the number of rows!", IF);
6393
6394 break;
6395 }
6396 case Intrinsic::vector_splice: {
6397 VectorType *VecTy = cast<VectorType>(Call.getType());
6398 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6399 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6400 if (Call.getParent() && Call.getParent()->getParent()) {
6401 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6402 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6403 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6404 }
6405 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6406 (Idx >= 0 && Idx < KnownMinNumElements),
6407 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6408 "known minimum number of elements in the vector. For scalable "
6409 "vectors the minimum number of elements is determined from "
6410 "vscale_range.",
6411 &Call);
6412 break;
6413 }
6414 case Intrinsic::stepvector: {
6415 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6416 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6417 VecTy->getScalarSizeInBits() >= 8,
6418 "stepvector only supported for vectors of integers "
6419 "with a bitwidth of at least 8.",
6420 &Call);
6421 break;
6422 }
6423 case Intrinsic::experimental_vector_match: {
6424 Value *Op1 = Call.getArgOperand(0);
6425 Value *Op2 = Call.getArgOperand(1);
6426 Value *Mask = Call.getArgOperand(2);
6427
6428 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6429 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6430 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6431
6432 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6433 Check(isa<FixedVectorType>(Op2Ty),
6434 "Second operand must be a fixed length vector.", &Call);
6435 Check(Op1Ty->getElementType()->isIntegerTy(),
6436 "First operand must be a vector of integers.", &Call);
6437 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6438 "First two operands must have the same element type.", &Call);
6439 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6440 "First operand and mask must have the same number of elements.",
6441 &Call);
6442 Check(MaskTy->getElementType()->isIntegerTy(1),
6443 "Mask must be a vector of i1's.", &Call);
6444 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6445 &Call);
6446 break;
6447 }
6448 case Intrinsic::vector_insert: {
6449 Value *Vec = Call.getArgOperand(0);
6450 Value *SubVec = Call.getArgOperand(1);
6451 Value *Idx = Call.getArgOperand(2);
6452 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6453
6454 VectorType *VecTy = cast<VectorType>(Vec->getType());
6455 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6456
6457 ElementCount VecEC = VecTy->getElementCount();
6458 ElementCount SubVecEC = SubVecTy->getElementCount();
6459 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6460 "vector_insert parameters must have the same element "
6461 "type.",
6462 &Call);
6463 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6464 "vector_insert index must be a constant multiple of "
6465 "the subvector's known minimum vector length.");
6466
6467 // If this insertion is not the 'mixed' case where a fixed vector is
6468 // inserted into a scalable vector, ensure that the insertion of the
6469 // subvector does not overrun the parent vector.
6470 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6471 Check(IdxN < VecEC.getKnownMinValue() &&
6472 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6473 "subvector operand of vector_insert would overrun the "
6474 "vector being inserted into.");
6475 }
6476 break;
6477 }
6478 case Intrinsic::vector_extract: {
6479 Value *Vec = Call.getArgOperand(0);
6480 Value *Idx = Call.getArgOperand(1);
6481 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6482
6483 VectorType *ResultTy = cast<VectorType>(Call.getType());
6484 VectorType *VecTy = cast<VectorType>(Vec->getType());
6485
6486 ElementCount VecEC = VecTy->getElementCount();
6487 ElementCount ResultEC = ResultTy->getElementCount();
6488
6489 Check(ResultTy->getElementType() == VecTy->getElementType(),
6490 "vector_extract result must have the same element "
6491 "type as the input vector.",
6492 &Call);
6493 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6494 "vector_extract index must be a constant multiple of "
6495 "the result type's known minimum vector length.");
6496
6497 // If this extraction is not the 'mixed' case where a fixed vector is
6498 // extracted from a scalable vector, ensure that the extraction does not
6499 // overrun the parent vector.
6500 if (VecEC.isScalable() == ResultEC.isScalable()) {
6501 Check(IdxN < VecEC.getKnownMinValue() &&
6502 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6503 "vector_extract would overrun.");
6504 }
6505 break;
6506 }
6507 case Intrinsic::experimental_vector_partial_reduce_add: {
6508 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6509 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6510
6511 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6512 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6513
6514 Check((VecWidth % AccWidth) == 0,
6515 "Invalid vector widths for partial "
6516 "reduction. The width of the input vector "
6517 "must be a positive integer multiple of "
6518 "the width of the accumulator vector.");
6519 break;
6520 }
6521 case Intrinsic::experimental_noalias_scope_decl: {
6522 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6523 break;
6524 }
6525 case Intrinsic::preserve_array_access_index:
6526 case Intrinsic::preserve_struct_access_index:
6527 case Intrinsic::aarch64_ldaxr:
6528 case Intrinsic::aarch64_ldxr:
6529 case Intrinsic::arm_ldaex:
6530 case Intrinsic::arm_ldrex: {
6531 Type *ElemTy = Call.getParamElementType(0);
6532 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6533 &Call);
6534 break;
6535 }
6536 case Intrinsic::aarch64_stlxr:
6537 case Intrinsic::aarch64_stxr:
6538 case Intrinsic::arm_stlex:
6539 case Intrinsic::arm_strex: {
6540 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6541 Check(ElemTy,
6542 "Intrinsic requires elementtype attribute on second argument.",
6543 &Call);
6544 break;
6545 }
6546 case Intrinsic::aarch64_prefetch: {
6547 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6548 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6549 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6550 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6551 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6552 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6553 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6554 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6555 break;
6556 }
6557 case Intrinsic::callbr_landingpad: {
6558 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6559 Check(CBR, "intrinstic requires callbr operand", &Call);
6560 if (!CBR)
6561 break;
6562
6563 const BasicBlock *LandingPadBB = Call.getParent();
6564 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6565 if (!PredBB) {
6566 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6567 break;
6568 }
6569 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6570 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6571 &Call);
6572 break;
6573 }
6574 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6575 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6576 "block in indirect destination list",
6577 &Call);
6578 const Instruction &First = *LandingPadBB->begin();
6579 Check(&First == &Call, "No other instructions may proceed intrinsic",
6580 &Call);
6581 break;
6582 }
6583 case Intrinsic::amdgcn_cs_chain: {
6584 auto CallerCC = Call.getCaller()->getCallingConv();
6585 switch (CallerCC) {
6589 break;
6590 default:
6591 CheckFailed("Intrinsic can only be used from functions with the "
6592 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6593 "calling conventions",
6594 &Call);
6595 break;
6596 }
6597
6598 Check(Call.paramHasAttr(2, Attribute::InReg),
6599 "SGPR arguments must have the `inreg` attribute", &Call);
6600 Check(!Call.paramHasAttr(3, Attribute::InReg),
6601 "VGPR arguments must not have the `inreg` attribute", &Call);
6602
6603 auto *Next = Call.getNextNode();
6604 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6605 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6606 Intrinsic::amdgcn_unreachable;
6607 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6608 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6609 break;
6610 }
6611 case Intrinsic::amdgcn_init_exec_from_input: {
6612 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6613 Check(Arg && Arg->hasInRegAttr(),
6614 "only inreg arguments to the parent function are valid as inputs to "
6615 "this intrinsic",
6616 &Call);
6617 break;
6618 }
6619 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6620 auto CallerCC = Call.getCaller()->getCallingConv();
6621 switch (CallerCC) {
6624 break;
6625 default:
6626 CheckFailed("Intrinsic can only be used from functions with the "
6627 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6628 "calling conventions",
6629 &Call);
6630 break;
6631 }
6632
6633 unsigned InactiveIdx = 1;
6634 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6635 "Value for inactive lanes must not have the `inreg` attribute",
6636 &Call);
6637 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6638 "Value for inactive lanes must be a function argument", &Call);
6639 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6640 "Value for inactive lanes must be a VGPR function argument", &Call);
6641 break;
6642 }
6643 case Intrinsic::amdgcn_call_whole_wave: {
6644 auto F = dyn_cast<Function>(Call.getArgOperand(0));
6645 Check(F, "Indirect whole wave calls are not allowed", &Call);
6646
6647 CallingConv::ID CC = F->getCallingConv();
6649 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6650 &Call);
6651
6652 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6653
6654 Check(Call.arg_size() == F->arg_size(),
6655 "Call argument count must match callee argument count", &Call);
6656
6657 // The first argument of the call is the callee, and the first argument of
6658 // the callee is the active mask. The rest of the arguments must match.
6659 Check(F->arg_begin()->getType()->isIntegerTy(1),
6660 "Callee must have i1 as its first argument", &Call);
6661 for (auto [CallArg, FuncArg] :
6662 drop_begin(zip_equal(Call.args(), F->args()))) {
6663 Check(CallArg->getType() == FuncArg.getType(),
6664 "Argument types must match", &Call);
6665
6666 // Check that inreg attributes match between call site and function
6667 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6668 FuncArg.hasInRegAttr(),
6669 "Argument inreg attributes must match", &Call);
6670 }
6671 break;
6672 }
6673 case Intrinsic::amdgcn_s_prefetch_data: {
6674 Check(
6676 Call.getArgOperand(0)->getType()->getPointerAddressSpace()),
6677 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6678 break;
6679 }
6680 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6681 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6682 Value *Src0 = Call.getArgOperand(0);
6683 Value *Src1 = Call.getArgOperand(1);
6684
6685 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6686 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6687 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6688 Call.getArgOperand(3));
6689 Check(BLGP <= 4, "invalid value for blgp format", Call,
6690 Call.getArgOperand(4));
6691
6692 // AMDGPU::MFMAScaleFormats values
6693 auto getFormatNumRegs = [](unsigned FormatVal) {
6694 switch (FormatVal) {
6695 case 0:
6696 case 1:
6697 return 8u;
6698 case 2:
6699 case 3:
6700 return 6u;
6701 case 4:
6702 return 4u;
6703 default:
6704 llvm_unreachable("invalid format value");
6705 }
6706 };
6707
6708 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6709 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6710 return false;
6711 unsigned NumElts = Ty->getNumElements();
6712 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6713 };
6714
6715 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6716 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6717 Check(isValidSrcASrcBVector(Src0Ty),
6718 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6719 Check(isValidSrcASrcBVector(Src1Ty),
6720 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6721
6722 // Permit excess registers for the format.
6723 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6724 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6725 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6726 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6727 break;
6728 }
6729 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6730 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6731 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6732 Value *Src0 = Call.getArgOperand(1);
6733 Value *Src1 = Call.getArgOperand(3);
6734
6735 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6736 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6737 Check(FmtA <= 4, "invalid value for matrix format", Call,
6738 Call.getArgOperand(0));
6739 Check(FmtB <= 4, "invalid value for matrix format", Call,
6740 Call.getArgOperand(2));
6741
6742 // AMDGPU::MatrixFMT values
6743 auto getFormatNumRegs = [](unsigned FormatVal) {
6744 switch (FormatVal) {
6745 case 0:
6746 case 1:
6747 return 16u;
6748 case 2:
6749 case 3:
6750 return 12u;
6751 case 4:
6752 return 8u;
6753 default:
6754 llvm_unreachable("invalid format value");
6755 }
6756 };
6757
6758 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6759 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6760 return false;
6761 unsigned NumElts = Ty->getNumElements();
6762 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6763 };
6764
6765 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6766 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6767 Check(isValidSrcASrcBVector(Src0Ty),
6768 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6769 Check(isValidSrcASrcBVector(Src1Ty),
6770 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6771
6772 // Permit excess registers for the format.
6773 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6774 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6775 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6776 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6777 break;
6778 }
6779 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6780 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6781 Value *V = Call.getArgOperand(0);
6782 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6783 Check(RegCount % 8 == 0,
6784 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6785 break;
6786 }
6787 case Intrinsic::experimental_convergence_entry:
6788 case Intrinsic::experimental_convergence_anchor:
6789 break;
6790 case Intrinsic::experimental_convergence_loop:
6791 break;
6792 case Intrinsic::ptrmask: {
6793 Type *Ty0 = Call.getArgOperand(0)->getType();
6794 Type *Ty1 = Call.getArgOperand(1)->getType();
6796 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6797 "of pointers",
6798 &Call);
6799 Check(
6800 Ty0->isVectorTy() == Ty1->isVectorTy(),
6801 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6802 &Call);
6803 if (Ty0->isVectorTy())
6804 Check(cast<VectorType>(Ty0)->getElementCount() ==
6805 cast<VectorType>(Ty1)->getElementCount(),
6806 "llvm.ptrmask intrinsic arguments must have the same number of "
6807 "elements",
6808 &Call);
6809 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6810 "llvm.ptrmask intrinsic second argument bitwidth must match "
6811 "pointer index type size of first argument",
6812 &Call);
6813 break;
6814 }
6815 case Intrinsic::thread_pointer: {
6816 Check(Call.getType()->getPointerAddressSpace() ==
6817 DL.getDefaultGlobalsAddressSpace(),
6818 "llvm.thread.pointer intrinsic return type must be for the globals "
6819 "address space",
6820 &Call);
6821 break;
6822 }
6823 case Intrinsic::threadlocal_address: {
6824 const Value &Arg0 = *Call.getArgOperand(0);
6825 Check(isa<GlobalValue>(Arg0),
6826 "llvm.threadlocal.address first argument must be a GlobalValue");
6827 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6828 "llvm.threadlocal.address operand isThreadLocal() must be true");
6829 break;
6830 }
6831 case Intrinsic::lifetime_start:
6832 case Intrinsic::lifetime_end: {
6833 Value *Ptr = Call.getArgOperand(0);
6834 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr),
6835 "llvm.lifetime.start/end can only be used on alloca or poison",
6836 &Call);
6837 break;
6838 }
6839 };
6840
6841 // Verify that there aren't any unmediated control transfers between funclets.
6843 Function *F = Call.getParent()->getParent();
6844 if (F->hasPersonalityFn() &&
6845 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6846 // Run EH funclet coloring on-demand and cache results for other intrinsic
6847 // calls in this function
6848 if (BlockEHFuncletColors.empty())
6849 BlockEHFuncletColors = colorEHFunclets(*F);
6850
6851 // Check for catch-/cleanup-pad in first funclet block
6852 bool InEHFunclet = false;
6853 BasicBlock *CallBB = Call.getParent();
6854 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6855 assert(CV.size() > 0 && "Uncolored block");
6856 for (BasicBlock *ColorFirstBB : CV)
6857 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6858 It != ColorFirstBB->end())
6859 if (isa_and_nonnull<FuncletPadInst>(&*It))
6860 InEHFunclet = true;
6861
6862 // Check for funclet operand bundle
6863 bool HasToken = false;
6864 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6865 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6866 HasToken = true;
6867
6868 // This would cause silent code truncation in WinEHPrepare
6869 if (InEHFunclet)
6870 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6871 }
6872 }
6873}
6874
6875/// Carefully grab the subprogram from a local scope.
6876///
6877/// This carefully grabs the subprogram from a local scope, avoiding the
6878/// built-in assertions that would typically fire.
/// Returns null for a null scope or when the chain does not terminate in a
/// DISubprogram (broken chains are diagnosed elsewhere).
6880 if (!LocalScope)
6881 return nullptr;
6882
6883 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6884 return SP;
6885
// Lexical blocks are not subprograms themselves; recurse through the parent
// scope until the enclosing subprogram (if any) is reached.
6886 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6887 return getSubprogram(LB->getRawScope());
6888
6889 // Just return null; broken scope chains are checked elsewhere.
6890 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6891 return nullptr;
6892}
6893
6894void Verifier::visit(DbgLabelRecord &DLR) {
6895 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6896 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6897
6898 // Ignore broken !dbg attachments; they're checked elsewhere.
6899 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6900 if (!isa<DILocation>(N))
6901 return;
6902
6903 BasicBlock *BB = DLR.getParent();
6904 Function *F = BB ? BB->getParent() : nullptr;
6905
6906 // The scopes for variables and !dbg attachments must agree.
6907 DILabel *Label = DLR.getLabel();
6908 DILocation *Loc = DLR.getDebugLoc();
6909 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6910
6911 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6912 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6913 if (!LabelSP || !LocSP)
6914 return;
6915
6916 CheckDI(LabelSP == LocSP,
6917 "mismatched subprogram between #dbg_label label and !dbg attachment",
6918 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6919 Loc->getScope()->getSubprogram());
6920}
6921
// Verify the operands of a single #dbg_value / #dbg_declare / #dbg_assign
// record: its location, variable, expression, optional assign-ID operands,
// and the agreement between the variable's scope and the !dbg attachment.
6922void Verifier::visit(DbgVariableRecord &DVR) {
6923 BasicBlock *BB = DVR.getParent();
6924 Function *F = BB->getParent();
6925
6929 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
6930
6931 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6932 // DIArgList, or an empty MDNode (which is a legacy representation for an
6933 // "undef" location).
6934 auto *MD = DVR.getRawLocation();
6935 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6936 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6937 "invalid #dbg record address/value", &DVR, MD, BB, F);
6938 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
6939 visitValueAsMetadata(*VAM, F);
6940 if (DVR.isDbgDeclare()) {
6941 // Allow integers here to support inttoptr salvage.
6942 Type *Ty = VAM->getValue()->getType();
6943 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
6944 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
6945 F);
6946 }
6947 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
6948 visitDIArgList(*AL, F);
6949 }
6950
6951 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6952 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
6953 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6954
6955 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6956 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
6957 F);
6958 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6959
// #dbg_assign records carry an extra DIAssignID plus an address operand and
// address expression; verify each of them as well.
6960 if (DVR.isDbgAssign()) {
6961 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6962 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
6963 F);
6964 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6965 AreDebugLocsAllowed::No);
6966
6967 const auto *RawAddr = DVR.getRawAddress();
6968 // Similarly to the location above, the address for an assign
6969 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6970 // represents an undef address.
6971 CheckDI(
6972 isa<ValueAsMetadata>(RawAddr) ||
6973 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6974 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
6975 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6976 visitValueAsMetadata(*VAM, F);
6977
6978 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6979 "invalid #dbg_assign address expression", &DVR,
6980 DVR.getRawAddressExpression(), BB, F);
6981 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6982
6983 // All of the linked instructions should be in the same function as DVR.
6984 for (Instruction *I : at::getAssignmentInsts(&DVR))
6985 CheckDI(DVR.getFunction() == I->getFunction(),
6986 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
6987 }
6988
6989 // This check is redundant with one in visitLocalVariable().
6990 DILocalVariable *Var = DVR.getVariable();
6991 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
6992 BB, F);
6993
6994 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6995 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6996 &DVR, DLNode, BB, F);
6997 DILocation *Loc = DVR.getDebugLoc();
6998
6999 // The scopes for variables and !dbg attachments must agree.
7000 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7001 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7002 if (!VarSP || !LocSP)
7003 return; // Broken scope chains are checked elsewhere.
7004
7005 CheckDI(VarSP == LocSP,
7006 "mismatched subprogram between #dbg record variable and DILocation",
7007 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7008 Loc->getScope()->getSubprogram(), BB, F);
7009
// Finally, detect conflicting debug records for function arguments.
7010 verifyFnArgs(DVR);
7011}
7012
// Verify vector-predicated (VP) intrinsics: for VP casts, the argument and
// result vector lengths and element-type categories must be legal for the
// particular cast; a few other VP intrinsics get dedicated operand checks.
7013void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7014 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7015 auto *RetTy = cast<VectorType>(VPCast->getType());
7016 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7017 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7018 "VP cast intrinsic first argument and result vector lengths must be "
7019 "equal",
7020 *VPCast);
7021
// Element-type constraints mirror the corresponding non-VP cast instruction.
7022 switch (VPCast->getIntrinsicID()) {
7023 default:
7024 llvm_unreachable("Unknown VP cast intrinsic");
7025 case Intrinsic::vp_trunc:
7026 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7027 "llvm.vp.trunc intrinsic first argument and result element type "
7028 "must be integer",
7029 *VPCast);
7030 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7031 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7032 "larger than the bit size of the return type",
7033 *VPCast);
7034 break;
7035 case Intrinsic::vp_zext:
7036 case Intrinsic::vp_sext:
7037 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7038 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7039 "element type must be integer",
7040 *VPCast);
7041 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7042 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7043 "argument must be smaller than the bit size of the return type",
7044 *VPCast);
7045 break;
7046 case Intrinsic::vp_fptoui:
7047 case Intrinsic::vp_fptosi:
7048 case Intrinsic::vp_lrint:
7049 case Intrinsic::vp_llrint:
7050 Check(
7051 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7052 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7053 "type must be floating-point and result element type must be integer",
7054 *VPCast);
7055 break;
7056 case Intrinsic::vp_uitofp:
7057 case Intrinsic::vp_sitofp:
7058 Check(
7059 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7060 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7061 "type must be integer and result element type must be floating-point",
7062 *VPCast);
7063 break;
7064 case Intrinsic::vp_fptrunc:
7065 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7066 "llvm.vp.fptrunc intrinsic first argument and result element type "
7067 "must be floating-point",
7068 *VPCast);
7069 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7070 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7071 "larger than the bit size of the return type",
7072 *VPCast);
7073 break;
7074 case Intrinsic::vp_fpext:
7075 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7076 "llvm.vp.fpext intrinsic first argument and result element type "
7077 "must be floating-point",
7078 *VPCast);
7079 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7080 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7081 "smaller than the bit size of the return type",
7082 *VPCast);
7083 break;
7084 case Intrinsic::vp_ptrtoint:
7085 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7086 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7087 "pointer and result element type must be integer",
7088 *VPCast);
7089 break;
7090 case Intrinsic::vp_inttoptr:
7091 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7092 "llvm.vp.inttoptr intrinsic first argument element type must be "
7093 "integer and result element type must be pointer",
7094 *VPCast);
7095 break;
7096 }
7097 }
7098
// Per-intrinsic operand checks beyond the generic cast rules above.
7099 switch (VPI.getIntrinsicID()) {
7100 case Intrinsic::vp_fcmp: {
7101 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7103 "invalid predicate for VP FP comparison intrinsic", &VPI);
7104 break;
7105 }
7106 case Intrinsic::vp_icmp: {
7107 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7109 "invalid predicate for VP integer comparison intrinsic", &VPI);
7110 break;
7111 }
7112 case Intrinsic::vp_is_fpclass: {
7113 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7114 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7115 "unsupported bits for llvm.vp.is.fpclass test mask");
7116 break;
7117 }
7118 case Intrinsic::experimental_vp_splice: {
7119 VectorType *VecTy = cast<VectorType>(VPI.getType());
7120 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7121 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
// For scalable vectors, vscale_range (when present) tightens the known
// minimum element count that bounds the splice index.
7122 if (VPI.getParent() && VPI.getParent()->getParent()) {
7123 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7124 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7125 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7126 }
7127 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7128 (Idx >= 0 && Idx < KnownMinNumElements),
7129 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7130 "known minimum number of elements in the vector. For scalable "
7131 "vectors the minimum number of elements is determined from "
7132 "vscale_range.",
7133 &VPI);
7134 break;
7135 }
7136 }
7137}
7138
// Verify constrained FP intrinsics: the operand count (including the trailing
// metadata operands), per-intrinsic operand/result shape constraints, and the
// validity of the exception-behavior / rounding-mode metadata arguments.
7139void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7140 unsigned NumOperands = FPI.getNonMetadataArgCount();
7141 bool HasRoundingMD =
7143
7144 // Add the expected number of metadata operands.
7145 NumOperands += (1 + HasRoundingMD);
7146
7147 // Compare intrinsics carry an extra predicate metadata operand.
7148 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
7149 NumOperands += 1;
7150 Check((FPI.arg_size() == NumOperands),
7151 "invalid arguments for constrained FP intrinsic", &FPI);
7152
// Per-intrinsic operand shape checks.
7153 switch (FPI.getIntrinsicID()) {
7154 case Intrinsic::experimental_constrained_lrint:
7155 case Intrinsic::experimental_constrained_llrint: {
7156 Type *ValTy = FPI.getArgOperand(0)->getType();
7157 Type *ResultTy = FPI.getType();
7158 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7159 "Intrinsic does not support vectors", &FPI);
7160 break;
7161 }
7162
7163 case Intrinsic::experimental_constrained_lround:
7164 case Intrinsic::experimental_constrained_llround: {
7165 Type *ValTy = FPI.getArgOperand(0)->getType();
7166 Type *ResultTy = FPI.getType();
7167 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7168 "Intrinsic does not support vectors", &FPI);
7169 break;
7170 }
7171
7172 case Intrinsic::experimental_constrained_fcmp:
7173 case Intrinsic::experimental_constrained_fcmps: {
7174 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7176 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7177 break;
7178 }
7179
7180 case Intrinsic::experimental_constrained_fptosi:
7181 case Intrinsic::experimental_constrained_fptoui: {
7182 Value *Operand = FPI.getArgOperand(0);
7183 ElementCount SrcEC;
7184 Check(Operand->getType()->isFPOrFPVectorTy(),
7185 "Intrinsic first argument must be floating point", &FPI);
7186 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7187 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7188 }
7189
// Re-point Operand at the result and check it against the recorded source
// element count.
7190 Operand = &FPI;
7191 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7192 "Intrinsic first argument and result disagree on vector use", &FPI);
7193 Check(Operand->getType()->isIntOrIntVectorTy(),
7194 "Intrinsic result must be an integer", &FPI);
7195 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7196 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7197 "Intrinsic first argument and result vector lengths must be equal",
7198 &FPI);
7199 }
7200 break;
7201 }
7202
7203 case Intrinsic::experimental_constrained_sitofp:
7204 case Intrinsic::experimental_constrained_uitofp: {
7205 Value *Operand = FPI.getArgOperand(0);
7206 ElementCount SrcEC;
7207 Check(Operand->getType()->isIntOrIntVectorTy(),
7208 "Intrinsic first argument must be integer", &FPI);
7209 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7210 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7211 }
7212
7213 Operand = &FPI;
7214 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7215 "Intrinsic first argument and result disagree on vector use", &FPI);
7216 Check(Operand->getType()->isFPOrFPVectorTy(),
7217 "Intrinsic result must be a floating point", &FPI);
7218 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7219 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7220 "Intrinsic first argument and result vector lengths must be equal",
7221 &FPI);
7222 }
7223 break;
7224 }
7225
7226 case Intrinsic::experimental_constrained_fptrunc:
7227 case Intrinsic::experimental_constrained_fpext: {
7228 Value *Operand = FPI.getArgOperand(0);
7229 Type *OperandTy = Operand->getType();
7230 Value *Result = &FPI;
7231 Type *ResultTy = Result->getType();
7232 Check(OperandTy->isFPOrFPVectorTy(),
7233 "Intrinsic first argument must be FP or FP vector", &FPI);
7234 Check(ResultTy->isFPOrFPVectorTy(),
7235 "Intrinsic result must be FP or FP vector", &FPI);
7236 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7237 "Intrinsic first argument and result disagree on vector use", &FPI);
7238 if (OperandTy->isVectorTy()) {
7239 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7240 cast<VectorType>(ResultTy)->getElementCount(),
7241 "Intrinsic first argument and result vector lengths must be equal",
7242 &FPI);
7243 }
// fptrunc must narrow and fpext must widen the scalar width.
7244 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7245 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7246 "Intrinsic first argument's type must be larger than result type",
7247 &FPI);
7248 } else {
7249 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7250 "Intrinsic first argument's type must be smaller than result type",
7251 &FPI);
7252 }
7253 break;
7254 }
7255
7256 default:
7257 break;
7258 }
7259
7260 // If a non-metadata argument is passed in a metadata slot then the
7261 // error will be caught earlier when the incorrect argument doesn't
7262 // match the specification in the intrinsic call table. Thus, no
7263 // argument type check is needed here.
7264
7265 Check(FPI.getExceptionBehavior().has_value(),
7266 "invalid exception behavior argument", &FPI);
7267 if (HasRoundingMD) {
7268 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7269 &FPI);
7270 }
7271}
7272
7273void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7274 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7275 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7276
7277 // We don't know whether this intrinsic verified correctly.
7278 if (!V || !E || !E->isValid())
7279 return;
7280
7281 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7282 auto Fragment = E->getFragmentInfo();
7283 if (!Fragment)
7284 return;
7285
7286 // The frontend helps out GDB by emitting the members of local anonymous
7287 // unions as artificial local variables with shared storage. When SROA splits
7288 // the storage for artificial local variables that are smaller than the entire
7289 // union, the overhang piece will be outside of the allotted space for the
7290 // variable and this check fails.
7291 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7292 if (V->isArtificial())
7293 return;
7294
7295 verifyFragmentExpression(*V, *Fragment, &DVR);
7296}
7297
// Shared implementation: the fragment must lie inside the variable's storage
// and must not cover the variable entirely (a full cover needs no fragment).
7298template <typename ValueOrMetadata>
7299void Verifier::verifyFragmentExpression(const DIVariable &V,
7301 ValueOrMetadata *Desc) {
7302 // If there's no size, the type is broken, but that should be checked
7303 // elsewhere.
7304 auto VarSize = V.getSizeInBits();
7305 if (!VarSize)
7306 return;
7307
7308 unsigned FragSize = Fragment.SizeInBits;
7309 unsigned FragOffset = Fragment.OffsetInBits;
7310 CheckDI(FragSize + FragOffset <= *VarSize,
7311 "fragment is larger than or outside of variable", Desc, &V);
7312 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7313}
7314
7315void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7316 // This function does not take the scope of noninlined function arguments into
7317 // account. Don't run it if current function is nodebug, because it may
7318 // contain inlined debug intrinsics.
7319 if (!HasDebugInfo)
7320 return;
7321
7322 // For performance reasons only check non-inlined ones.
7323 if (DVR.getDebugLoc()->getInlinedAt())
7324 return;
7325
7326 DILocalVariable *Var = DVR.getVariable();
7327 CheckDI(Var, "#dbg record without variable");
7328
7329 unsigned ArgNo = Var->getArg();
7330 if (!ArgNo)
7331 return;
7332
7333 // Verify there are no duplicate function argument debug info entries.
7334 // These will cause hard-to-debug assertions in the DWARF backend.
7335 if (DebugFnArgs.size() < ArgNo)
7336 DebugFnArgs.resize(ArgNo, nullptr);
7337
7338 auto *Prev = DebugFnArgs[ArgNo - 1];
7339 DebugFnArgs[ArgNo - 1] = Var;
7340 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7341 Prev, Var);
7342}
7343
7344void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7345 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7346
7347 // We don't know whether this intrinsic verified correctly.
7348 if (!E || !E->isValid())
7349 return;
7350
7351 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7352 Value *VarValue = DVR.getVariableLocationOp(0);
7353 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7354 return;
7355 // We allow EntryValues for swift async arguments, as they have an
7356 // ABI-guarantee to be turned into a specific register.
7357 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7358 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7359 return;
7360 }
7361
7362 CheckDI(!E->isEntryValue(),
7363 "Entry values are only allowed in MIR unless they target a "
7364 "swiftasync Argument",
7365 &DVR);
7366}
7367
// Verify that every DICompileUnit reached during verification is listed in
// the module's !llvm.dbg.cu named metadata.
7368void Verifier::verifyCompileUnits() {
7369 // When more than one Module is imported into the same context, such as during
7370 // an LTO build before linking the modules, ODR type uniquing may cause types
7371 // to point to a different CU. This check does not make sense in this case.
7372 if (M.getContext().isODRUniquingDebugTypes())
7373 return;
7374 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7376 if (CUs)
7377 Listed.insert_range(CUs->operands());
// Each CU visited via some !dbg chain must also appear in !llvm.dbg.cu.
7378 for (const auto *CU : CUVisited)
7379 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7380 CUVisited.clear();
7381}
7382
7383void Verifier::verifyDeoptimizeCallingConvs() {
7384 if (DeoptimizeDeclarations.empty())
7385 return;
7386
7387 const Function *First = DeoptimizeDeclarations[0];
7388 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7389 Check(First->getCallingConv() == F->getCallingConv(),
7390 "All llvm.experimental.deoptimize declarations must have the same "
7391 "calling convention",
7392 First, F);
7393 }
7394}
7395
7396void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7397 const OperandBundleUse &BU) {
7398 FunctionType *FTy = Call.getFunctionType();
7399
7400 Check((FTy->getReturnType()->isPointerTy() ||
7401 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7402 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7403 "function returning a pointer or a non-returning function that has a "
7404 "void return type",
7405 Call);
7406
7407 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7408 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7409 "an argument",
7410 Call);
7411
7412 auto *Fn = cast<Function>(BU.Inputs.front());
7413 Intrinsic::ID IID = Fn->getIntrinsicID();
7414
7415 if (IID) {
7416 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7417 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7418 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7419 "invalid function argument", Call);
7420 } else {
7421 StringRef FnName = Fn->getName();
7422 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7423 FnName == "objc_claimAutoreleasedReturnValue" ||
7424 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7425 "invalid function argument", Call);
7426 }
7427}
7428
// Validate each llvm.experimental.noalias.scope.decl call (its scope list must
// be a single-scope MDNode) and, when requested, that no two declarations of
// the same scope dominate each other.
7429void Verifier::verifyNoAliasScopeDecl() {
7430 if (NoAliasScopeDecls.empty())
7431 return;
7432
7433 // only a single scope must be declared at a time.
7434 for (auto *II : NoAliasScopeDecls) {
7435 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7436 "Not a llvm.experimental.noalias.scope.decl ?");
7437 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7439 Check(ScopeListMV != nullptr,
7440 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7441 "argument",
7442 II);
7443
7444 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7445 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7446 Check(ScopeListMD->getNumOperands() == 1,
7447 "!id.scope.list must point to a list with a single scope", II);
7448 visitAliasScopeListMetadata(ScopeListMD);
7449 }
7450
7451 // Only check the domination rule when requested. Once all passes have been
7452 // adapted this option can go away.
7454 return;
7455
7456 // Now sort the intrinsics based on the scope MDNode so that declarations of
7457 // the same scopes are next to each other.
7458 auto GetScope = [](IntrinsicInst *II) {
7459 const auto *ScopeListMV = cast<MetadataAsValue>(
7461 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7462 };
7463
7464 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7465 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7466 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7467 return GetScope(Lhs) < GetScope(Rhs);
7468 };
7469
7470 llvm::sort(NoAliasScopeDecls, Compare);
7471
7472 // Go over the intrinsics and check that for the same scope, they are not
7473 // dominating each other.
7474 auto ItCurrent = NoAliasScopeDecls.begin();
7475 while (ItCurrent != NoAliasScopeDecls.end()) {
7476 auto CurScope = GetScope(*ItCurrent);
7477 auto ItNext = ItCurrent;
7478 do {
7479 ++ItNext;
7480 } while (ItNext != NoAliasScopeDecls.end() &&
7481 GetScope(*ItNext) == CurScope);
7482
7483 // [ItCurrent, ItNext) represents the declarations for the same scope.
7484 // Ensure they are not dominating each other.. but only if it is not too
7485 // expensive.
// The pairwise domination check is quadratic in the group size, so cap it.
7486 if (ItNext - ItCurrent < 32)
7487 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7488 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7489 if (I != J)
7490 Check(!DT.dominates(I, J),
7491 "llvm.experimental.noalias.scope.decl dominates another one "
7492 "with the same scope",
7493 I);
7494 ItCurrent = ItNext;
7495 }
7496}
7497
7498//===----------------------------------------------------------------------===//
7499// Implement the public interfaces to this file...
7500//===----------------------------------------------------------------------===//
7501
// Public entry point for verifying a single function. Returns true when the
// function is BROKEN (inverted from the usual meaning of "verify").
7503 Function &F = const_cast<Function &>(f);
7504
7505 // Don't use a raw_null_ostream. Printing IR is expensive.
7506 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7507
7508 // Note that this function's return value is inverted from what you would
7509 // expect of a function called "verify".
7510 return !V.verify(F);
7511}
7512
// Public entry point for verifying a whole module. Returns true when the
// module is BROKEN; when BrokenDebugInfo is non-null, debug-info breakage is
// reported through it instead of counting as an error.
7514 bool *BrokenDebugInfo) {
7515 // Don't use a raw_null_ostream. Printing IR is expensive.
7516 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7517
// Verify every function first, then module-level properties.
7518 bool Broken = false;
7519 for (const Function &F : M)
7520 Broken |= !V.verify(F);
7521
7522 Broken |= !V.verify();
7523 if (BrokenDebugInfo)
7524 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7525 // Note that this function's return value is inverted from what you would
7526 // expect of a function called "verify".
7527 return Broken;
7528}
7529
7530namespace {
7531
// Legacy pass-manager wrapper around Verifier: verifies each function as it
// runs, and finishes module-level verification in doFinalization().
7532struct VerifierLegacyPass : public FunctionPass {
7533 static char ID;
7534
7535 std::unique_ptr<Verifier> V;
// When true, any broken function/module aborts compilation via
// report_fatal_error.
7536 bool FatalErrors = true;
7537
7538 VerifierLegacyPass() : FunctionPass(ID) {
7540 }
7541 explicit VerifierLegacyPass(bool FatalErrors)
7542 : FunctionPass(ID),
7543 FatalErrors(FatalErrors) {
7545 }
7546
// Debug-info breakage is not treated as an error here; it is reported
// separately via hasBrokenDebugInfo() in doFinalization().
7547 bool doInitialization(Module &M) override {
7548 V = std::make_unique<Verifier>(
7549 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7550 return false;
7551 }
7552
7553 bool runOnFunction(Function &F) override {
7554 if (!V->verify(F) && FatalErrors) {
7555 errs() << "in function " << F.getName() << '\n';
7556 report_fatal_error("Broken function found, compilation aborted!");
7557 }
7558 return false;
7559 }
7560
// Declarations have no bodies and were skipped by runOnFunction, so verify
// them (and module-level state) once the whole module has been seen.
7561 bool doFinalization(Module &M) override {
7562 bool HasErrors = false;
7563 for (Function &F : M)
7564 if (F.isDeclaration())
7565 HasErrors |= !V->verify(F);
7566
7567 HasErrors |= !V->verify();
7568 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7569 report_fatal_error("Broken module found, compilation aborted!");
7570 return false;
7571 }
7572
7573 void getAnalysisUsage(AnalysisUsage &AU) const override {
7574 AU.setPreservesAll();
7575 }
7576};
7577
7578} // end anonymous namespace
7579
7580/// Helper to issue failure from the TBAA verification
7581template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7582 if (Diagnostic)
7583 return Diagnostic->CheckFailed(Args...);
7584}
7585
// Check a TBAA-specific condition: on failure, report the remaining
// arguments through TBAAVerifier::CheckFailed and return false from the
// enclosing function (so it may only be used in bool-returning functions).
7586#define CheckTBAA(C, ...) \
7587 do { \
7588 if (!(C)) { \
7589 CheckFailed(__VA_ARGS__); \
7590 return false; \
7591 } \
7592 } while (false)
7593
7594/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7595/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7596/// struct-type node describing an aggregate data structure (like a struct).
7597TBAAVerifier::TBAABaseNodeSummary
7598TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7599 bool IsNewFormat) {
7600 if (BaseNode->getNumOperands() < 2) {
7601 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7602 return {true, ~0u};
7603 }
7604
7605 auto Itr = TBAABaseNodes.find(BaseNode);
7606 if (Itr != TBAABaseNodes.end())
7607 return Itr->second;
7608
7609 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7610 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7611 (void)InsertResult;
7612 assert(InsertResult.second && "We just checked!");
7613 return Result;
7614}
7615
/// Uncached worker for verifyTBAABaseNode: checks the structural rules for a
/// struct-path TBAA base node. Returns {IsInvalid, BitWidth}, where BitWidth
/// is the common bit width of the field-offset constants (~0u if none seen).
7616TBAAVerifier::TBAABaseNodeSummary
7617TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7618 bool IsNewFormat) {
 // Summary returned for every malformed case: invalid, unknown bit width.
7619 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7620
 // A two-operand node is a scalar (leaf) type node, not an aggregate.
7621 if (BaseNode->getNumOperands() == 2) {
7622 // Scalar nodes can only be accessed at offset 0.
7623 return isValidScalarTBAANode(BaseNode)
7624 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7625 : InvalidNode;
7626 }
7627
 // Operand-count shape check: new-format nodes carry (field, offset, size)
 // triples, old-format nodes carry an id plus (field, offset) pairs, hence
 // the odd total.
7628 if (IsNewFormat) {
7629 if (BaseNode->getNumOperands() % 3 != 0) {
7630 CheckFailed("Access tag nodes must have the number of operands that is a "
7631 "multiple of 3!", BaseNode);
7632 return InvalidNode;
7633 }
7634 } else {
7635 if (BaseNode->getNumOperands() % 2 != 1) {
7636 CheckFailed("Struct tag nodes must have an odd number of operands!",
7637 BaseNode);
7638 return InvalidNode;
7639 }
7640 }
7641
7642 // Check the type size field.
7643 if (IsNewFormat) {
7644 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7645 BaseNode->getOperand(1));
7646 if (!TypeSizeNode) {
7647 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7648 return InvalidNode;
7649 }
7650 }
7651
7652 // Check the type name field. In the new format it can be anything.
7653 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7654 CheckFailed("Struct tag nodes have a string as their first operand",
7655 BaseNode);
7656 return InvalidNode;
7657 }
7658
 // From here on we diagnose every bad field instead of bailing out, so the
 // user sees all problems at once; Failed accumulates the verdict.
7659 bool Failed = false;
7660
7661 std::optional<APInt> PrevOffset;
7662 unsigned BitWidth = ~0u;
7663
7664 // We've already checked that BaseNode is not a degenerate root node with one
7665 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7666 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7667 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7668 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7669 Idx += NumOpsPerField) {
7670 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7671 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7672 if (!isa<MDNode>(FieldTy)) {
7673 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7674 Failed = true;
7675 continue;
7676 }
7677
7678 auto *OffsetEntryCI =
7679 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7680 if (!OffsetEntryCI) {
7681 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7682 Failed = true;
7683 continue;
7684 }
7685
 // The first well-formed offset fixes the bit width all later offsets
 // must share.
7686 if (BitWidth == ~0u)
7687 BitWidth = OffsetEntryCI->getBitWidth();
7688
7689 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7690 CheckFailed(
7691 "Bitwidth between the offsets and struct type entries must match", &I,
7692 BaseNode);
7693 Failed = true;
7694 continue;
7695 }
7696
7697 // NB! As far as I can tell, we generate a non-strictly increasing offset
7698 // sequence only from structs that have zero size bit fields. When
7699 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7700 // pick the field lexically the latest in struct type metadata node. This
7701 // mirrors the actual behavior of the alias analysis implementation.
7702 bool IsAscending =
7703 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7704
7705 if (!IsAscending) {
7706 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7707 Failed = true;
7708 }
7709
7710 PrevOffset = OffsetEntryCI->getValue();
7711
 // New-format fields carry an explicit member size as their third operand.
7712 if (IsNewFormat) {
7713 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7714 BaseNode->getOperand(Idx + 2));
7715 if (!MemberSizeNode) {
7716 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7717 Failed = true;
7718 continue;
7719 }
7720 }
7721 }
7722
7723 return Failed ? InvalidNode
7724 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7725}
7726
7727static bool IsRootTBAANode(const MDNode *MD) {
7728 return MD->getNumOperands() < 2;
7729}
7730
// Returns true if \p MD is a well-formed scalar TBAA type node: either
// {name, parent} or {name, parent, offset-0}, whose parent chain reaches a
// root node. \p Visited (a SmallPtrSetImpl<const MDNode*>&; its declaration
// line is not visible in this excerpt) guards against cycles in the chain.
7731static bool IsScalarTBAANodeImpl(const MDNode *MD,
7733 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7734 return false;
7735
 // First operand is the type name string.
7736 if (!isa<MDString>(MD->getOperand(0)))
7737 return false;
7738
 // Optional third operand must be a constant-zero offset.
7739 if (MD->getNumOperands() == 3) {
7740 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7741 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7742 return false;
7743 }
7744
 // Recurse up the parent chain; the insert() guards against cycles.
7745 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7746 return Parent && Visited.insert(Parent).second &&
7747 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7748}
7749
// Memoized check that \p MD is a valid scalar TBAA type node; results are
// cached in TBAAScalarNodes.
7750bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7751 auto ResultIt = TBAAScalarNodes.find(MD);
7752 if (ResultIt != TBAAScalarNodes.end())
7753 return ResultIt->second;
7754
 // NOTE(review): the declaration of Visited (the cycle-guard set passed to
 // IsScalarTBAANodeImpl) is not visible in this excerpt -- confirm against
 // the full file.
7756 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7757 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7758 (void)InsertResult;
7759 assert(InsertResult.second && "Just checked!");
7760
7761 return Result;
7762}
7763
7764/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7765/// Offset in place to be the offset within the field node returned.
7766///
7767/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7768MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7769 const MDNode *BaseNode,
7770 APInt &Offset,
7771 bool IsNewFormat) {
7772 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7773
7774 // Scalar nodes have only one possible "field" -- their parent in the access
7775 // hierarchy. Offset must be zero at this point, but our caller is supposed
7776 // to check that.
7777 if (BaseNode->getNumOperands() == 2)
7778 return cast<MDNode>(BaseNode->getOperand(1));
7779
 // Fields are stored in ascending-offset order (checked in
 // verifyTBAABaseNodeImpl); find the first field whose offset is strictly
 // greater than Offset -- the field BEFORE it is the one containing Offset.
7780 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7781 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7782 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7783 Idx += NumOpsPerField) {
7784 auto *OffsetEntryCI =
7785 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7786 if (OffsetEntryCI->getValue().ugt(Offset)) {
 // Even the first field starts past Offset: there is no containing field.
7787 if (Idx == FirstFieldOpNo) {
7788 CheckFailed("Could not find TBAA parent in struct type node", &I,
7789 BaseNode, &Offset);
7790 return nullptr;
7791 }
7792
 // Rebase Offset to be relative to the start of the containing field.
7793 unsigned PrevIdx = Idx - NumOpsPerField;
7794 auto *PrevOffsetEntryCI =
7795 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7796 Offset -= PrevOffsetEntryCI->getValue();
7797 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7798 }
7799 }
7800
 // Offset is at or past the start of the last field; it is the container.
7801 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7802 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7803 BaseNode->getOperand(LastIdx + 1));
7804 Offset -= LastOffsetEntryCI->getValue();
7805 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7806}
7807
// Body of isNewFormatTBAATypeNode(llvm::MDNode *Type) -- the signature line
// is not visible in this excerpt. Returns true if \p Type looks like a
// new-format TBAA type node: at least three operands, the first of which is
// an MDNode (the parent type).
7809 if (!Type || Type->getNumOperands() < 3)
7810 return false;
7811
7812 // In the new format type nodes shall have a reference to the parent type as
7813 // its first operand.
7814 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7815}
7816
// Body of the TBAA access-tag visitor (signature line not visible in this
// excerpt; it receives the instruction I and its !tbaa node MD). Validates
// the tag's shape, then walks the struct path from the base type toward the
// access type, returning false on the first structural problem.
7818 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7819 &I, MD);
7820
 // Only memory-accessing instructions may carry a TBAA access tag.
7821 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7822 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7823 isa<AtomicCmpXchgInst>(I),
7824 "This instruction shall not have a TBAA access tag!", &I);
7825
 // Struct-path tags are (base, access-type, offset[, ...]); anything else is
 // the retired old-style scalar format.
7826 bool IsStructPathTBAA =
7827 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7828
7829 CheckTBAA(IsStructPathTBAA,
7830 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7831 &I);
7832
7833 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7834 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7835
 // The format of the access type decides how the rest of the tag is parsed.
7836 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7837
7838 if (IsNewFormat) {
7839 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7840 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7841 } else {
7842 CheckTBAA(MD->getNumOperands() < 5,
7843 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7844 }
7845
7846 // Check the access size field.
7847 if (IsNewFormat) {
7848 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7849 MD->getOperand(3));
7850 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7851 }
7852
7853 // Check the immutability flag.
7854 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7855 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7856 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7857 MD->getOperand(ImmutabilityFlagOpNo));
7858 CheckTBAA(IsImmutableCI,
7859 "Immutability tag on struct tag metadata must be a constant", &I,
7860 MD);
7861 CheckTBAA(
7862 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7863 "Immutability part of the struct tag metadata must be either 0 or 1",
7864 &I, MD);
7865 }
7866
7867 CheckTBAA(BaseNode && AccessType,
7868 "Malformed struct tag metadata: base and access-type "
7869 "should be non-null and point to Metadata nodes",
7870 &I, MD, BaseNode, AccessType);
7871
7872 if (!IsNewFormat) {
7873 CheckTBAA(isValidScalarTBAANode(AccessType),
7874 "Access type node must be a valid scalar type", &I, MD,
7875 AccessType);
7876 }
7877
7878 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7879 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7880
 // Walk from the base type down through containing fields; Offset is
 // rebased at each step by getFieldNodeFromTBAABaseNode.
7881 APInt Offset = OffsetCI->getValue();
7882 bool SeenAccessTypeInPath = false;
7883
 // Nodes seen on the path so far, for cycle detection.
7884 SmallPtrSet<MDNode *, 4> StructPath;
7885
7886 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7887 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7888 IsNewFormat)) {
7889 if (!StructPath.insert(BaseNode).second) {
7890 CheckFailed("Cycle detected in struct path", &I, MD);
7891 return false;
7892 }
7893
7894 bool Invalid;
7895 unsigned BaseNodeBitWidth;
7896 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7897 IsNewFormat);
7898
7899 // If the base node is invalid in itself, then we've already printed all the
7900 // errors we wanted to print.
7901 if (Invalid)
7902 return false;
7903
7904 SeenAccessTypeInPath |= BaseNode == AccessType;
7905
 // Scalar accesses must land exactly on the scalar, i.e. at offset zero.
7906 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7907 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7908 &I, MD, &Offset);
7909
7910 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7911 (BaseNodeBitWidth == 0 && Offset == 0) ||
7912 (IsNewFormat && BaseNodeBitWidth == ~0u),
7913 "Access bit-width not the same as description bit-width", &I, MD,
7914 BaseNodeBitWidth, Offset.getBitWidth());
7915
 // In the new format the walk stops once the access type is reached.
7916 if (IsNewFormat && SeenAccessTypeInPath)
7917 break;
7918 }
7919
7920 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7921 MD);
7922 return true;
7923}
7924
// Legacy pass registration: out-of-line definition of the pass ID, followed
// by the INITIALIZE_PASS boilerplate ("verify" / "Module Verifier", not a
// CFG-only pass, not an analysis).
7925char VerifierLegacyPass::ID = 0;
7926INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7927
// Body of llvm::createVerifierPass(bool FatalErrors) -- signature line not
// visible in this excerpt. Factory for the legacy-PM verifier pass.
7929 return new VerifierLegacyPass(FatalErrors);
7930}
7931
// Unique identity key for VerifierAnalysis in the new pass manager.
7932AnalysisKey VerifierAnalysis::Key;
// Module-level VerifierAnalysis::run. NOTE(review): this excerpt elides the
// signature and the statement that populates Res (presumably via
// llvm::verifyModule) -- confirm against the full file.
7935 Result Res;
7937 return Res;
7938}
7939
// Function-level VerifierAnalysis::run (signature not visible in this
// excerpt): IRBroken comes from llvm::verifyFunction with diagnostics on
// dbgs(); DebugInfoBroken is always reported as false at function scope.
7942 return { llvm::verifyFunction(F, &dbgs()), false };
7943}
7944
// Module-level VerifierPass::run (signature not visible in this excerpt):
// fetches the cached VerifierAnalysis result and aborts compilation when
// FatalErrors is set and either the IR or its debug info is broken.
7946 auto Res = AM.getResult<VerifierAnalysis>(M);
7947 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7948 report_fatal_error("Broken module found, compilation aborted!");
7949
 // Verification never mutates the IR.
7950 return PreservedAnalyses::all();
7951}
7952
// Function-level VerifierPass::run (signature not visible in this excerpt):
// same pattern as the module overload, but only IR brokenness is fatal here.
7954 auto res = AM.getResult<VerifierAnalysis>(F);
7955 if (res.IRBroken && FatalErrors)
7956 report_fatal_error("Broken function found, compilation aborted!");
7957
 // Verification never mutates the IR.
7958 return PreservedAnalyses::all();
7959}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
Definition: Attributes.cpp:763
@ FnAttr
Definition: Attributes.cpp:761
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
std::string Name
uint64_t Size
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
Definition: Verifier.cpp:7731
static bool isType(const Metadata *MD)
Definition: Verifier.cpp:1131
static Instruction * getSuccPad(Instruction *Terminator)
Definition: Verifier.cpp:2823
#define Check(C,...)
We know that cond should be true, if not print an error message.
Definition: Verifier.cpp:665
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
Definition: Verifier.cpp:7808
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition: Verifier.cpp:675
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition: Verifier.cpp:716
static bool isDINode(const Metadata *MD)
Definition: Verifier.cpp:1133
static bool isScope(const Metadata *MD)
Definition: Verifier.cpp:1132
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6879
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3975
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7586
static bool isConstantIntMetadataOperand(const Metadata *MD)
Definition: Verifier.cpp:5235
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7727
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4301
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4571
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1334
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3985
bool isFiniteNonZero() const
Definition: APFloat.h:1459
bool isNegative() const
Definition: APFloat.h:1449
const fltSemantics & getSemantics() const
Definition: APFloat.h:1457
Class for arbitrary precision integers.
Definition: APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:399
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:64
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:153
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:128
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:106
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition: Function.cpp:293
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:823
BinOp getOperation() const
Definition: Instructions.h:819
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:863
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
static LLVM_ABI Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:313
static LLVM_ABI bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:793
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:400
static LLVM_ABI bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:336
static LLVM_ABI bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:785
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:88
@ None
No attributes have been set.
Definition: Attributes.h:90
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:104
static LLVM_ABI bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:789
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:223
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:528
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
const Instruction & front() const
Definition: BasicBlock.h:482
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:549
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:445
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1922
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1415
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1205
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
unsigned arg_size() const
Definition: InstrTypes.h:1290
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:784
bool isIntPredicate() const
Definition: InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:778
ConstantArray - Constant Array Declarations.
Definition: Constants.h:433
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1120
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:226
bool isNegative() const
Definition: Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:154
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1032
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition: Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition: Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
Definition: ConstantRange.h:47
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1526
This is an important base class in LLVM.
Definition: Constant.h:43
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI bool isValid() const
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Tagged DWARF-like metadata node.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description. Uses SubclassData1.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This represents the llvm.dbg.label instruction.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition: DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:203
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
bool empty() const
Definition: DenseMap.h:119
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:230
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:334
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:429
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:454
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:592
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2391
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:244
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
const std::string & getGC() const
Definition: Function.cpp:831
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:98
const Constant * getAliasee() const
Definition: GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition: Globals.cpp:652
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:86
const Constant * getResolver() const
Definition: GlobalIFunc.h:73
bool hasComdat() const
Definition: GlobalObject.h:130
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:576
bool hasExternalLinkage() const
Definition: GlobalValue.h:513
bool isDSOLocal() const
Definition: GlobalValue.h:307
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:300
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:316
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:535
LinkageTypes getLinkage() const
Definition: GlobalValue.h:548
bool hasDefaultVisibility() const
Definition: GlobalValue.h:251
bool hasPrivateLinkage() const
Definition: GlobalValue.h:529
bool hasHiddenVisibility() const
Definition: GlobalValue.h:252
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:531
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:280
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:283
bool isDeclarationForLinker() const
Definition: GlobalValue.h:625
unsigned getAddressSpace() const
Definition: GlobalValue.h:207
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:296
bool hasComdat() const
Definition: GlobalValue.h:243
bool hasCommonLinkage() const
Definition: GlobalValue.h:534
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:217
bool hasAppendingLinkage() const
Definition: GlobalValue.h:527
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:514
Type * getValueType() const
Definition: GlobalValue.h:298
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:248
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:262
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:198
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:275
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:82
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:56
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:180
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:224
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:234
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:215
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
bool isTemporary() const
Definition: Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1443
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1451
bool isDistinct() const
Definition: Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1257
LLVMContext & getContext() const
Definition: Metadata.h:1241
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:899
Metadata * get() const
Definition: Metadata.h:928
A single uniqued string.
Definition: Metadata.h:720
LLVM_ABI StringRef getString() const
Definition: Metadata.cpp:617
Typed, array-like tuple of metadata.
Definition: Metadata.h:1651
Tuple of metadata.
Definition: Metadata.h:1493
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:84
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:182
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:200
Root of the metadata hierarchy.
Definition: Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5421
unsigned getMetadataID() const
Definition: Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:117
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:146
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:138
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:124
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:120
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:152
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:141
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:149
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:133
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:252
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:323
A tuple of MDNodes.
Definition: Metadata.h:1753
LLVM_ABI StringRef getName() const
Definition: Metadata.cpp:1482
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:5082
iterator_range< op_iterator > operands()
Definition: Metadata.h:1849
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2196
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
void insert_range(Range &&R)
Definition: SmallPtrSet.h:490
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void reserve(size_type N)
Definition: SmallVector.h:664
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:806
void resize(size_type N)
Definition: SmallVector.h:639
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:480
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:434
static constexpr size_t npos
Definition: StringRef.h:57
Class to represent struct types.
Definition: DerivedTypes.h:218
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:368
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:441
Multiway switch.
Verify that the TBAA Metadatas are valid.
Definition: Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
Definition: Verifier.cpp:7817
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:264
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:267
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:311
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:255
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:225
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:352
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:231
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
op_range operands()
Definition: User.h:292
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:457
Value * getValue() const
Definition: Metadata.h:497
LLVM Value Representation.
Definition: Value.h:75
iterator_range< user_iterator > materialized_users()
Definition: Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
static constexpr uint64_t MaximumAlignment
Definition: Value.h:830
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:705
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:812
iterator_range< user_iterator > users()
Definition: Value.h:426
bool materialized_use_empty() const
Definition: Value.h:351
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1098
bool hasName() const
Definition: Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:108
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7933
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7945
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:159
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_ABI AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ Entry
Definition: COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Intrinsics.cpp:458
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics" that take ...
Definition: Intrinsics.cpp:794
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Intrinsics.cpp:49
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition: DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:149
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1887
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ DW_MACINFO_undef
Definition: Dwarf.h:804
@ DW_MACINFO_start_file
Definition: Dwarf.h:805
@ DW_MACINFO_define
Definition: Dwarf.h:803
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
@ Write
Definition: CodeGenData.h:109
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition: STLExtras.h:870
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2491
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7502
AllocFnKind
Definition: Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2155
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1669
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7928
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
LLVM_ABI bool isExplicitlyUnknownBranchWeightsMetadata(const MDNode &MD)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
constexpr bool isCallableCC(CallingConv::ID CC)
Definition: CallingConv.h:298
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7513
#define N
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:266
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:29
Description of the encoding of one expression Op.
static LLVM_ABI const char * SyntheticFunctionEntryCount
Definition: ProfDataUtils.h:28
static LLVM_ABI const char * BranchWeights
Definition: ProfDataUtils.h:25
static LLVM_ABI const char * FunctionEntryCount
Definition: ProfDataUtils.h:27
static LLVM_ABI const char * UnknownBranchWeightsMarker
Definition: ProfDataUtils.h:30
static LLVM_ABI const char * ValueProfile
Definition: ProfDataUtils.h:26
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1011
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1039
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1012
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:303
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:155
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:149
raw_ostream * OS
Definition: Verifier.cpp:141
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:296
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:151
LLVMContext & Context
Definition: Verifier.cpp:146
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:153
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition: Verifier.cpp:285
const Module & M
Definition: Verifier.cpp:142
const DataLayout & DL
Definition: Verifier.cpp:145
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:312
const Triple & TT
Definition: Verifier.cpp:144
ModuleSlotTracker MST
Definition: Verifier.cpp:143