LLVM 20.0.0git
Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verification;
13// instead, it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integral types, for example.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
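// Illustrative usage sketch (not part of this file): the checks implemented
// below are normally reached through the entry points declared in
// llvm/IR/Verifier.h. Both functions return true when the IR is broken.
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//   bool FunctionBroken = llvm::verifyFunction(F, &llvm::errs());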
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
58#include "llvm/ADT/SmallSet.h"
61#include "llvm/ADT/StringRef.h"
62#include "llvm/ADT/Twine.h"
64#include "llvm/IR/Argument.h"
66#include "llvm/IR/Attributes.h"
67#include "llvm/IR/BasicBlock.h"
68#include "llvm/IR/CFG.h"
69#include "llvm/IR/CallingConv.h"
70#include "llvm/IR/Comdat.h"
71#include "llvm/IR/Constant.h"
74#include "llvm/IR/Constants.h"
76#include "llvm/IR/DataLayout.h"
77#include "llvm/IR/DebugInfo.h"
79#include "llvm/IR/DebugLoc.h"
81#include "llvm/IR/Dominators.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
85#include "llvm/IR/GlobalAlias.h"
86#include "llvm/IR/GlobalValue.h"
88#include "llvm/IR/InlineAsm.h"
89#include "llvm/IR/InstVisitor.h"
90#include "llvm/IR/InstrTypes.h"
91#include "llvm/IR/Instruction.h"
94#include "llvm/IR/Intrinsics.h"
95#include "llvm/IR/IntrinsicsAArch64.h"
96#include "llvm/IR/IntrinsicsAMDGPU.h"
97#include "llvm/IR/IntrinsicsARM.h"
98#include "llvm/IR/IntrinsicsNVPTX.h"
99#include "llvm/IR/IntrinsicsWebAssembly.h"
100#include "llvm/IR/LLVMContext.h"
102#include "llvm/IR/Metadata.h"
103#include "llvm/IR/Module.h"
105#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
123#include <algorithm>
124#include <cassert>
125#include <cstdint>
126#include <memory>
127#include <optional>
128#include <string>
129#include <utility>
130
131using namespace llvm;
132
134 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
135 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
136 "scopes are not dominating"));
137
138namespace llvm {
139
140struct VerifierSupport {
141 raw_ostream *OS;
142 const Module &M;
143 ModuleSlotTracker MST;
144 Triple TT;
145 const DataLayout &DL;
146 LLVMContext &Context;
147
148 /// Track the brokenness of the module while recursively visiting.
149 bool Broken = false;
150 /// Broken debug info can be "recovered" from by stripping the debug info.
151 bool BrokenDebugInfo = false;
152 /// Whether to treat broken debug info as an error.
153 bool TreatBrokenDebugInfoAsError = false;
154
155 explicit VerifierSupport(raw_ostream *OS, const Module &M)
156 : OS(OS), M(M), MST(&M), TT(Triple::normalize(M.getTargetTriple())),
157 DL(M.getDataLayout()), Context(M.getContext()) {}
158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
186 void Write(DbgVariableRecord::LocationType Type) {
187 switch (Type) {
188 case DbgVariableRecord::LocationType::Value:
189 *OS << "value";
190 break;
191 case DbgVariableRecord::LocationType::Declare:
192 *OS << "declare";
193 break;
194 case DbgVariableRecord::LocationType::Assign:
195 *OS << "assign";
196 break;
197 case DbgVariableRecord::LocationType::End:
198 *OS << "end";
199 break;
200 case DbgVariableRecord::LocationType::Any:
201 *OS << "any";
202 break;
203 };
204 }
205
206 void Write(const Metadata *MD) {
207 if (!MD)
208 return;
209 MD->print(*OS, MST, &M);
210 *OS << '\n';
211 }
212
213 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
214 Write(MD.get());
215 }
216
217 void Write(const NamedMDNode *NMD) {
218 if (!NMD)
219 return;
220 NMD->print(*OS, MST);
221 *OS << '\n';
222 }
223
224 void Write(Type *T) {
225 if (!T)
226 return;
227 *OS << ' ' << *T;
228 }
229
230 void Write(const Comdat *C) {
231 if (!C)
232 return;
233 *OS << *C;
234 }
235
236 void Write(const APInt *AI) {
237 if (!AI)
238 return;
239 *OS << *AI << '\n';
240 }
241
242 void Write(const unsigned i) { *OS << i << '\n'; }
243
244 // NOLINTNEXTLINE(readability-identifier-naming)
245 void Write(const Attribute *A) {
246 if (!A)
247 return;
248 *OS << A->getAsString() << '\n';
249 }
250
251 // NOLINTNEXTLINE(readability-identifier-naming)
252 void Write(const AttributeSet *AS) {
253 if (!AS)
254 return;
255 *OS << AS->getAsString() << '\n';
256 }
257
258 // NOLINTNEXTLINE(readability-identifier-naming)
259 void Write(const AttributeList *AL) {
260 if (!AL)
261 return;
262 AL->print(*OS);
263 }
264
265 void Write(Printable P) { *OS << P << '\n'; }
266
267 template <typename T> void Write(ArrayRef<T> Vs) {
268 for (const T &V : Vs)
269 Write(V);
270 }
271
272 template <typename T1, typename... Ts>
273 void WriteTs(const T1 &V1, const Ts &... Vs) {
274 Write(V1);
275 WriteTs(Vs...);
276 }
277
278 template <typename... Ts> void WriteTs() {}
279
280public:
281 /// A check failed, so print out the condition and the message.
282 ///
283 /// This provides a nice place to put a breakpoint if you want to see why
284 /// something is not correct.
285 void CheckFailed(const Twine &Message) {
286 if (OS)
287 *OS << Message << '\n';
288 Broken = true;
289 }
290
291 /// A check failed (with values to print).
292 ///
293 /// This calls the Message-only version so that the above is easier to set a
294 /// breakpoint on.
295 template <typename T1, typename... Ts>
296 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
297 CheckFailed(Message);
298 if (OS)
299 WriteTs(V1, Vs...);
300 }
301
302 /// A debug info check failed.
303 void DebugInfoCheckFailed(const Twine &Message) {
304 if (OS)
305 *OS << Message << '\n';
306 Broken |= TreatBrokenDebugInfoAsError;
307 BrokenDebugInfo = true;
308 }
309
310 /// A debug info check failed (with values to print).
311 template <typename T1, typename... Ts>
312 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
313 const Ts &... Vs) {
314 DebugInfoCheckFailed(Message);
315 if (OS)
316 WriteTs(V1, Vs...);
317 }
318};
319
320} // namespace llvm
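// Note on the helpers above (illustrative, not from this file): CheckFailed
// first prints the message and then routes each extra argument through the
// matching Write overload, so a call such as
//
//   CheckFailed("Global is referenced in a different module!", &GV, &M, I);
//
// emits the message followed by the printed forms of the global, the module
// and the instruction. DebugInfoCheckFailed behaves the same way, but only
// marks the module broken when TreatBrokenDebugInfoAsError is set.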
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 assert(F.getParent() == &M &&
403 "An instance of this class only works with a specific module!");
404
405 // First ensure the function is well-enough formed to compute dominance
406 // information, and directly compute a dominance tree. We don't rely on the
407 // pass manager to provide this as it isolates us from a potentially
408 // out-of-date dominator tree and makes it significantly more complex to run
409 // this code outside of a pass manager.
410 // FIXME: It's really gross that we have to cast away constness here.
411 if (!F.empty())
412 DT.recalculate(const_cast<Function &>(F));
413
414 for (const BasicBlock &BB : F) {
415 if (!BB.empty() && BB.back().isTerminator())
416 continue;
417
418 if (OS) {
419 *OS << "Basic Block in function '" << F.getName()
420 << "' does not have terminator!\n";
421 BB.printAsOperand(*OS, true, MST);
422 *OS << "\n";
423 }
424 return false;
425 }
426
427 auto FailureCB = [this](const Twine &Message) {
428 this->CheckFailed(Message);
429 };
430 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
431
432 Broken = false;
433 // FIXME: We strip const here because the inst visitor strips const.
434 visit(const_cast<Function &>(F));
435 verifySiblingFuncletUnwinds();
436
437 if (ConvergenceVerifyHelper.sawTokens())
438 ConvergenceVerifyHelper.verify(DT);
439
440 InstsInThisBlock.clear();
441 DebugFnArgs.clear();
442 LandingPadResultTy = nullptr;
443 SawFrameEscape = false;
444 SiblingFuncletInfo.clear();
445 verifyNoAliasScopeDecl();
446 NoAliasScopeDecls.clear();
447
448 return !Broken;
449 }
450
451 /// Verify the module that this instance of \c Verifier was initialized with.
452 bool verify() {
453 Broken = false;
454
455 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
456 for (const Function &F : M)
457 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
458 DeoptimizeDeclarations.push_back(&F);
459
460 // Now that we've visited every function, verify that we never asked to
461 // recover a frame index that wasn't escaped.
462 verifyFrameRecoverIndices();
463 for (const GlobalVariable &GV : M.globals())
464 visitGlobalVariable(GV);
465
466 for (const GlobalAlias &GA : M.aliases())
467 visitGlobalAlias(GA);
468
469 for (const GlobalIFunc &GI : M.ifuncs())
470 visitGlobalIFunc(GI);
471
472 for (const NamedMDNode &NMD : M.named_metadata())
473 visitNamedMDNode(NMD);
474
475 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
476 visitComdat(SMEC.getValue());
477
478 visitModuleFlags();
479 visitModuleIdents();
480 visitModuleCommandLines();
481
482 verifyCompileUnits();
483
484 verifyDeoptimizeCallingConvs();
485 DISubprogramAttachments.clear();
486 return !Broken;
487 }
488
489private:
490 /// Whether a metadata node is allowed to be, or contain, a DILocation.
491 enum class AreDebugLocsAllowed { No, Yes };
492
493 /// Metadata that should be treated as a range, with slightly different
494 /// requirements.
495 enum class RangeLikeMetadataKind {
496 Range, // MD_range
497 AbsoluteSymbol, // MD_absolute_symbol
498 NoaliasAddrspace // MD_noalias_addrspace
499 };
500
501 // Verification methods...
502 void visitGlobalValue(const GlobalValue &GV);
503 void visitGlobalVariable(const GlobalVariable &GV);
504 void visitGlobalAlias(const GlobalAlias &GA);
505 void visitGlobalIFunc(const GlobalIFunc &GI);
506 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
507 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
508 const GlobalAlias &A, const Constant &C);
509 void visitNamedMDNode(const NamedMDNode &NMD);
510 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
511 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
512 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
513 void visitDIArgList(const DIArgList &AL, Function *F);
514 void visitComdat(const Comdat &C);
515 void visitModuleIdents();
516 void visitModuleCommandLines();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
519 DenseMap<const MDString *, const MDNode *> &SeenIDs,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallStackMetadata(MDNode *MD);
531 void visitMemProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
533 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
534 void visitMMRAMetadata(Instruction &I, MDNode *MD);
535 void visitAnnotationMetadata(MDNode *Annotation);
536 void visitAliasScopeMetadata(const MDNode *MD);
537 void visitAliasScopeListMetadata(const MDNode *MD);
538 void visitAccessGroupMetadata(const MDNode *MD);
539
540 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
541#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
542#include "llvm/IR/Metadata.def"
543 void visitDIScope(const DIScope &N);
544 void visitDIVariable(const DIVariable &N);
545 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
546 void visitDITemplateParameter(const DITemplateParameter &N);
547
548 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
549
550 void visit(DbgLabelRecord &DLR);
551 void visit(DbgVariableRecord &DVR);
552 // InstVisitor overrides...
553 using InstVisitor<Verifier>::visit;
554 void visitDbgRecords(Instruction &I);
555 void visit(Instruction &I);
556
557 void visitTruncInst(TruncInst &I);
558 void visitZExtInst(ZExtInst &I);
559 void visitSExtInst(SExtInst &I);
560 void visitFPTruncInst(FPTruncInst &I);
561 void visitFPExtInst(FPExtInst &I);
562 void visitFPToUIInst(FPToUIInst &I);
563 void visitFPToSIInst(FPToSIInst &I);
564 void visitUIToFPInst(UIToFPInst &I);
565 void visitSIToFPInst(SIToFPInst &I);
566 void visitIntToPtrInst(IntToPtrInst &I);
567 void visitPtrToIntInst(PtrToIntInst &I);
568 void visitBitCastInst(BitCastInst &I);
569 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
570 void visitPHINode(PHINode &PN);
571 void visitCallBase(CallBase &Call);
572 void visitUnaryOperator(UnaryOperator &U);
573 void visitBinaryOperator(BinaryOperator &B);
574 void visitICmpInst(ICmpInst &IC);
575 void visitFCmpInst(FCmpInst &FC);
576 void visitExtractElementInst(ExtractElementInst &EI);
577 void visitInsertElementInst(InsertElementInst &EI);
578 void visitShuffleVectorInst(ShuffleVectorInst &EI);
579 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
580 void visitCallInst(CallInst &CI);
581 void visitInvokeInst(InvokeInst &II);
582 void visitGetElementPtrInst(GetElementPtrInst &GEP);
583 void visitLoadInst(LoadInst &LI);
584 void visitStoreInst(StoreInst &SI);
585 void verifyDominatesUse(Instruction &I, unsigned i);
586 void visitInstruction(Instruction &I);
587 void visitTerminator(Instruction &I);
588 void visitBranchInst(BranchInst &BI);
589 void visitReturnInst(ReturnInst &RI);
590 void visitSwitchInst(SwitchInst &SI);
591 void visitIndirectBrInst(IndirectBrInst &BI);
592 void visitCallBrInst(CallBrInst &CBI);
593 void visitSelectInst(SelectInst &SI);
594 void visitUserOp1(Instruction &I);
595 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
596 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
597 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
598 void visitVPIntrinsic(VPIntrinsic &VPI);
599 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
600 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
601 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
602 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
603 void visitFenceInst(FenceInst &FI);
604 void visitAllocaInst(AllocaInst &AI);
605 void visitExtractValueInst(ExtractValueInst &EVI);
606 void visitInsertValueInst(InsertValueInst &IVI);
607 void visitEHPadPredecessors(Instruction &I);
608 void visitLandingPadInst(LandingPadInst &LPI);
609 void visitResumeInst(ResumeInst &RI);
610 void visitCatchPadInst(CatchPadInst &CPI);
611 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
612 void visitCleanupPadInst(CleanupPadInst &CPI);
613 void visitFuncletPadInst(FuncletPadInst &FPI);
614 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
615 void visitCleanupReturnInst(CleanupReturnInst &CRI);
616
617 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
618 void verifySwiftErrorValue(const Value *SwiftErrorVal);
619 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
620 void verifyMustTailCall(CallInst &CI);
621 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
622 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
623 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
624 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
625 const Value *V);
626 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
627 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
628 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
629
630 void visitConstantExprsRecursively(const Constant *EntryC);
631 void visitConstantExpr(const ConstantExpr *CE);
632 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
633 void verifyInlineAsmCall(const CallBase &Call);
634 void verifyStatepoint(const CallBase &Call);
635 void verifyFrameRecoverIndices();
636 void verifySiblingFuncletUnwinds();
637
638 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
639 void verifyFragmentExpression(const DbgVariableRecord &I);
640 template <typename ValueOrMetadata>
641 void verifyFragmentExpression(const DIVariable &V,
642 DIExpression::FragmentInfo Fragment,
643 ValueOrMetadata *Desc);
644 void verifyFnArgs(const DbgVariableIntrinsic &I);
645 void verifyFnArgs(const DbgVariableRecord &DVR);
646 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
647 void verifyNotEntryValue(const DbgVariableRecord &I);
648
649 /// Module-level debug info verification...
650 void verifyCompileUnits();
651
652 /// Module-level verification that all @llvm.experimental.deoptimize
653 /// declarations share the same calling convention.
654 void verifyDeoptimizeCallingConvs();
655
656 void verifyAttachedCallBundle(const CallBase &Call,
657 const OperandBundleUse &BU);
658
659 /// Verify the llvm.experimental.noalias.scope.decl declarations
660 void verifyNoAliasScopeDecl();
661};
662
663} // end anonymous namespace
664
665/// We know that cond should be true, if not print an error message.
666#define Check(C, ...) \
667 do { \
668 if (!(C)) { \
669 CheckFailed(__VA_ARGS__); \
670 return; \
671 } \
672 } while (false)
673
674/// We know that a debug info condition should be true, if not print
675/// an error message.
676#define CheckDI(C, ...) \
677 do { \
678 if (!(C)) { \
679 DebugInfoCheckFailed(__VA_ARGS__); \
680 return; \
681 } \
682 } while (false)
683
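// Usage note (illustrative): both macros record the failure and then return
// from the enclosing visit method, so one malformed construct does not cascade
// into a chain of follow-on diagnostics. Typical uses from the code below:
//
//   Check(I.getOperand(i) != nullptr, "Operand is null", &I);
//   CheckDI(N.isDistinct(), "compile units must be distinct", &N);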
684void Verifier::visitDbgRecords(Instruction &I) {
685 if (!I.DebugMarker)
686 return;
687 CheckDI(I.DebugMarker->MarkedInstr == &I,
688 "Instruction has invalid DebugMarker", &I);
689 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
690 "PHI Node must not have any attached DbgRecords", &I);
691 for (DbgRecord &DR : I.getDbgRecordRange()) {
692 CheckDI(DR.getMarker() == I.DebugMarker,
693 "DbgRecord had invalid DebugMarker", &I, &DR);
694 if (auto *Loc =
695 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
696 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
697 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
698 visit(*DVR);
699 // These have to appear after `visit` for consistency with existing
700 // intrinsic behaviour.
701 verifyFragmentExpression(*DVR);
702 verifyNotEntryValue(*DVR);
703 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
704 visit(*DLR);
705 }
706 }
707}
708
709void Verifier::visit(Instruction &I) {
710 visitDbgRecords(I);
711 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
712 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
713 InstVisitor<Verifier>::visit(I);
714}
715
716// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
717static void forEachUser(const Value *User,
718 SmallPtrSet<const Value *, 32> &Visited,
719 llvm::function_ref<bool(const Value *)> Callback) {
720 if (!Visited.insert(User).second)
721 return;
722
723 SmallVector<const Value *> WorkList;
724 append_range(WorkList, User->materialized_users());
725 while (!WorkList.empty()) {
726 const Value *Cur = WorkList.pop_back_val();
727 if (!Visited.insert(Cur).second)
728 continue;
729 if (Callback(Cur))
730 append_range(WorkList, Cur->materialized_users());
731 }
732}
733
734void Verifier::visitGlobalValue(const GlobalValue &GV) {
735 Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
736 "Global is external, but doesn't have external or weak linkage!", &GV);
737
738 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
739
740 if (MaybeAlign A = GO->getAlign()) {
741 Check(A->value() <= Value::MaximumAlignment,
742 "huge alignment values are unsupported", GO);
743 }
744
745 if (const MDNode *Associated =
746 GO->getMetadata(LLVMContext::MD_associated)) {
747 Check(Associated->getNumOperands() == 1,
748 "associated metadata must have one operand", &GV, Associated);
749 const Metadata *Op = Associated->getOperand(0).get();
750 Check(Op, "associated metadata must have a global value", GO, Associated);
751
752 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
753 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
754 if (VM) {
755 Check(isa<PointerType>(VM->getValue()->getType()),
756 "associated value must be pointer typed", GV, Associated);
757
758 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
759 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
760 "associated metadata must point to a GlobalObject", GO, Stripped);
761 Check(Stripped != GO,
762 "global values should not associate to themselves", GO,
763 Associated);
764 }
765 }
766
767 // FIXME: Why is getMetadata on GlobalValue protected?
768 if (const MDNode *AbsoluteSymbol =
769 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
770 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
771 DL.getIntPtrType(GO->getType()),
772 RangeLikeMetadataKind::AbsoluteSymbol);
773 }
774 }
775
776 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
777 "Only global variables can have appending linkage!", &GV);
778
779 if (GV.hasAppendingLinkage()) {
780 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
781 Check(GVar && GVar->getValueType()->isArrayTy(),
782 "Only global arrays can have appending linkage!", GVar);
783 }
784
785 if (GV.isDeclarationForLinker())
786 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
787
788 if (GV.hasDLLExportStorageClass()) {
789 Check(GV.hasDefaultVisibility() || GV.hasProtectedVisibility(),
790 "dllexport GlobalValue must have default or protected visibility",
791 &GV);
792 }
793 if (GV.hasDLLImportStorageClass()) {
794 Check(GV.hasDefaultVisibility(),
795 "dllimport GlobalValue must have default visibility", &GV);
796 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
797 &GV);
798
799 Check((GV.isDeclaration() &&
802 "Global is marked as dllimport, but not external", &GV);
803 }
804
805 if (GV.isImplicitDSOLocal())
806 Check(GV.isDSOLocal(),
807 "GlobalValue with local linkage or non-default "
808 "visibility must be dso_local!",
809 &GV);
810
811 if (GV.isTagged()) {
812 Check(!GV.hasSection(), "tagged GlobalValue must not be in section.", &GV);
813 }
814
815 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
816 if (const Instruction *I = dyn_cast<Instruction>(V)) {
817 if (!I->getParent() || !I->getParent()->getParent())
818 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
819 I);
820 else if (I->getParent()->getParent()->getParent() != &M)
821 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
822 I->getParent()->getParent(),
823 I->getParent()->getParent()->getParent());
824 return false;
825 } else if (const Function *F = dyn_cast<Function>(V)) {
826 if (F->getParent() != &M)
827 CheckFailed("Global is used by function in a different module", &GV, &M,
828 F, F->getParent());
829 return false;
830 }
831 return true;
832 });
833}
834
835void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
836 Type *GVType = GV.getValueType();
837
838 if (GV.hasInitializer()) {
839 Check(GV.getInitializer()->getType() == GVType,
840 "Global variable initializer type does not match global "
841 "variable type!",
842 &GV);
843 // If the global has common linkage, it must have a zero initializer and
844 // cannot be constant.
845 if (GV.hasCommonLinkage()) {
846 Check(GV.getInitializer()->isNullValue(),
847 "'common' global must have a zero initializer!", &GV);
848 Check(!GV.isConstant(), "'common' global may not be marked constant!",
849 &GV);
850 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
851 }
852 }
853
854 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
855 GV.getName() == "llvm.global_dtors")) {
857 "invalid linkage for intrinsic global variable", &GV);
859 "invalid uses of intrinsic global variable", &GV);
860
861 // Don't worry about emitting an error for it not being an array,
862 // visitGlobalValue will complain on appending non-array.
863 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
864 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
865 PointerType *FuncPtrTy =
866 PointerType::get(Context, DL.getProgramAddressSpace());
867 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
868 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
869 STy->getTypeAtIndex(1) == FuncPtrTy,
870 "wrong type for intrinsic global variable", &GV);
871 Check(STy->getNumElements() == 3,
872 "the third field of the element type is mandatory, "
873 "specify ptr null to migrate from the obsoleted 2-field form");
874 Type *ETy = STy->getTypeAtIndex(2);
875 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
876 &GV);
877 }
878 }
879
880 if (GV.hasName() && (GV.getName() == "llvm.used" ||
881 GV.getName() == "llvm.compiler.used")) {
883 "invalid linkage for intrinsic global variable", &GV);
885 "invalid uses of intrinsic global variable", &GV);
886
887 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
888 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
889 Check(PTy, "wrong type for intrinsic global variable", &GV);
890 if (GV.hasInitializer()) {
891 const Constant *Init = GV.getInitializer();
892 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
893 Check(InitArray, "wrong initializer for intrinsic global variable",
894 Init);
895 for (Value *Op : InitArray->operands()) {
896 Value *V = Op->stripPointerCasts();
897 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
898 isa<GlobalAlias>(V),
899 Twine("invalid ") + GV.getName() + " member", V);
900 Check(V->hasName(),
901 Twine("members of ") + GV.getName() + " must be named", V);
902 }
903 }
904 }
905 }
906
907 // Visit any debug info attachments.
908 SmallVector<MDNode *, 1> MDs;
909 GV.getMetadata(LLVMContext::MD_dbg, MDs);
910 for (auto *MD : MDs) {
911 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
912 visitDIGlobalVariableExpression(*GVE);
913 else
914 CheckDI(false, "!dbg attachment of global variable must be a "
915 "DIGlobalVariableExpression");
916 }
917
918 // Scalable vectors cannot be global variables, since we don't know
919 // the runtime size.
920 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
921
922 // Check if it is or contains a target extension type that disallows being
923 // used as a global.
925 "Global @" + GV.getName() + " has illegal target extension type",
926 GVType);
927
928 if (!GV.hasInitializer()) {
929 visitGlobalValue(GV);
930 return;
931 }
932
933 // Walk any aggregate initializers looking for bitcasts between address spaces
934 visitConstantExprsRecursively(GV.getInitializer());
935
936 visitGlobalValue(GV);
937}
938
939void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
940 SmallPtrSet<const GlobalAlias *, 4> Visited;
941 Visited.insert(&GA);
942 visitAliaseeSubExpr(Visited, GA, C);
943}
944
945void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
946 const GlobalAlias &GA, const Constant &C) {
947 if (GA.hasAvailableExternallyLinkage()) {
948 Check(isa<GlobalValue>(C) &&
949 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
950 "available_externally alias must point to available_externally "
951 "global value",
952 &GA);
953 }
954 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
956 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
957 &GA);
958 }
959
960 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
961 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
962
963 Check(!GA2->isInterposable(),
964 "Alias cannot point to an interposable alias", &GA);
965 } else {
966 // Only continue verifying subexpressions of GlobalAliases.
967 // Do not recurse into global initializers.
968 return;
969 }
970 }
971
972 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
973 visitConstantExprsRecursively(CE);
974
975 for (const Use &U : C.operands()) {
976 Value *V = &*U;
977 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
978 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
979 else if (const auto *C2 = dyn_cast<Constant>(V))
980 visitAliaseeSubExpr(Visited, GA, *C2);
981 }
982}
983
984void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
985 Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
986 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
987 "weak_odr, external, or available_externally linkage!",
988 &GA);
989 const Constant *Aliasee = GA.getAliasee();
990 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
991 Check(GA.getType() == Aliasee->getType(),
992 "Alias and aliasee types should match!", &GA);
993
994 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
995 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
996
997 visitAliaseeSubExpr(GA, *Aliasee);
998
999 visitGlobalValue(GA);
1000}
1001
1002void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1003 Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
1004 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1005 "weak_odr, or external linkage!",
1006 &GI);
1007 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1008 // is a Function definition.
1009 const Function *Resolver = GI.getResolverFunction();
1010 Check(Resolver, "IFunc must have a Function resolver", &GI);
1011 Check(!Resolver->isDeclarationForLinker(),
1012 "IFunc resolver must be a definition", &GI);
1013
1014 // Check that the immediate resolver operand (prior to any bitcasts) has the
1015 // correct type.
1016 const Type *ResolverTy = GI.getResolver()->getType();
1017
1018 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1019 "IFunc resolver must return a pointer", &GI);
1020
1021 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1022 "IFunc resolver has incorrect type", &GI);
1023}
1024
1025void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1026 // There used to be various other llvm.dbg.* nodes, but we don't support
1027 // upgrading them and we want to reserve the namespace for future uses.
1028 if (NMD.getName().starts_with("llvm.dbg."))
1029 CheckDI(NMD.getName() == "llvm.dbg.cu",
1030 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1031 for (const MDNode *MD : NMD.operands()) {
1032 if (NMD.getName() == "llvm.dbg.cu")
1033 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1034
1035 if (!MD)
1036 continue;
1037
1038 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1039 }
1040}
1041
1042void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1043 // Only visit each node once. Metadata can be mutually recursive, so this
1044 // avoids infinite recursion here, as well as being an optimization.
1045 if (!MDNodes.insert(&MD).second)
1046 return;
1047
1048 Check(&MD.getContext() == &Context,
1049 "MDNode context does not match Module context!", &MD);
1050
1051 switch (MD.getMetadataID()) {
1052 default:
1053 llvm_unreachable("Invalid MDNode subclass");
1054 case Metadata::MDTupleKind:
1055 break;
1056#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1057 case Metadata::CLASS##Kind: \
1058 visit##CLASS(cast<CLASS>(MD)); \
1059 break;
1060#include "llvm/IR/Metadata.def"
1061 }
1062
1063 for (const Metadata *Op : MD.operands()) {
1064 if (!Op)
1065 continue;
1066 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1067 &MD, Op);
1068 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1069 "DILocation not allowed within this metadata node", &MD, Op);
1070 if (auto *N = dyn_cast<MDNode>(Op)) {
1071 visitMDNode(*N, AllowLocs);
1072 continue;
1073 }
1074 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1075 visitValueAsMetadata(*V, nullptr);
1076 continue;
1077 }
1078 }
1079
1080 // Check these last, so we diagnose problems in operands first.
1081 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1082 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1083}
1084
1085void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1086 Check(MD.getValue(), "Expected valid value", &MD);
1087 Check(!MD.getValue()->getType()->isMetadataTy(),
1088 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1089
1090 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1091 if (!L)
1092 return;
1093
1094 Check(F, "function-local metadata used outside a function", L);
1095
1096 // If this was an instruction, bb, or argument, verify that it is in the
1097 // function that we expect.
1098 Function *ActualF = nullptr;
1099 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1100 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1101 ActualF = I->getParent()->getParent();
1102 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1103 ActualF = BB->getParent();
1104 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1105 ActualF = A->getParent();
1106 assert(ActualF && "Unimplemented function local metadata case!");
1107
1108 Check(ActualF == F, "function-local metadata used in wrong function", L);
1109}
1110
1111void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1112 for (const ValueAsMetadata *VAM : AL.getArgs())
1113 visitValueAsMetadata(*VAM, F);
1114}
1115
1116void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1117 Metadata *MD = MDV.getMetadata();
1118 if (auto *N = dyn_cast<MDNode>(MD)) {
1119 visitMDNode(*N, AreDebugLocsAllowed::No);
1120 return;
1121 }
1122
1123 // Only visit each node once. Metadata can be mutually recursive, so this
1124 // avoids infinite recursion here, as well as being an optimization.
1125 if (!MDNodes.insert(MD).second)
1126 return;
1127
1128 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1129 visitValueAsMetadata(*V, F);
1130
1131 if (auto *AL = dyn_cast<DIArgList>(MD))
1132 visitDIArgList(*AL, F);
1133}
1134
1135static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1136static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1137static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1138
1139void Verifier::visitDILocation(const DILocation &N) {
1140 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1141 "location requires a valid scope", &N, N.getRawScope());
1142 if (auto *IA = N.getRawInlinedAt())
1143 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1144 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1145 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1146}
1147
1148void Verifier::visitGenericDINode(const GenericDINode &N) {
1149 CheckDI(N.getTag(), "invalid tag", &N);
1150}
1151
1152void Verifier::visitDIScope(const DIScope &N) {
1153 if (auto *F = N.getRawFile())
1154 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1155}
1156
1157void Verifier::visitDISubrange(const DISubrange &N) {
1158 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1159 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1160 "Subrange can have any one of count or upperBound", &N);
1161 auto *CBound = N.getRawCountNode();
1162 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1163 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1164 "Count must be signed constant or DIVariable or DIExpression", &N);
1165 auto Count = N.getCount();
1166 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1167 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1168 "invalid subrange count", &N);
1169 auto *LBound = N.getRawLowerBound();
1170 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1171 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1172 "LowerBound must be signed constant or DIVariable or DIExpression",
1173 &N);
1174 auto *UBound = N.getRawUpperBound();
1175 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1176 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1177 "UpperBound must be signed constant or DIVariable or DIExpression",
1178 &N);
1179 auto *Stride = N.getRawStride();
1180 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1181 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1182 "Stride must be signed constant or DIVariable or DIExpression", &N);
1183}
1184
1185void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1186 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1187 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1188 "GenericSubrange can have any one of count or upperBound", &N);
1189 auto *CBound = N.getRawCountNode();
1190 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1191 "Count must be signed constant or DIVariable or DIExpression", &N);
1192 auto *LBound = N.getRawLowerBound();
1193 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1194 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1195 "LowerBound must be signed constant or DIVariable or DIExpression",
1196 &N);
1197 auto *UBound = N.getRawUpperBound();
1198 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1199 "UpperBound must be signed constant or DIVariable or DIExpression",
1200 &N);
1201 auto *Stride = N.getRawStride();
1202 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1203 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1204 "Stride must be signed constant or DIVariable or DIExpression", &N);
1205}
1206
1207void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1208 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1209}
1210
1211void Verifier::visitDIBasicType(const DIBasicType &N) {
1212 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1213 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1214 N.getTag() == dwarf::DW_TAG_string_type,
1215 "invalid tag", &N);
1216}
1217
1218void Verifier::visitDIStringType(const DIStringType &N) {
1219 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1220 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1221 &N);
1222}
1223
1224void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1225 // Common scope checks.
1226 visitDIScope(N);
1227
1228 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1229 N.getTag() == dwarf::DW_TAG_pointer_type ||
1230 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1231 N.getTag() == dwarf::DW_TAG_reference_type ||
1232 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1233 N.getTag() == dwarf::DW_TAG_const_type ||
1234 N.getTag() == dwarf::DW_TAG_immutable_type ||
1235 N.getTag() == dwarf::DW_TAG_volatile_type ||
1236 N.getTag() == dwarf::DW_TAG_restrict_type ||
1237 N.getTag() == dwarf::DW_TAG_atomic_type ||
1238 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1239 N.getTag() == dwarf::DW_TAG_member ||
1240 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1241 N.getTag() == dwarf::DW_TAG_inheritance ||
1242 N.getTag() == dwarf::DW_TAG_friend ||
1243 N.getTag() == dwarf::DW_TAG_set_type ||
1244 N.getTag() == dwarf::DW_TAG_template_alias,
1245 "invalid tag", &N);
1246 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1247 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1248 N.getRawExtraData());
1249 }
1250
1251 if (N.getTag() == dwarf::DW_TAG_set_type) {
1252 if (auto *T = N.getRawBaseType()) {
1253 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1254 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1255 CheckDI(
1256 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1257 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1258 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1259 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1260 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1261 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1262 "invalid set base type", &N, T);
1263 }
1264 }
1265
1266 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1267 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1268 N.getRawBaseType());
1269
1270 if (N.getDWARFAddressSpace()) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1272 N.getTag() == dwarf::DW_TAG_reference_type ||
1273 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1274 "DWARF address space only applies to pointer or reference types",
1275 &N);
1276 }
1277}
1278
1279/// Detect mutually exclusive flags.
1280static bool hasConflictingReferenceFlags(unsigned Flags) {
1281 return ((Flags & DINode::FlagLValueReference) &&
1282 (Flags & DINode::FlagRValueReference)) ||
1283 ((Flags & DINode::FlagTypePassByValue) &&
1284 (Flags & DINode::FlagTypePassByReference));
1285}
1286
1287void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1288 auto *Params = dyn_cast<MDTuple>(&RawParams);
1289 CheckDI(Params, "invalid template params", &N, &RawParams);
1290 for (Metadata *Op : Params->operands()) {
1291 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1292 &N, Params, Op);
1293 }
1294}
1295
1296void Verifier::visitDICompositeType(const DICompositeType &N) {
1297 // Common scope checks.
1298 visitDIScope(N);
1299
1300 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1301 N.getTag() == dwarf::DW_TAG_structure_type ||
1302 N.getTag() == dwarf::DW_TAG_union_type ||
1303 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1304 N.getTag() == dwarf::DW_TAG_class_type ||
1305 N.getTag() == dwarf::DW_TAG_variant_part ||
1306 N.getTag() == dwarf::DW_TAG_namelist,
1307 "invalid tag", &N);
1308
1309 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1310 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1311 N.getRawBaseType());
1312
1313 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1314 "invalid composite elements", &N, N.getRawElements());
1315 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1316 N.getRawVTableHolder());
1317 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1318 "invalid reference flags", &N);
1319 unsigned DIBlockByRefStruct = 1 << 4;
1320 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1321 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1322 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1323 "DISubprogram contains null entry in `elements` field", &N);
1324
1325 if (N.isVector()) {
1326 const DINodeArray Elements = N.getElements();
1327 CheckDI(Elements.size() == 1 &&
1328 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1329 "invalid vector, expected one element of type subrange", &N);
1330 }
1331
1332 if (auto *Params = N.getRawTemplateParams())
1333 visitTemplateParams(N, *Params);
1334
1335 if (auto *D = N.getRawDiscriminator()) {
1336 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1337 "discriminator can only appear on variant part");
1338 }
1339
1340 if (N.getRawDataLocation()) {
1341 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1342 "dataLocation can only appear in array type");
1343 }
1344
1345 if (N.getRawAssociated()) {
1346 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1347 "associated can only appear in array type");
1348 }
1349
1350 if (N.getRawAllocated()) {
1351 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1352 "allocated can only appear in array type");
1353 }
1354
1355 if (N.getRawRank()) {
1356 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1357 "rank can only appear in array type");
1358 }
1359
1360 if (N.getTag() == dwarf::DW_TAG_array_type) {
1361 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1362 }
1363}
1364
1365void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1366 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1367 if (auto *Types = N.getRawTypeArray()) {
1368 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1369 for (Metadata *Ty : N.getTypeArray()->operands()) {
1370 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1371 }
1372 }
1373 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1374 "invalid reference flags", &N);
1375}
1376
1377void Verifier::visitDIFile(const DIFile &N) {
1378 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1379 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1380 if (Checksum) {
1381 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1382 "invalid checksum kind", &N);
1383 size_t Size;
1384 switch (Checksum->Kind) {
1385 case DIFile::CSK_MD5:
1386 Size = 32;
1387 break;
1388 case DIFile::CSK_SHA1:
1389 Size = 40;
1390 break;
1391 case DIFile::CSK_SHA256:
1392 Size = 64;
1393 break;
1394 }
1395 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1396 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1397 "invalid checksum", &N);
1398 }
1399}
1400
1401void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1402 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1403 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1404
1405 // Don't bother verifying the compilation directory or producer string
1406 // as those could be empty.
1407 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1408 N.getRawFile());
1409 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1410 N.getFile());
1411
1412 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1413 "invalid emission kind", &N);
1414
1415 if (auto *Array = N.getRawEnumTypes()) {
1416 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1417 for (Metadata *Op : N.getEnumTypes()->operands()) {
1418 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1419 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1420 "invalid enum type", &N, N.getEnumTypes(), Op);
1421 }
1422 }
1423 if (auto *Array = N.getRawRetainedTypes()) {
1424 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1425 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1426 CheckDI(
1427 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1428 !cast<DISubprogram>(Op)->isDefinition())),
1429 "invalid retained type", &N, Op);
1430 }
1431 }
1432 if (auto *Array = N.getRawGlobalVariables()) {
1433 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1434 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1435 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1436 "invalid global variable ref", &N, Op);
1437 }
1438 }
1439 if (auto *Array = N.getRawImportedEntities()) {
1440 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1441 for (Metadata *Op : N.getImportedEntities()->operands()) {
1442 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1443 &N, Op);
1444 }
1445 }
1446 if (auto *Array = N.getRawMacros()) {
1447 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1448 for (Metadata *Op : N.getMacros()->operands()) {
1449 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1450 }
1451 }
1452 CUVisited.insert(&N);
1453}
1454
1455void Verifier::visitDISubprogram(const DISubprogram &N) {
1456 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1457 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1458 if (auto *F = N.getRawFile())
1459 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1460 else
1461 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1462 if (auto *T = N.getRawType())
1463 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1464 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1465 N.getRawContainingType());
1466 if (auto *Params = N.getRawTemplateParams())
1467 visitTemplateParams(N, *Params);
1468 if (auto *S = N.getRawDeclaration())
1469 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1470 "invalid subprogram declaration", &N, S);
1471 if (auto *RawNode = N.getRawRetainedNodes()) {
1472 auto *Node = dyn_cast<MDTuple>(RawNode);
1473 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1474 for (Metadata *Op : Node->operands()) {
1475 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1476 isa<DIImportedEntity>(Op)),
1477 "invalid retained nodes, expected DILocalVariable, DILabel or "
1478 "DIImportedEntity",
1479 &N, Node, Op);
1480 }
1481 }
1482 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1483 "invalid reference flags", &N);
1484
1485 auto *Unit = N.getRawUnit();
1486 if (N.isDefinition()) {
1487 // Subprogram definitions (not part of the type hierarchy).
1488 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1489 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1490 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1491 // There's no good way to cross the CU boundary to insert a nested
1492 // DISubprogram definition in one CU into a type defined in another CU.
1493 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1494 if (CT && CT->getRawIdentifier() &&
1495 M.getContext().isODRUniquingDebugTypes())
1496 CheckDI(N.getDeclaration(),
1497 "definition subprograms cannot be nested within DICompositeType "
1498 "when enabling ODR",
1499 &N);
1500 } else {
1501 // Subprogram declarations (part of the type hierarchy).
1502 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1503 CheckDI(!N.getRawDeclaration(),
1504 "subprogram declaration must not have a declaration field");
1505 }
1506
1507 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1508 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1509 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1510 for (Metadata *Op : ThrownTypes->operands())
1511 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1512 Op);
1513 }
1514
1515 if (N.areAllCallsDescribed())
1516 CheckDI(N.isDefinition(),
1517 "DIFlagAllCallsDescribed must be attached to a definition");
1518}
1519
1520void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1521 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1522 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1523 "invalid local scope", &N, N.getRawScope());
1524 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1525 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1526}
1527
1528void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1529 visitDILexicalBlockBase(N);
1530
1531 CheckDI(N.getLine() || !N.getColumn(),
1532 "cannot have column info without line info", &N);
1533}
1534
1535void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1536 visitDILexicalBlockBase(N);
1537}
1538
1539void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1540 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1541 if (auto *S = N.getRawScope())
1542 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1543 if (auto *S = N.getRawDecl())
1544 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1545}
1546
1547void Verifier::visitDINamespace(const DINamespace &N) {
1548 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1549 if (auto *S = N.getRawScope())
1550 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1551}
1552
1553void Verifier::visitDIMacro(const DIMacro &N) {
1554 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1555 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1556 "invalid macinfo type", &N);
1557 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1558 if (!N.getValue().empty()) {
1559 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1560 }
1561}
1562
1563void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1564 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1565 "invalid macinfo type", &N);
1566 if (auto *F = N.getRawFile())
1567 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1568
1569 if (auto *Array = N.getRawElements()) {
1570 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1571 for (Metadata *Op : N.getElements()->operands()) {
1572 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1573 }
1574 }
1575}
1576
1577void Verifier::visitDIModule(const DIModule &N) {
1578 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1579 CheckDI(!N.getName().empty(), "anonymous module", &N);
1580}
1581
1582void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1583 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1584}
1585
1586void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1587 visitDITemplateParameter(N);
1588
1589 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1590 &N);
1591}
1592
1593void Verifier::visitDITemplateValueParameter(
1594 const DITemplateValueParameter &N) {
1595 visitDITemplateParameter(N);
1596
1597 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1598 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1599 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1600 "invalid tag", &N);
1601}
1602
1603void Verifier::visitDIVariable(const DIVariable &N) {
1604 if (auto *S = N.getRawScope())
1605 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1606 if (auto *F = N.getRawFile())
1607 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1608}
1609
1610void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1611 // Checks common to all variables.
1612 visitDIVariable(N);
1613
1614 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1615 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1616 // Check only if the global variable is not an external declaration.
1617 if (N.isDefinition())
1618 CheckDI(N.getType(), "missing global variable type", &N);
1619 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1620 CheckDI(isa<DIDerivedType>(Member),
1621 "invalid static data member declaration", &N, Member);
1622 }
1623}
1624
1625void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1626 // Checks common to all variables.
1627 visitDIVariable(N);
1628
1629 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1630 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1631 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1632 "local variable requires a valid scope", &N, N.getRawScope());
1633 if (auto Ty = N.getType())
1634 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1635}
1636
1637void Verifier::visitDIAssignID(const DIAssignID &N) {
1638 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1639 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1640}
1641
1642void Verifier::visitDILabel(const DILabel &N) {
1643 if (auto *S = N.getRawScope())
1644 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1645 if (auto *F = N.getRawFile())
1646 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1647
1648 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1649 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1650 "label requires a valid scope", &N, N.getRawScope());
1651}
1652
1653void Verifier::visitDIExpression(const DIExpression &N) {
1654 CheckDI(N.isValid(), "invalid expression", &N);
1655}
1656
1657void Verifier::visitDIGlobalVariableExpression(
1658 const DIGlobalVariableExpression &GVE) {
1659 CheckDI(GVE.getVariable(), "missing variable");
1660 if (auto *Var = GVE.getVariable())
1661 visitDIGlobalVariable(*Var);
1662 if (auto *Expr = GVE.getExpression()) {
1663 visitDIExpression(*Expr);
1664 if (auto Fragment = Expr->getFragmentInfo())
1665 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1666 }
1667}
1668
1669void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1670 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1671 if (auto *T = N.getRawType())
1672 CheckDI(isType(T), "invalid type ref", &N, T);
1673 if (auto *F = N.getRawFile())
1674 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1675}
1676
1677void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1678 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1679 N.getTag() == dwarf::DW_TAG_imported_declaration,
1680 "invalid tag", &N);
1681 if (auto *S = N.getRawScope())
1682 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1683 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1684 N.getRawEntity());
1685}
1686
1687void Verifier::visitComdat(const Comdat &C) {
1688 // In COFF the Module is invalid if the GlobalValue has private linkage.
1689 // Entities with private linkage don't have entries in the symbol table.
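 // For example (illustrative), "$f = comdat any" paired with
 // "define private void @f() comdat { ... }" would be rejected on COFF,
 // because the private @f has no symbol table entry for the comdat to use.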
1690 if (TT.isOSBinFormatCOFF())
1691 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1692 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1693 GV);
1694}
1695
1696void Verifier::visitModuleIdents() {
1697 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1698 if (!Idents)
1699 return;
1700
1701 // llvm.ident takes a list of metadata entries, each containing a single
1702 // string. Scan each llvm.ident entry and make sure this requirement is met.
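 // For example, a well-formed module might contain (illustrative):
 //   !llvm.ident = !{!0}
 //   !0 = !{!"compiler version string"}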
1703 for (const MDNode *N : Idents->operands()) {
1704 Check(N->getNumOperands() == 1,
1705 "incorrect number of operands in llvm.ident metadata", N);
1706 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1707 ("invalid value for llvm.ident metadata entry operand"
1708 "(the operand should be a string)"),
1709 N->getOperand(0));
1710 }
1711}
1712
1713void Verifier::visitModuleCommandLines() {
1714 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1715 if (!CommandLines)
1716 return;
1717
1718 // llvm.commandline takes a list of metadata entries, each containing a
1719 // single string. Scan each llvm.commandline entry and make sure this
1720 // requirement is met.
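 // For example (illustrative):
 //   !llvm.commandline = !{!0}
 //   !0 = !{!"-cc1 -O2 example.c"}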
1721 for (const MDNode *N : CommandLines->operands()) {
1722 Check(N->getNumOperands() == 1,
1723 "incorrect number of operands in llvm.commandline metadata", N);
1724 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1725 ("invalid value for llvm.commandline metadata entry operand"
1726 "(the operand should be a string)"),
1727 N->getOperand(0));
1728 }
1729}
1730
1731void Verifier::visitModuleFlags() {
1732 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1733 if (!Flags) return;
1734
1735 // Scan each flag, and track the flags and requirements.
1736 DenseMap<const MDString*, const MDNode*> SeenIDs;
1737 SmallVector<const MDNode*, 16> Requirements;
1738 uint64_t PAuthABIPlatform = -1;
1739 uint64_t PAuthABIVersion = -1;
1740 for (const MDNode *MDN : Flags->operands()) {
1741 visitModuleFlag(MDN, SeenIDs, Requirements);
1742 if (MDN->getNumOperands() != 3)
1743 continue;
1744 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1745 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1746 if (const auto *PAP =
1747 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1748 PAuthABIPlatform = PAP->getZExtValue();
1749 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1750 if (const auto *PAV =
1751 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1752 PAuthABIVersion = PAV->getZExtValue();
1753 }
1754 }
1755 }
1756
1757 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1758 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1759 "'aarch64-elf-pauthabi-version' module flags must be present");
1760
1761 // Validate that the requirements in the module are valid.
1762 for (const MDNode *Requirement : Requirements) {
1763 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1764 const Metadata *ReqValue = Requirement->getOperand(1);
1765
1766 const MDNode *Op = SeenIDs.lookup(Flag);
1767 if (!Op) {
1768 CheckFailed("invalid requirement on flag, flag is not present in module",
1769 Flag);
1770 continue;
1771 }
1772
1773 if (Op->getOperand(2) != ReqValue) {
1774 CheckFailed(("invalid requirement on flag, "
1775 "flag does not have the required value"),
1776 Flag);
1777 continue;
1778 }
1779 }
1780}
1781
1782void
1783Verifier::visitModuleFlag(const MDNode *Op,
1784 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1785 SmallVectorImpl<const MDNode *> &Requirements) {
1786 // Each module flag should have three arguments, the merge behavior (a
1787 // constant int), the flag ID (an MDString), and the value.
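 // For example, a well-formed flag entry looks like (illustrative):
 //   !0 = !{i32 1, !"wchar_size", i32 4}
 // i.e. behavior Error (1), flag ID "wchar_size", value 4.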
1788 Check(Op->getNumOperands() == 3,
1789 "incorrect number of operands in module flag", Op);
1790 Module::ModFlagBehavior MFB;
1791 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1792 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1793 "invalid behavior operand in module flag (expected constant integer)",
1794 Op->getOperand(0));
1795 Check(false,
1796 "invalid behavior operand in module flag (unexpected constant)",
1797 Op->getOperand(0));
1798 }
1799 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1800 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1801 Op->getOperand(1));
1802
1803 // Check the values for behaviors with additional requirements.
1804 switch (MFB) {
1805 case Module::Error:
1806 case Module::Warning:
1807 case Module::Override:
1808 // These behavior types accept any value.
1809 break;
1810
1811 case Module::Min: {
1812 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1813 Check(V && V->getValue().isNonNegative(),
1814 "invalid value for 'min' module flag (expected constant non-negative "
1815 "integer)",
1816 Op->getOperand(2));
1817 break;
1818 }
1819
1820 case Module::Max: {
1821 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1822 "invalid value for 'max' module flag (expected constant integer)",
1823 Op->getOperand(2));
1824 break;
1825 }
1826
1827 case Module::Require: {
1828 // The value should itself be an MDNode with two operands, a flag ID (an
1829 // MDString), and a value.
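 // For example (illustrative), !{i32 3, !"foo-check", !{!"foo", i32 1}} is a
 // 'require' flag whose value pair demands that flag "foo" also be present in
 // the module with value 1; that is validated later in visitModuleFlags().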
1830 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1831 Check(Value && Value->getNumOperands() == 2,
1832 "invalid value for 'require' module flag (expected metadata pair)",
1833 Op->getOperand(2));
1834 Check(isa<MDString>(Value->getOperand(0)),
1835 ("invalid value for 'require' module flag "
1836 "(first value operand should be a string)"),
1837 Value->getOperand(0));
1838
1839 // Append it to the list of requirements, to check once all module flags are
1840 // scanned.
1841 Requirements.push_back(Value);
1842 break;
1843 }
1844
1845 case Module::Append:
1846 case Module::AppendUnique: {
1847 // These behavior types require the operand be an MDNode.
1848 Check(isa<MDNode>(Op->getOperand(2)),
1849 "invalid value for 'append'-type module flag "
1850 "(expected a metadata node)",
1851 Op->getOperand(2));
1852 break;
1853 }
1854 }
1855
1856 // Unless this is a "requires" flag, check the ID is unique.
1857 if (MFB != Module::Require) {
1858 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1859 Check(Inserted,
1860 "module flag identifiers must be unique (or of 'require' type)", ID);
1861 }
1862
1863 if (ID->getString() == "wchar_size") {
1864 ConstantInt *Value
1865 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1866 Check(Value, "wchar_size metadata requires constant integer argument");
1867 }
1868
1869 if (ID->getString() == "Linker Options") {
1870 // If the llvm.linker.options named metadata exists, we assume that the
1871 // bitcode reader has upgraded the module flag. Otherwise the flag might
1872 // have been created by a client directly.
1873 Check(M.getNamedMetadata("llvm.linker.options"),
1874 "'Linker Options' named metadata no longer supported");
1875 }
1876
1877 if (ID->getString() == "SemanticInterposition") {
1878 ConstantInt *Value =
1879 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1880 Check(Value,
1881 "SemanticInterposition metadata requires constant integer argument");
1882 }
1883
1884 if (ID->getString() == "CG Profile") {
1885 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1886 visitModuleFlagCGProfileEntry(MDO);
1887 }
1888}
1889
1890void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1891 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1892 if (!FuncMDO)
1893 return;
1894 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1895 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1896 "expected a Function or null", FuncMDO);
1897 };
1898 auto Node = dyn_cast_or_null<MDNode>(MDO);
1899 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1900 CheckFunction(Node->getOperand(0));
1901 CheckFunction(Node->getOperand(1));
1902 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1903 Check(Count && Count->getType()->isIntegerTy(),
1904 "expected an integer constant", Node->getOperand(2));
1905}
1906
1907void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1908 for (Attribute A : Attrs) {
1909
1910 if (A.isStringAttribute()) {
1911#define GET_ATTR_NAMES
1912#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1913#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1914 if (A.getKindAsString() == #DISPLAY_NAME) { \
1915 auto V = A.getValueAsString(); \
1916 if (!(V.empty() || V == "true" || V == "false")) \
1917 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1918 ""); \
1919 }
1920
1921#include "llvm/IR/Attributes.inc"
1922 continue;
1923 }
1924
1925 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1926 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1927 V);
1928 return;
1929 }
1930 }
1931}
1932
1933// VerifyParameterAttrs - Check the given attributes for an argument or return
1934// value of the specified type. The value V is printed in error messages.
1935void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1936 const Value *V) {
1937 if (!Attrs.hasAttributes())
1938 return;
1939
1940 verifyAttributeTypes(Attrs, V);
1941
1942 for (Attribute Attr : Attrs)
1943 Check(Attr.isStringAttribute() ||
1944 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1945 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1946 V);
1947
1948 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1949 Check(Attrs.getNumAttributes() == 1,
1950 "Attribute 'immarg' is incompatible with other attributes", V);
1951 }
1952
1953 // Check for mutually incompatible attributes. Only inreg is compatible with
1954 // sret.
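 // For example (illustrative), a parameter declared as
 //   ptr byval(i32) inalloca(i32) %p
 // trips this check, while "ptr sret(i32) inreg %p" is still accepted.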
1955 unsigned AttrCount = 0;
1956 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1957 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1958 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1959 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1960 Attrs.hasAttribute(Attribute::InReg);
1961 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1962 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1963 Check(AttrCount <= 1,
1964 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1965 "'byref', and 'sret' are incompatible!",
1966 V);
1967
1968 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1969 Attrs.hasAttribute(Attribute::ReadOnly)),
1970 "Attributes "
1971 "'inalloca and readonly' are incompatible!",
1972 V);
1973
1974 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1975 Attrs.hasAttribute(Attribute::Returned)),
1976 "Attributes "
1977 "'sret and returned' are incompatible!",
1978 V);
1979
1980 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1981 Attrs.hasAttribute(Attribute::SExt)),
1982 "Attributes "
1983 "'zeroext and signext' are incompatible!",
1984 V);
1985
1986 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1987 Attrs.hasAttribute(Attribute::ReadOnly)),
1988 "Attributes "
1989 "'readnone and readonly' are incompatible!",
1990 V);
1991
1992 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1993 Attrs.hasAttribute(Attribute::WriteOnly)),
1994 "Attributes "
1995 "'readnone and writeonly' are incompatible!",
1996 V);
1997
1998 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1999 Attrs.hasAttribute(Attribute::WriteOnly)),
2000 "Attributes "
2001 "'readonly and writeonly' are incompatible!",
2002 V);
2003
2004 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2005 Attrs.hasAttribute(Attribute::AlwaysInline)),
2006 "Attributes "
2007 "'noinline and alwaysinline' are incompatible!",
2008 V);
2009
2010 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2011 Attrs.hasAttribute(Attribute::ReadNone)),
2012 "Attributes writable and readnone are incompatible!", V);
2013
2014 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2015 Attrs.hasAttribute(Attribute::ReadOnly)),
2016 "Attributes writable and readonly are incompatible!", V);
2017
2018 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2019 for (Attribute Attr : Attrs) {
2020 if (!Attr.isStringAttribute() &&
2021 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2022 CheckFailed("Attribute '" + Attr.getAsString() +
2023 "' applied to incompatible type!", V);
2024 return;
2025 }
2026 }
2027
2028 if (isa<PointerType>(Ty)) {
2029 if (Attrs.hasAttribute(Attribute::Alignment)) {
2030 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2031 Check(AttrAlign.value() <= Value::MaximumAlignment,
2032 "huge alignment values are unsupported", V);
2033 }
2034 if (Attrs.hasAttribute(Attribute::ByVal)) {
2035 Type *ByValTy = Attrs.getByValType();
2036 SmallPtrSet<Type *, 4> Visited;
2037 Check(ByValTy->isSized(&Visited),
2038 "Attribute 'byval' does not support unsized types!", V);
2039 // Check if it is or contains a target extension type that disallows being
2040 // used on the stack.
2042 "'byval' argument has illegal target extension type", V);
2043 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2044 "huge 'byval' arguments are unsupported", V);
2045 }
2046 if (Attrs.hasAttribute(Attribute::ByRef)) {
2047 SmallPtrSet<Type *, 4> Visited;
2048 Check(Attrs.getByRefType()->isSized(&Visited),
2049 "Attribute 'byref' does not support unsized types!", V);
2050 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2051 (1ULL << 32),
2052 "huge 'byref' arguments are unsupported", V);
2053 }
2054 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2055 SmallPtrSet<Type *, 4> Visited;
2056 Check(Attrs.getInAllocaType()->isSized(&Visited),
2057 "Attribute 'inalloca' does not support unsized types!", V);
2058 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2059 (1ULL << 32),
2060 "huge 'inalloca' arguments are unsupported", V);
2061 }
2062 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2063 SmallPtrSet<Type *, 4> Visited;
2064 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2065 "Attribute 'preallocated' does not support unsized types!", V);
2066 Check(
2067 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2068 (1ULL << 32),
2069 "huge 'preallocated' arguments are unsupported", V);
2070 }
2071 }
2072
2073 if (Attrs.hasAttribute(Attribute::Initializes)) {
2074 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2075 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2076 V);
2078 "Attribute 'initializes' does not support unordered ranges", V);
2079 }
2080
2081 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2082 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2083 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2084 V);
2085 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2086 "Invalid value for 'nofpclass' test mask", V);
2087 }
2088 if (Attrs.hasAttribute(Attribute::Range)) {
2089 const ConstantRange &CR =
2090 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2092 "Range bit width must match type bit width!", V);
2093 }
2094}
2095
2096void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2097 const Value *V) {
2098 if (Attrs.hasFnAttr(Attr)) {
2099 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2100 unsigned N;
2101 if (S.getAsInteger(10, N))
2102 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2103 }
2104}
2105
2106// Check parameter attributes against a function type.
2107// The value V is printed in error messages.
2108void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2109 const Value *V, bool IsIntrinsic,
2110 bool IsInlineAsm) {
2111 if (Attrs.isEmpty())
2112 return;
2113
2114 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2115 Check(Attrs.hasParentContext(Context),
2116 "Attribute list does not match Module context!", &Attrs, V);
2117 for (const auto &AttrSet : Attrs) {
2118 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2119 "Attribute set does not match Module context!", &AttrSet, V);
2120 for (const auto &A : AttrSet) {
2121 Check(A.hasParentContext(Context),
2122 "Attribute does not match Module context!", &A, V);
2123 }
2124 }
2125 }
2126
2127 bool SawNest = false;
2128 bool SawReturned = false;
2129 bool SawSRet = false;
2130 bool SawSwiftSelf = false;
2131 bool SawSwiftAsync = false;
2132 bool SawSwiftError = false;
2133
2134 // Verify return value attributes.
2135 AttributeSet RetAttrs = Attrs.getRetAttrs();
2136 for (Attribute RetAttr : RetAttrs)
2137 Check(RetAttr.isStringAttribute() ||
2138 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2139 "Attribute '" + RetAttr.getAsString() +
2140 "' does not apply to function return values",
2141 V);
2142
2143 unsigned MaxParameterWidth = 0;
2144 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2145 if (Ty->isVectorTy()) {
2146 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2147 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2148 if (Size > MaxParameterWidth)
2149 MaxParameterWidth = Size;
2150 }
2151 }
2152 };
2153 GetMaxParameterWidth(FT->getReturnType());
2154 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2155
2156 // Verify parameter attributes.
2157 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2158 Type *Ty = FT->getParamType(i);
2159 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2160
2161 if (!IsIntrinsic) {
2162 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2163 "immarg attribute only applies to intrinsics", V);
2164 if (!IsInlineAsm)
2165 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2166 "Attribute 'elementtype' can only be applied to intrinsics"
2167 " and inline asm.",
2168 V);
2169 }
2170
2171 verifyParameterAttrs(ArgAttrs, Ty, V);
2172 GetMaxParameterWidth(Ty);
2173
2174 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2175 Check(!SawNest, "More than one parameter has attribute nest!", V);
2176 SawNest = true;
2177 }
2178
2179 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2180 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2181 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2182 "Incompatible argument and return types for 'returned' attribute",
2183 V);
2184 SawReturned = true;
2185 }
2186
2187 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2188 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2189 Check(i == 0 || i == 1,
2190 "Attribute 'sret' is not on first or second parameter!", V);
2191 SawSRet = true;
2192 }
2193
2194 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2195 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2196 SawSwiftSelf = true;
2197 }
2198
2199 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2200 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2201 SawSwiftAsync = true;
2202 }
2203
2204 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2205 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2206 SawSwiftError = true;
2207 }
2208
2209 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2210 Check(i == FT->getNumParams() - 1,
2211 "inalloca isn't on the last parameter!", V);
2212 }
2213 }
2214
2215 if (!Attrs.hasFnAttrs())
2216 return;
2217
2218 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2219 for (Attribute FnAttr : Attrs.getFnAttrs())
2220 Check(FnAttr.isStringAttribute() ||
2221 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2222 "Attribute '" + FnAttr.getAsString() +
2223 "' does not apply to functions!",
2224 V);
2225
2226 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2227 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2228 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2229
2230 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2231 Check(Attrs.hasFnAttr(Attribute::NoInline),
2232 "Attribute 'optnone' requires 'noinline'!", V);
2233
2234 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2235 "Attributes 'optsize and optnone' are incompatible!", V);
2236
2237 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2238 "Attributes 'minsize and optnone' are incompatible!", V);
2239
2240 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2241 "Attributes 'optdebug and optnone' are incompatible!", V);
2242 }
2243
2244 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2245 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2246 "Attributes "
2247 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2248 V);
2249
2250 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2251 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2252 "Attributes 'optsize and optdebug' are incompatible!", V);
2253
2254 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2255 "Attributes 'minsize and optdebug' are incompatible!", V);
2256 }
2257
2258 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2259 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2260 "Attribute writable and memory without argmem: write are incompatible!",
2261 V);
2262
2263 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2264 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2265 "Attributes 'aarch64_pstate_sm_enabled and "
2266 "aarch64_pstate_sm_compatible' are incompatible!",
2267 V);
2268 }
2269
2270 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2271 Attrs.hasFnAttr("aarch64_inout_za") +
2272 Attrs.hasFnAttr("aarch64_out_za") +
2273 Attrs.hasFnAttr("aarch64_preserves_za") +
2274 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2275 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2276 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2277 "'aarch64_za_state_agnostic' are mutually exclusive",
2278 V);
2279
2280 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2281 Attrs.hasFnAttr("aarch64_in_zt0") +
2282 Attrs.hasFnAttr("aarch64_inout_zt0") +
2283 Attrs.hasFnAttr("aarch64_out_zt0") +
2284 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2285 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2286 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2287 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2288 "'aarch64_za_state_agnostic' are mutually exclusive",
2289 V);
2290
2291 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2292 const GlobalValue *GV = cast<GlobalValue>(V);
2294 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2295 }
2296
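 // 'allocsize' names call parameters by index, e.g. (illustrative):
 //   declare ptr @my_calloc(i64, i64) allocsize(0, 1)
 // Each referenced index must exist and refer to an integer parameter.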
2297 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2298 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2299 if (ParamNo >= FT->getNumParams()) {
2300 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2301 return false;
2302 }
2303
2304 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2305 CheckFailed("'allocsize' " + Name +
2306 " argument must refer to an integer parameter",
2307 V);
2308 return false;
2309 }
2310
2311 return true;
2312 };
2313
2314 if (!CheckParam("element size", Args->first))
2315 return;
2316
2317 if (Args->second && !CheckParam("number of elements", *Args->second))
2318 return;
2319 }
2320
2321 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2322 AllocFnKind K = Attrs.getAllocKind();
2323 AllocFnKind Type =
2324 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2325 if (!is_contained(
2326 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2327 Type))
2328 CheckFailed(
2329 "'allockind()' requires exactly one of alloc, realloc, and free");
2330 if ((Type == AllocFnKind::Free) &&
2331 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2332 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2333 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2334 "or aligned modifiers.");
2335 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2336 if ((K & ZeroedUninit) == ZeroedUninit)
2337 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2338 }
2339
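 // E.g. (illustrative) "vscale_range(2,16)" is accepted below; a zero or
 // non-power-of-two minimum or maximum is diagnosed.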
2340 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2341 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2342 if (VScaleMin == 0)
2343 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2344 else if (!isPowerOf2_32(VScaleMin))
2345 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2346 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2347 if (VScaleMax && VScaleMin > VScaleMax)
2348 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2349 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2350 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2351 }
2352
2353 if (Attrs.hasFnAttr("frame-pointer")) {
2354 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2355 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2356 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2357 }
2358
2359 // Check EVEX512 feature.
2360 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2361 TT.isX86()) {
2362 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2363 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2364 "512-bit vector arguments require 'evex512' for AVX512", V);
2365 }
2366
2367 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2368 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2369 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2370
2371 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2372 StringRef S = A.getValueAsString();
2373 if (S != "none" && S != "all" && S != "non-leaf")
2374 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2375 }
2376
2377 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2378 StringRef S = A.getValueAsString();
2379 if (S != "a_key" && S != "b_key")
2380 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2381 V);
2382 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2383 CheckFailed(
2384 "'sign-return-address-key' present without `sign-return-address`");
2385 }
2386 }
2387
2388 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2389 StringRef S = A.getValueAsString();
2390 if (S != "" && S != "true" && S != "false")
2391 CheckFailed(
2392 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2393 }
2394
2395 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2396 StringRef S = A.getValueAsString();
2397 if (S != "" && S != "true" && S != "false")
2398 CheckFailed(
2399 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2400 }
2401
2402 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2403 StringRef S = A.getValueAsString();
2404 if (S != "" && S != "true" && S != "false")
2405 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2406 V);
2407 }
2408
2409 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2410 StringRef S = A.getValueAsString();
2411 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2412 if (!Info)
2413 CheckFailed("invalid name for a VFABI variant: " + S, V);
2414 }
2415
2416 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2417 StringRef S = A.getValueAsString();
2419 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2420 }
2421
2422 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2423 StringRef S = A.getValueAsString();
2425 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2426 V);
2427 }
2428}
2429
2430void Verifier::verifyFunctionMetadata(
2431 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2432 for (const auto &Pair : MDs) {
2433 if (Pair.first == LLVMContext::MD_prof) {
2434 MDNode *MD = Pair.second;
2435 Check(MD->getNumOperands() >= 2,
2436 "!prof annotations should have no less than 2 operands", MD);
2437
2438 // Check first operand.
2439 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2440 MD);
2441 Check(isa<MDString>(MD->getOperand(0)),
2442 "expected string with name of the !prof annotation", MD);
2443 MDString *MDS = cast<MDString>(MD->getOperand(0));
2444 StringRef ProfName = MDS->getString();
2445 Check(ProfName == "function_entry_count" ||
2446 ProfName == "synthetic_function_entry_count",
2447 "first operand should be 'function_entry_count'"
2448 " or 'synthetic_function_entry_count'",
2449 MD);
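 // For example (illustrative):
 //   define void @f() !prof !0 { ... }
 //   !0 = !{!"function_entry_count", i64 1000}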
2450
2451 // Check second operand.
2452 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2453 MD);
2454 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2455 "expected integer argument to function_entry_count", MD);
2456 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2457 MDNode *MD = Pair.second;
2458 Check(MD->getNumOperands() == 1,
2459 "!kcfi_type must have exactly one operand", MD);
2460 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2461 MD);
2462 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2463 "expected a constant operand for !kcfi_type", MD);
2464 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2465 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2466 "expected a constant integer operand for !kcfi_type", MD);
2467 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2468 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2469 }
2470 }
2471}
2472
2473void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2474 if (!ConstantExprVisited.insert(EntryC).second)
2475 return;
2476
2477 SmallVector<const Constant *, 16> Stack;
2478 Stack.push_back(EntryC);
2479
2480 while (!Stack.empty()) {
2481 const Constant *C = Stack.pop_back_val();
2482
2483 // Check this constant expression.
2484 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2485 visitConstantExpr(CE);
2486
2487 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2488 visitConstantPtrAuth(CPA);
2489
2490 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2491 // Global Values get visited separately, but we do need to make sure
2492 // that the global value is in the correct module
2493 Check(GV->getParent() == &M, "Referencing global in another module!",
2494 EntryC, &M, GV, GV->getParent());
2495 continue;
2496 }
2497
2498 // Visit all sub-expressions.
2499 for (const Use &U : C->operands()) {
2500 const auto *OpC = dyn_cast<Constant>(U);
2501 if (!OpC)
2502 continue;
2503 if (!ConstantExprVisited.insert(OpC).second)
2504 continue;
2505 Stack.push_back(OpC);
2506 }
2507 }
2508}
2509
2510void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2511 if (CE->getOpcode() == Instruction::BitCast)
2512 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2513 CE->getType()),
2514 "Invalid bitcast", CE);
2515}
2516
2517void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2518 Check(CPA->getPointer()->getType()->isPointerTy(),
2519 "signed ptrauth constant base pointer must have pointer type");
2520
2521 Check(CPA->getType() == CPA->getPointer()->getType(),
2522 "signed ptrauth constant must have same type as its base pointer");
2523
2524 Check(CPA->getKey()->getBitWidth() == 32,
2525 "signed ptrauth constant key must be i32 constant integer");
2526
2528 "signed ptrauth constant address discriminator must be a pointer");
2529
2530 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2531 "signed ptrauth constant discriminator must be i64 constant integer");
2532}
2533
2534bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2535 // There shouldn't be more attribute sets than there are parameters plus the
2536 // function and return value.
2537 return Attrs.getNumAttrSets() <= Params + 2;
2538}
2539
2540void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2541 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2542 unsigned ArgNo = 0;
2543 unsigned LabelNo = 0;
2544 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2545 if (CI.Type == InlineAsm::isLabel) {
2546 ++LabelNo;
2547 continue;
2548 }
2549
2550 // Only deal with constraints that correspond to call arguments.
2551 if (!CI.hasArg())
2552 continue;
2553
2554 if (CI.isIndirect) {
2555 const Value *Arg = Call.getArgOperand(ArgNo);
2556 Check(Arg->getType()->isPointerTy(),
2557 "Operand for indirect constraint must have pointer type", &Call);
2558
2559 Check(Call.getParamElementType(ArgNo),
2560 "Operand for indirect constraint must have elementtype attribute",
2561 &Call);
2562 } else {
2563 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2564 "Elementtype attribute can only be applied for indirect "
2565 "constraints",
2566 &Call);
2567 }
2568
2569 ArgNo++;
2570 }
2571
2572 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2573 Check(LabelNo == CallBr->getNumIndirectDests(),
2574 "Number of label constraints does not match number of callbr dests",
2575 &Call);
2576 } else {
2577 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2578 &Call);
2579 }
2580}
2581
2582/// Verify that statepoint intrinsic is well formed.
2583void Verifier::verifyStatepoint(const CallBase &Call) {
2584 assert(Call.getCalledFunction() &&
2585 Call.getCalledFunction()->getIntrinsicID() ==
2586 Intrinsic::experimental_gc_statepoint);
2587
2588 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2589 !Call.onlyAccessesArgMemory(),
2590 "gc.statepoint must read and write all memory to preserve "
2591 "reordering restrictions required by safepoint semantics",
2592 Call);
2593
2594 const int64_t NumPatchBytes =
2595 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2596 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2597 Check(NumPatchBytes >= 0,
2598 "gc.statepoint number of patchable bytes must be "
2599 "positive",
2600 Call);
2601
2602 Type *TargetElemType = Call.getParamElementType(2);
2603 Check(TargetElemType,
2604 "gc.statepoint callee argument must have elementtype attribute", Call);
2605 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2606 Check(TargetFuncType,
2607 "gc.statepoint callee elementtype must be function type", Call);
2608
2609 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2610 Check(NumCallArgs >= 0,
2611 "gc.statepoint number of arguments to underlying call "
2612 "must be positive",
2613 Call);
2614 const int NumParams = (int)TargetFuncType->getNumParams();
2615 if (TargetFuncType->isVarArg()) {
2616 Check(NumCallArgs >= NumParams,
2617 "gc.statepoint mismatch in number of vararg call args", Call);
2618
2619 // TODO: Remove this limitation
2620 Check(TargetFuncType->getReturnType()->isVoidTy(),
2621 "gc.statepoint doesn't support wrapping non-void "
2622 "vararg functions yet",
2623 Call);
2624 } else
2625 Check(NumCallArgs == NumParams,
2626 "gc.statepoint mismatch in number of call args", Call);
2627
2628 const uint64_t Flags
2629 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2630 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2631 "unknown flag used in gc.statepoint flags argument", Call);
2632
2633 // Verify that the types of the call parameter arguments match
2634 // the type of the wrapped callee.
2635 AttributeList Attrs = Call.getAttributes();
2636 for (int i = 0; i < NumParams; i++) {
2637 Type *ParamType = TargetFuncType->getParamType(i);
2638 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2639 Check(ArgType == ParamType,
2640 "gc.statepoint call argument does not match wrapped "
2641 "function type",
2642 Call);
2643
2644 if (TargetFuncType->isVarArg()) {
2645 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2646 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2647 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2648 }
2649 }
2650
2651 const int EndCallArgsInx = 4 + NumCallArgs;
2652
2653 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2654 Check(isa<ConstantInt>(NumTransitionArgsV),
2655 "gc.statepoint number of transition arguments "
2656 "must be constant integer",
2657 Call);
2658 const int NumTransitionArgs =
2659 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2660 Check(NumTransitionArgs == 0,
2661 "gc.statepoint w/inline transition bundle is deprecated", Call);
2662 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2663
2664 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2665 Check(isa<ConstantInt>(NumDeoptArgsV),
2666 "gc.statepoint number of deoptimization arguments "
2667 "must be constant integer",
2668 Call);
2669 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2670 Check(NumDeoptArgs == 0,
2671 "gc.statepoint w/inline deopt operands is deprecated", Call);
2672
2673 const int ExpectedNumArgs = 7 + NumCallArgs;
2674 Check(ExpectedNumArgs == (int)Call.arg_size(),
2675 "gc.statepoint too many arguments", Call);
2676
2677 // Check that the only uses of this gc.statepoint are gc.result or
2678 // gc.relocate calls which are tied to this statepoint and thus part
2679 // of the same statepoint sequence
2680 for (const User *U : Call.users()) {
2681 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2682 Check(UserCall, "illegal use of statepoint token", Call, U);
2683 if (!UserCall)
2684 continue;
2685 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2686 "gc.result or gc.relocate are the only value uses "
2687 "of a gc.statepoint",
2688 Call, U);
2689 if (isa<GCResultInst>(UserCall)) {
2690 Check(UserCall->getArgOperand(0) == &Call,
2691 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2692 } else if (isa<GCRelocateInst>(UserCall)) {
2693 Check(UserCall->getArgOperand(0) == &Call,
2694 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2695 }
2696 }
2697
2698 // Note: It is legal for a single derived pointer to be listed multiple
2699 // times. It's non-optimal, but it is legal. It can also happen after
2700 // insertion if we strip a bitcast away.
2701 // Note: It is really tempting to check that each base is relocated and
2702 // that a derived pointer is never reused as a base pointer. This turns
2703 // out to be problematic since optimizations run after safepoint insertion
2704 // can recognize equality properties that the insertion logic doesn't know
2705 // about. See example statepoint.ll in the verifier subdirectory
2706}
2707
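// llvm.localescape records allocas of a parent function and llvm.localrecover
// refers back to them by index, e.g. (illustrative):
//   call void (...) @llvm.localescape(ptr %a, ptr %b)              ; 2 objects
//   %p = call ptr @llvm.localrecover(ptr @parent, ptr %fp, i32 1)  ; index < 2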
2708void Verifier::verifyFrameRecoverIndices() {
2709 for (auto &Counts : FrameEscapeInfo) {
2710 Function *F = Counts.first;
2711 unsigned EscapedObjectCount = Counts.second.first;
2712 unsigned MaxRecoveredIndex = Counts.second.second;
2713 Check(MaxRecoveredIndex <= EscapedObjectCount,
2714 "all indices passed to llvm.localrecover must be less than the "
2715 "number of arguments passed to llvm.localescape in the parent "
2716 "function",
2717 F);
2718 }
2719}
2720
2721static Instruction *getSuccPad(Instruction *Terminator) {
2722 BasicBlock *UnwindDest;
2723 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2724 UnwindDest = II->getUnwindDest();
2725 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2726 UnwindDest = CSI->getUnwindDest();
2727 else
2728 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2729 return UnwindDest->getFirstNonPHI();
2730}
2731
2732void Verifier::verifySiblingFuncletUnwinds() {
2733 SmallPtrSet<Instruction *, 8> Visited;
2734 SmallPtrSet<Instruction *, 8> Active;
2735 for (const auto &Pair : SiblingFuncletInfo) {
2736 Instruction *PredPad = Pair.first;
2737 if (Visited.count(PredPad))
2738 continue;
2739 Active.insert(PredPad);
2740 Instruction *Terminator = Pair.second;
2741 do {
2742 Instruction *SuccPad = getSuccPad(Terminator);
2743 if (Active.count(SuccPad)) {
2744 // Found a cycle; report error
2745 Instruction *CyclePad = SuccPad;
2746 SmallVector<Instruction *, 8> CycleNodes;
2747 do {
2748 CycleNodes.push_back(CyclePad);
2749 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2750 if (CycleTerminator != CyclePad)
2751 CycleNodes.push_back(CycleTerminator);
2752 CyclePad = getSuccPad(CycleTerminator);
2753 } while (CyclePad != SuccPad);
2754 Check(false, "EH pads can't handle each other's exceptions",
2755 ArrayRef<Instruction *>(CycleNodes));
2756 }
2757 // Don't re-walk a node we've already checked
2758 if (!Visited.insert(SuccPad).second)
2759 break;
2760 // Walk to this successor if it has a map entry.
2761 PredPad = SuccPad;
2762 auto TermI = SiblingFuncletInfo.find(PredPad);
2763 if (TermI == SiblingFuncletInfo.end())
2764 break;
2765 Terminator = TermI->second;
2766 Active.insert(PredPad);
2767 } while (true);
2768 // Each node only has one successor, so we've walked all the active
2769 // nodes' successors.
2770 Active.clear();
2771 }
2772}
2773
2774// visitFunction - Verify that a function is ok.
2775//
2776void Verifier::visitFunction(const Function &F) {
2777 visitGlobalValue(F);
2778
2779 // Check function arguments.
2780 FunctionType *FT = F.getFunctionType();
2781 unsigned NumArgs = F.arg_size();
2782
2783 Check(&Context == &F.getContext(),
2784 "Function context does not match Module context!", &F);
2785
2786 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2787 Check(FT->getNumParams() == NumArgs,
2788 "# formal arguments must match # of arguments for function type!", &F,
2789 FT);
2790 Check(F.getReturnType()->isFirstClassType() ||
2791 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2792 "Functions cannot return aggregate values!", &F);
2793
2794 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2795 "Invalid struct return type!", &F);
2796
2797 AttributeList Attrs = F.getAttributes();
2798
2799 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2800 "Attribute after last parameter!", &F);
2801
2802 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2803 "Function debug format should match parent module", &F,
2804 F.IsNewDbgInfoFormat, F.getParent(),
2805 F.getParent()->IsNewDbgInfoFormat);
2806
2807 bool IsIntrinsic = F.isIntrinsic();
2808
2809 // Check function attributes.
2810 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2811
2812 // On function declarations/definitions, we do not support the builtin
2813 // attribute. We do not check this in VerifyFunctionAttrs since that is
2814 // checking for Attributes that can/can not ever be on functions.
2815 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2816 "Attribute 'builtin' can only be applied to a callsite.", &F);
2817
2818 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2819 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2820
2821 if (Attrs.hasFnAttr(Attribute::Naked))
2822 for (const Argument &Arg : F.args())
2823 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2824
2825 // Check that this function meets the restrictions on this calling convention.
2826 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2827 // restrictions can be lifted.
2828 switch (F.getCallingConv()) {
2829 default:
2830 case CallingConv::C:
2831 break;
2832 case CallingConv::X86_INTR: {
2833 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2834 "Calling convention parameter requires byval", &F);
2835 break;
2836 }
2837 case CallingConv::AMDGPU_KERNEL:
2838 case CallingConv::SPIR_KERNEL:
2839 case CallingConv::AMDGPU_CS_Chain:
2840 case CallingConv::AMDGPU_CS_ChainPreserve:
2841 Check(F.getReturnType()->isVoidTy(),
2842 "Calling convention requires void return type", &F);
2843 [[fallthrough]];
2844 case CallingConv::AMDGPU_VS:
2845 case CallingConv::AMDGPU_HS:
2846 case CallingConv::AMDGPU_GS:
2847 case CallingConv::AMDGPU_PS:
2848 case CallingConv::AMDGPU_CS:
2849 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2850 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2851 const unsigned StackAS = DL.getAllocaAddrSpace();
2852 unsigned i = 0;
2853 for (const Argument &Arg : F.args()) {
2854 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2855 "Calling convention disallows byval", &F);
2856 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2857 "Calling convention disallows preallocated", &F);
2858 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2859 "Calling convention disallows inalloca", &F);
2860
2861 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2862 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2863 // value here.
2864 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2865 "Calling convention disallows stack byref", &F);
2866 }
2867
2868 ++i;
2869 }
2870 }
2871
2872 [[fallthrough]];
2873 case CallingConv::Fast:
2874 case CallingConv::Cold:
2875 case CallingConv::Intel_OCL_BI:
2876 case CallingConv::PTX_Kernel:
2877 case CallingConv::PTX_Device:
2878 Check(!F.isVarArg(),
2879 "Calling convention does not support varargs or "
2880 "perfect forwarding!",
2881 &F);
2882 break;
2883 }
2884
2885 // Check that the argument values match the function type for this function...
2886 unsigned i = 0;
2887 for (const Argument &Arg : F.args()) {
2888 Check(Arg.getType() == FT->getParamType(i),
2889 "Argument value does not match function argument type!", &Arg,
2890 FT->getParamType(i));
2891 Check(Arg.getType()->isFirstClassType(),
2892 "Function arguments must have first-class types!", &Arg);
2893 if (!IsIntrinsic) {
2894 Check(!Arg.getType()->isMetadataTy(),
2895 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2896 Check(!Arg.getType()->isTokenTy(),
2897 "Function takes token but isn't an intrinsic", &Arg, &F);
2898 Check(!Arg.getType()->isX86_AMXTy(),
2899 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2900 }
2901
2902 // Check that swifterror argument is only used by loads and stores.
2903 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2904 verifySwiftErrorValue(&Arg);
2905 }
2906 ++i;
2907 }
2908
2909 if (!IsIntrinsic) {
2910 Check(!F.getReturnType()->isTokenTy(),
2911 "Function returns a token but isn't an intrinsic", &F);
2912 Check(!F.getReturnType()->isX86_AMXTy(),
2913 "Function returns a x86_amx but isn't an intrinsic", &F);
2914 }
2915
2916 // Get the function metadata attachments.
2917 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2918 F.getAllMetadata(MDs);
2919 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2920 verifyFunctionMetadata(MDs);
2921
2922 // Check validity of the personality function
2923 if (F.hasPersonalityFn()) {
2924 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2925 if (Per)
2926 Check(Per->getParent() == F.getParent(),
2927 "Referencing personality function in another module!", &F,
2928 F.getParent(), Per, Per->getParent());
2929 }
2930
2931 // EH funclet coloring can be expensive, recompute on-demand
2932 BlockEHFuncletColors.clear();
2933
2934 if (F.isMaterializable()) {
2935 // Function has a body somewhere we can't see.
2936 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2937 MDs.empty() ? nullptr : MDs.front().second);
2938 } else if (F.isDeclaration()) {
2939 for (const auto &I : MDs) {
2940 // This is used for call site debug information.
2941 CheckDI(I.first != LLVMContext::MD_dbg ||
2942 !cast<DISubprogram>(I.second)->isDistinct(),
2943 "function declaration may only have a unique !dbg attachment",
2944 &F);
2945 Check(I.first != LLVMContext::MD_prof,
2946 "function declaration may not have a !prof attachment", &F);
2947
2948 // Verify the metadata itself.
2949 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2950 }
2951 Check(!F.hasPersonalityFn(),
2952 "Function declaration shouldn't have a personality routine", &F);
2953 } else {
2954 // Verify that this function (which has a body) is not named "llvm.*". It
2955 // is not legal to define intrinsics.
2956 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2957
2958 // Check the entry node
2959 const BasicBlock *Entry = &F.getEntryBlock();
2960 Check(pred_empty(Entry),
2961 "Entry block to function must not have predecessors!", Entry);
2962
2963 // The address of the entry block cannot be taken, unless it is dead.
2964 if (Entry->hasAddressTaken()) {
2965 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2966 "blockaddress may not be used with the entry block!", Entry);
2967 }
2968
2969 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2970 NumKCFIAttachments = 0;
2971 // Visit metadata attachments.
2972 for (const auto &I : MDs) {
2973 // Verify that the attachment is legal.
2974 auto AllowLocs = AreDebugLocsAllowed::No;
2975 switch (I.first) {
2976 default:
2977 break;
2978 case LLVMContext::MD_dbg: {
2979 ++NumDebugAttachments;
2980 CheckDI(NumDebugAttachments == 1,
2981 "function must have a single !dbg attachment", &F, I.second);
2982 CheckDI(isa<DISubprogram>(I.second),
2983 "function !dbg attachment must be a subprogram", &F, I.second);
2984 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2985 "function definition may only have a distinct !dbg attachment",
2986 &F);
2987
2988 auto *SP = cast<DISubprogram>(I.second);
2989 const Function *&AttachedTo = DISubprogramAttachments[SP];
2990 CheckDI(!AttachedTo || AttachedTo == &F,
2991 "DISubprogram attached to more than one function", SP, &F);
2992 AttachedTo = &F;
2993 AllowLocs = AreDebugLocsAllowed::Yes;
2994 break;
2995 }
2996 case LLVMContext::MD_prof:
2997 ++NumProfAttachments;
2998 Check(NumProfAttachments == 1,
2999 "function must have a single !prof attachment", &F, I.second);
3000 break;
3001 case LLVMContext::MD_kcfi_type:
3002 ++NumKCFIAttachments;
3003 Check(NumKCFIAttachments == 1,
3004 "function must have a single !kcfi_type attachment", &F,
3005 I.second);
3006 break;
3007 }
3008
3009 // Verify the metadata itself.
3010 visitMDNode(*I.second, AllowLocs);
3011 }
3012 }
3013
3014 // If this function is actually an intrinsic, verify that it is only used in
3015 // direct call/invokes, never having its "address taken".
3016 // Only do this if the module is materialized, otherwise we don't have all the
3017 // uses.
3018 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3019 const User *U;
3020 if (F.hasAddressTaken(&U, false, true, false,
3021 /*IgnoreARCAttachedCall=*/true))
3022 Check(false, "Invalid user of intrinsic instruction!", U);
3023 }
3024
3025 // Check intrinsics' signatures.
3026 switch (F.getIntrinsicID()) {
3027 case Intrinsic::experimental_gc_get_pointer_base: {
3028 FunctionType *FT = F.getFunctionType();
3029 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3030 Check(isa<PointerType>(F.getReturnType()),
3031 "gc.get.pointer.base must return a pointer", F);
3032 Check(FT->getParamType(0) == F.getReturnType(),
3033 "gc.get.pointer.base operand and result must be of the same type", F);
3034 break;
3035 }
3036 case Intrinsic::experimental_gc_get_pointer_offset: {
3037 FunctionType *FT = F.getFunctionType();
3038 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3039 Check(isa<PointerType>(FT->getParamType(0)),
3040 "gc.get.pointer.offset operand must be a pointer", F);
3041 Check(F.getReturnType()->isIntegerTy(),
3042 "gc.get.pointer.offset must return integer", F);
3043 break;
3044 }
3045 }
3046
3047 auto *N = F.getSubprogram();
3048 HasDebugInfo = (N != nullptr);
3049 if (!HasDebugInfo)
3050 return;
3051
3052 // Check that all !dbg attachments lead back to N.
3053 //
3054 // FIXME: Check this incrementally while visiting !dbg attachments.
3055 // FIXME: Only check when N is the canonical subprogram for F.
3056 SmallPtrSet<const MDNode *, 32> Seen;
3057 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3058 // Be careful about using DILocation here since we might be dealing with
3059 // broken code (this is the Verifier after all).
3060 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3061 if (!DL)
3062 return;
3063 if (!Seen.insert(DL).second)
3064 return;
3065
3066 Metadata *Parent = DL->getRawScope();
3067 CheckDI(Parent && isa<DILocalScope>(Parent),
3068 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3069
3070 DILocalScope *Scope = DL->getInlinedAtScope();
3071 Check(Scope, "Failed to find DILocalScope", DL);
3072
3073 if (!Seen.insert(Scope).second)
3074 return;
3075
3076 DISubprogram *SP = Scope->getSubprogram();
3077
3078 // Scope and SP could be the same MDNode and we don't want to skip
3079 // validation in that case
3080 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3081 return;
3082
3083 CheckDI(SP->describes(&F),
3084 "!dbg attachment points at wrong subprogram for function", N, &F,
3085 &I, DL, Scope, SP);
3086 };
3087 for (auto &BB : F)
3088 for (auto &I : BB) {
3089 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3090 // The llvm.loop annotations also contain two DILocations.
3091 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3092 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3093 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3094 if (BrokenDebugInfo)
3095 return;
3096 }
3097}
3098
3099// verifyBasicBlock - Verify that a basic block is well formed...
3100//
3101void Verifier::visitBasicBlock(BasicBlock &BB) {
3102 InstsInThisBlock.clear();
3103 ConvergenceVerifyHelper.visit(BB);
3104
3105 // Ensure that basic blocks have terminators!
3106 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3107
3108 // Check constraints that this basic block imposes on all of the PHI nodes in
3109 // it.
3110 if (isa<PHINode>(BB.front())) {
3111 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3112 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3113 llvm::sort(Preds);
3114 for (const PHINode &PN : BB.phis()) {
3115 Check(PN.getNumIncomingValues() == Preds.size(),
3116 "PHINode should have one entry for each predecessor of its "
3117 "parent basic block!",
3118 &PN);
3119
3120 // Get and sort all incoming values in the PHI node...
3121 Values.clear();
3122 Values.reserve(PN.getNumIncomingValues());
3123 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3124 Values.push_back(
3125 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3126 llvm::sort(Values);
3127
3128 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3129 // Check to make sure that if there is more than one entry for a
3130 // particular basic block in this PHI node, that the incoming values are
3131 // all identical.
3132 //
3133 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3134 Values[i].second == Values[i - 1].second,
3135 "PHI node has multiple entries for the same basic block with "
3136 "different incoming values!",
3137 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3138
3139 // Check to make sure that the predecessors and PHI node entries are
3140 // matched up.
3141 Check(Values[i].first == Preds[i],
3142 "PHI node entries do not match predecessors!", &PN,
3143 Values[i].first, Preds[i]);
3144 }
3145 }
3146 }
3147
3148 // Check that all instructions have their parent pointers set up correctly.
3149 for (auto &I : BB)
3150 {
3151 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3152 }
3153
3154 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3155 "BB debug format should match parent function", &BB,
3156 BB.IsNewDbgInfoFormat, BB.getParent(),
3157 BB.getParent()->IsNewDbgInfoFormat);
3158
3159 // Confirm that no issues arise from the debug program.
3160 if (BB.IsNewDbgInfoFormat)
3161 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3162 &BB);
3163}
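// For illustration (block and value names below are placeholders, not from this
// file): a fragment the predecessor/entry checks above would reject, because
// %merge has two predecessors but its phi lists an incoming value for only one:
//
//   a:
//     br label %merge
//   b:
//     br label %merge
//   merge:                                   ; preds = %a, %b
//     %v = phi i32 [ 1, %a ]                 ; no entry for %b
//     ret i32 %v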
3164
3165void Verifier::visitTerminator(Instruction &I) {
3166 // Ensure that terminators only exist at the end of the basic block.
3167 Check(&I == I.getParent()->getTerminator(),
3168 "Terminator found in the middle of a basic block!", I.getParent());
3169 visitInstruction(I);
3170}
3171
3172void Verifier::visitBranchInst(BranchInst &BI) {
3173 if (BI.isConditional()) {
3175 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3176 }
3177 visitTerminator(BI);
3178}
3179
3180void Verifier::visitReturnInst(ReturnInst &RI) {
3181 Function *F = RI.getParent()->getParent();
3182 unsigned N = RI.getNumOperands();
3183 if (F->getReturnType()->isVoidTy())
3184 Check(N == 0,
3185 "Found return instr that returns non-void in Function of void "
3186 "return type!",
3187 &RI, F->getReturnType());
3188 else
3189 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3190 "Function return type does not match operand "
3191 "type of return inst!",
3192 &RI, F->getReturnType());
3193
3194 // Check to make sure that the return value has necessary properties for
3195 // terminators...
3196 visitTerminator(RI);
3197}
3198
3199void Verifier::visitSwitchInst(SwitchInst &SI) {
3200 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3201 // Check to make sure that all of the constants in the switch instruction
3202 // have the same type as the switched-on value.
3203 Type *SwitchTy = SI.getCondition()->getType();
3204 SmallPtrSet<ConstantInt *, 32> Constants;
3205 for (auto &Case : SI.cases()) {
3206 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3207 "Case value is not a constant integer.", &SI);
3208 Check(Case.getCaseValue()->getType() == SwitchTy,
3209 "Switch constants must all be same type as switch value!", &SI);
3210 Check(Constants.insert(Case.getCaseValue()).second,
3211 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3212 }
3213
3214 visitTerminator(SI);
3215}
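// For illustration (names are placeholders): a switch the checks above reject,
// since case values must be ConstantInts of the condition's type and must be
// pairwise distinct:
//
//   switch i32 %x, label %default [ i32 7, label %bb1
//                                   i32 7, label %bb2 ]   ; duplicate case value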
3216
3217void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3219 "Indirectbr operand must have pointer type!", &BI);
3220 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3222 "Indirectbr destinations must all have pointer type!", &BI);
3223
3224 visitTerminator(BI);
3225}
3226
3227void Verifier::visitCallBrInst(CallBrInst &CBI) {
3228 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3229 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3230 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3231
3232 verifyInlineAsmCall(CBI);
3233 visitTerminator(CBI);
3234}
3235
3236void Verifier::visitSelectInst(SelectInst &SI) {
3237 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3238 SI.getOperand(2)),
3239 "Invalid operands for select instruction!", &SI);
3240
3241 Check(SI.getTrueValue()->getType() == SI.getType(),
3242 "Select values must have same type as select instruction!", &SI);
3243 visitInstruction(SI);
3244}
3245
3246/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3247 /// a pass; if any exist, it's an error.
3248///
3249void Verifier::visitUserOp1(Instruction &I) {
3250 Check(false, "User-defined operators should not live outside of a pass!", &I);
3251}
3252
3253void Verifier::visitTruncInst(TruncInst &I) {
3254 // Get the source and destination types
3255 Type *SrcTy = I.getOperand(0)->getType();
3256 Type *DestTy = I.getType();
3257
3258 // Get the size of the types in bits, we'll need this later
3259 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3260 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3261
3262 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3263 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3264 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3265 "trunc source and destination must both be a vector or neither", &I);
3266 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3267
3268 visitInstruction(I);
3269}
3270
3271void Verifier::visitZExtInst(ZExtInst &I) {
3272 // Get the source and destination types
3273 Type *SrcTy = I.getOperand(0)->getType();
3274 Type *DestTy = I.getType();
3275
3276 // Get the size of the types in bits, we'll need this later
3277 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3278 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3279 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3280 "zext source and destination must both be a vector or neither", &I);
3281 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3282 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3283
3284 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3285
3286 visitInstruction(I);
3287}
3288
3289void Verifier::visitSExtInst(SExtInst &I) {
3290 // Get the source and destination types
3291 Type *SrcTy = I.getOperand(0)->getType();
3292 Type *DestTy = I.getType();
3293
3294 // Get the size of the types in bits, we'll need this later
3295 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3296 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3297
3298 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3299 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3300 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3301 "sext source and destination must both be a vector or neither", &I);
3302 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3303
3304 visitInstruction(I);
3305}
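// For illustration (names are placeholders): operand and result widths the
// integer-cast checks above enforce; trunc must strictly shrink the scalar
// width, zext/sext must strictly grow it:
//
//   %a = trunc i32 %x to i64   ; rejected: destination wider than source
//   %b = zext i64 %y to i32    ; rejected: destination narrower than source
//   %c = sext i8 %z to i32     ; accepted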
3306
3307void Verifier::visitFPTruncInst(FPTruncInst &I) {
3308 // Get the source and destination types
3309 Type *SrcTy = I.getOperand(0)->getType();
3310 Type *DestTy = I.getType();
3311 // Get the size of the types in bits, we'll need this later
3312 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3313 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3314
3315 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3316 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3317 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3318 "fptrunc source and destination must both be a vector or neither", &I);
3319 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3320
3321 visitInstruction(I);
3322}
3323
3324void Verifier::visitFPExtInst(FPExtInst &I) {
3325 // Get the source and destination types
3326 Type *SrcTy = I.getOperand(0)->getType();
3327 Type *DestTy = I.getType();
3328
3329 // Get the size of the types in bits, we'll need this later
3330 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3331 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3332
3333 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3334 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3335 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3336 "fpext source and destination must both be a vector or neither", &I);
3337 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3338
3339 visitInstruction(I);
3340}
3341
3342void Verifier::visitUIToFPInst(UIToFPInst &I) {
3343 // Get the source and destination types
3344 Type *SrcTy = I.getOperand(0)->getType();
3345 Type *DestTy = I.getType();
3346
3347 bool SrcVec = SrcTy->isVectorTy();
3348 bool DstVec = DestTy->isVectorTy();
3349
3350 Check(SrcVec == DstVec,
3351 "UIToFP source and dest must both be vector or scalar", &I);
3352 Check(SrcTy->isIntOrIntVectorTy(),
3353 "UIToFP source must be integer or integer vector", &I);
3354 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3355 &I);
3356
3357 if (SrcVec && DstVec)
3358 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3359 cast<VectorType>(DestTy)->getElementCount(),
3360 "UIToFP source and dest vector length mismatch", &I);
3361
3362 visitInstruction(I);
3363}
3364
3365void Verifier::visitSIToFPInst(SIToFPInst &I) {
3366 // Get the source and destination types
3367 Type *SrcTy = I.getOperand(0)->getType();
3368 Type *DestTy = I.getType();
3369
3370 bool SrcVec = SrcTy->isVectorTy();
3371 bool DstVec = DestTy->isVectorTy();
3372
3373 Check(SrcVec == DstVec,
3374 "SIToFP source and dest must both be vector or scalar", &I);
3375 Check(SrcTy->isIntOrIntVectorTy(),
3376 "SIToFP source must be integer or integer vector", &I);
3377 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3378 &I);
3379
3380 if (SrcVec && DstVec)
3381 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3382 cast<VectorType>(DestTy)->getElementCount(),
3383 "SIToFP source and dest vector length mismatch", &I);
3384
3385 visitInstruction(I);
3386}
3387
3388void Verifier::visitFPToUIInst(FPToUIInst &I) {
3389 // Get the source and destination types
3390 Type *SrcTy = I.getOperand(0)->getType();
3391 Type *DestTy = I.getType();
3392
3393 bool SrcVec = SrcTy->isVectorTy();
3394 bool DstVec = DestTy->isVectorTy();
3395
3396 Check(SrcVec == DstVec,
3397 "FPToUI source and dest must both be vector or scalar", &I);
3398 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3399 Check(DestTy->isIntOrIntVectorTy(),
3400 "FPToUI result must be integer or integer vector", &I);
3401
3402 if (SrcVec && DstVec)
3403 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3404 cast<VectorType>(DestTy)->getElementCount(),
3405 "FPToUI source and dest vector length mismatch", &I);
3406
3407 visitInstruction(I);
3408}
3409
3410void Verifier::visitFPToSIInst(FPToSIInst &I) {
3411 // Get the source and destination types
3412 Type *SrcTy = I.getOperand(0)->getType();
3413 Type *DestTy = I.getType();
3414
3415 bool SrcVec = SrcTy->isVectorTy();
3416 bool DstVec = DestTy->isVectorTy();
3417
3418 Check(SrcVec == DstVec,
3419 "FPToSI source and dest must both be vector or scalar", &I);
3420 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3421 Check(DestTy->isIntOrIntVectorTy(),
3422 "FPToSI result must be integer or integer vector", &I);
3423
3424 if (SrcVec && DstVec)
3425 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3426 cast<VectorType>(DestTy)->getElementCount(),
3427 "FPToSI source and dest vector length mismatch", &I);
3428
3429 visitInstruction(I);
3430}
3431
3432void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3433 // Get the source and destination types
3434 Type *SrcTy = I.getOperand(0)->getType();
3435 Type *DestTy = I.getType();
3436
3437 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3438
3439 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3440 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3441 &I);
3442
3443 if (SrcTy->isVectorTy()) {
3444 auto *VSrc = cast<VectorType>(SrcTy);
3445 auto *VDest = cast<VectorType>(DestTy);
3446 Check(VSrc->getElementCount() == VDest->getElementCount(),
3447 "PtrToInt Vector width mismatch", &I);
3448 }
3449
3450 visitInstruction(I);
3451}
3452
3453void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3454 // Get the source and destination types
3455 Type *SrcTy = I.getOperand(0)->getType();
3456 Type *DestTy = I.getType();
3457
3458 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3459 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3460
3461 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3462 &I);
3463 if (SrcTy->isVectorTy()) {
3464 auto *VSrc = cast<VectorType>(SrcTy);
3465 auto *VDest = cast<VectorType>(DestTy);
3466 Check(VSrc->getElementCount() == VDest->getElementCount(),
3467 "IntToPtr Vector width mismatch", &I);
3468 }
3469 visitInstruction(I);
3470}
3471
3472void Verifier::visitBitCastInst(BitCastInst &I) {
3473 Check(
3474 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3475 "Invalid bitcast", &I);
3476 visitInstruction(I);
3477}
3478
3479void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3480 Type *SrcTy = I.getOperand(0)->getType();
3481 Type *DestTy = I.getType();
3482
3483 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3484 &I);
3485 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3486 &I);
3488 "AddrSpaceCast must be between different address spaces", &I);
3489 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3490 Check(SrcVTy->getElementCount() ==
3491 cast<VectorType>(DestTy)->getElementCount(),
3492 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3493 visitInstruction(I);
3494}
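// For illustration (names are placeholders): the address-space check above
// rejects a cast that stays within one address space:
//
//   %p = addrspacecast ptr addrspace(1) %q to ptr addrspace(1)   ; rejected
//   %r = addrspacecast ptr addrspace(1) %q to ptr addrspace(3)   ; accepted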
3495
3496/// visitPHINode - Ensure that a PHI node is well formed.
3497///
3498void Verifier::visitPHINode(PHINode &PN) {
3499 // Ensure that the PHI nodes are all grouped together at the top of the block.
3500 // This can be tested by checking whether the instruction before this is
3501 // either nonexistent (because this is begin()) or is a PHI node. If not,
3502 // then there is some other instruction before a PHI.
3503 Check(&PN == &PN.getParent()->front() ||
3504 isa<PHINode>(--BasicBlock::iterator(&PN)),
3505 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3506
3507 // Check that a PHI doesn't yield a Token.
3508 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3509
3510 // Check that all of the values of the PHI node have the same type as the
3511 // result.
3512 for (Value *IncValue : PN.incoming_values()) {
3513 Check(PN.getType() == IncValue->getType(),
3514 "PHI node operands are not the same type as the result!", &PN);
3515 }
3516
3517 // All other PHI node constraints are checked in the visitBasicBlock method.
3518
3519 visitInstruction(PN);
3520}
3521
3522void Verifier::visitCallBase(CallBase &Call) {
3523 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3524 "Called function must be a pointer!", Call);
3525 FunctionType *FTy = Call.getFunctionType();
3526
3527 // Verify that the correct number of arguments are being passed
3528 if (FTy->isVarArg())
3529 Check(Call.arg_size() >= FTy->getNumParams(),
3530 "Called function requires more parameters than were provided!", Call);
3531 else
3532 Check(Call.arg_size() == FTy->getNumParams(),
3533 "Incorrect number of arguments passed to called function!", Call);
3534
3535 // Verify that all arguments to the call match the function type.
3536 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3537 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3538 "Call parameter type does not match function signature!",
3539 Call.getArgOperand(i), FTy->getParamType(i), Call);
3540
3541 AttributeList Attrs = Call.getAttributes();
3542
3543 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3544 "Attribute after last parameter!", Call);
3545
3546 Function *Callee =
3547 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3548 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3549 if (IsIntrinsic)
3550 Check(Callee->getValueType() == FTy,
3551 "Intrinsic called with incompatible signature", Call);
3552
3553 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3554 // convention.
3555 auto CC = Call.getCallingConv();
3558 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3559 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3560 Call);
3561
3562 // Disallow passing/returning values with alignment higher than we can
3563 // represent.
3564 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3565 // necessary.
3566 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3567 if (!Ty->isSized())
3568 return;
3569 Align ABIAlign = DL.getABITypeAlign(Ty);
3570 Check(ABIAlign.value() <= Value::MaximumAlignment,
3571 "Incorrect alignment of " + Message + " to called function!", Call);
3572 };
3573
3574 if (!IsIntrinsic) {
3575 VerifyTypeAlign(FTy->getReturnType(), "return type");
3576 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3577 Type *Ty = FTy->getParamType(i);
3578 VerifyTypeAlign(Ty, "argument passed");
3579 }
3580 }
3581
3582 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3583 // Don't allow speculatable on call sites, unless the underlying function
3584 // declaration is also speculatable.
3585 Check(Callee && Callee->isSpeculatable(),
3586 "speculatable attribute may not apply to call sites", Call);
3587 }
3588
3589 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3590 Check(Call.getCalledFunction()->getIntrinsicID() ==
3591 Intrinsic::call_preallocated_arg,
3592 "preallocated as a call site attribute can only be on "
3593 "llvm.call.preallocated.arg");
3594 }
3595
3596 // Verify call attributes.
3597 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3598
3599 // Conservatively check the inalloca argument.
3600 // We have a bug if we can find that there is an underlying alloca without
3601 // inalloca.
3602 if (Call.hasInAllocaArgument()) {
3603 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3604 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3605 Check(AI->isUsedWithInAlloca(),
3606 "inalloca argument for call has mismatched alloca", AI, Call);
3607 }
3608
3609 // For each argument of the callsite, if it has the swifterror argument,
3610 // make sure the underlying alloca/parameter it comes from has a swifterror as
3611 // well.
3612 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3613 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3614 Value *SwiftErrorArg = Call.getArgOperand(i);
3615 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3616 Check(AI->isSwiftError(),
3617 "swifterror argument for call has mismatched alloca", AI, Call);
3618 continue;
3619 }
3620 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3621 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3622 SwiftErrorArg, Call);
3623 Check(ArgI->hasSwiftErrorAttr(),
3624 "swifterror argument for call has mismatched parameter", ArgI,
3625 Call);
3626 }
3627
3628 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3629 // Don't allow immarg on call sites, unless the underlying declaration
3630 // also has the matching immarg.
3631 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3632 "immarg may not apply only to call sites", Call.getArgOperand(i),
3633 Call);
3634 }
3635
3636 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3637 Value *ArgVal = Call.getArgOperand(i);
3638 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3639 "immarg operand has non-immediate parameter", ArgVal, Call);
3640 }
3641
3642 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3643 Value *ArgVal = Call.getArgOperand(i);
3644 bool hasOB =
3645 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3646 bool isMustTail = Call.isMustTailCall();
3647 Check(hasOB != isMustTail,
3648 "preallocated operand either requires a preallocated bundle or "
3649 "the call to be musttail (but not both)",
3650 ArgVal, Call);
3651 }
3652 }
3653
3654 if (FTy->isVarArg()) {
3655 // FIXME? is 'nest' even legal here?
3656 bool SawNest = false;
3657 bool SawReturned = false;
3658
3659 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3660 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3661 SawNest = true;
3662 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3663 SawReturned = true;
3664 }
3665
3666 // Check attributes on the varargs part.
3667 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3668 Type *Ty = Call.getArgOperand(Idx)->getType();
3669 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3670 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3671
3672 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3673 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3674 SawNest = true;
3675 }
3676
3677 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3678 Check(!SawReturned, "More than one parameter has attribute returned!",
3679 Call);
3680 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3681 "Incompatible argument and return types for 'returned' "
3682 "attribute",
3683 Call);
3684 SawReturned = true;
3685 }
3686
3687 // Statepoint intrinsic is vararg but the wrapped function may be not.
3688 // Allow sret here and check the wrapped function in verifyStatepoint.
3689 if (!Call.getCalledFunction() ||
3690 Call.getCalledFunction()->getIntrinsicID() !=
3691 Intrinsic::experimental_gc_statepoint)
3692 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3693 "Attribute 'sret' cannot be used for vararg call arguments!",
3694 Call);
3695
3696 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3697 Check(Idx == Call.arg_size() - 1,
3698 "inalloca isn't on the last argument!", Call);
3699 }
3700 }
3701
3702 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3703 if (!IsIntrinsic) {
3704 for (Type *ParamTy : FTy->params()) {
3705 Check(!ParamTy->isMetadataTy(),
3706 "Function has metadata parameter but isn't an intrinsic", Call);
3707 Check(!ParamTy->isTokenTy(),
3708 "Function has token parameter but isn't an intrinsic", Call);
3709 }
3710 }
3711
3712 // Verify that indirect calls don't return tokens.
3713 if (!Call.getCalledFunction()) {
3714 Check(!FTy->getReturnType()->isTokenTy(),
3715 "Return type cannot be token for indirect call!");
3716 Check(!FTy->getReturnType()->isX86_AMXTy(),
3717 "Return type cannot be x86_amx for indirect call!");
3718 }
3719
3720 if (Function *F = Call.getCalledFunction())
3721 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3722 visitIntrinsicCall(ID, Call);
3723
3724 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3725 // most one "gc-transition", at most one "cfguardtarget", at most one
3726 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3727 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3728 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3729 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3730 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3731 FoundAttachedCallBundle = false;
3732 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3733 OperandBundleUse BU = Call.getOperandBundleAt(i);
3734 uint32_t Tag = BU.getTagID();
3735 if (Tag == LLVMContext::OB_deopt) {
3736 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3737 FoundDeoptBundle = true;
3738 } else if (Tag == LLVMContext::OB_gc_transition) {
3739 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3740 Call);
3741 FoundGCTransitionBundle = true;
3742 } else if (Tag == LLVMContext::OB_funclet) {
3743 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3744 FoundFuncletBundle = true;
3745 Check(BU.Inputs.size() == 1,
3746 "Expected exactly one funclet bundle operand", Call);
3747 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3748 "Funclet bundle operands should correspond to a FuncletPadInst",
3749 Call);
3750 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3751 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3752 Call);
3753 FoundCFGuardTargetBundle = true;
3754 Check(BU.Inputs.size() == 1,
3755 "Expected exactly one cfguardtarget bundle operand", Call);
3756 } else if (Tag == LLVMContext::OB_ptrauth) {
3757 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3758 FoundPtrauthBundle = true;
3759 Check(BU.Inputs.size() == 2,
3760 "Expected exactly two ptrauth bundle operands", Call);
3761 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3762 BU.Inputs[0]->getType()->isIntegerTy(32),
3763 "Ptrauth bundle key operand must be an i32 constant", Call);
3764 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3765 "Ptrauth bundle discriminator operand must be an i64", Call);
3766 } else if (Tag == LLVMContext::OB_kcfi) {
3767 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3768 FoundKCFIBundle = true;
3769 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3770 Call);
3771 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3772 BU.Inputs[0]->getType()->isIntegerTy(32),
3773 "Kcfi bundle operand must be an i32 constant", Call);
3774 } else if (Tag == LLVMContext::OB_preallocated) {
3775 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3776 Call);
3777 FoundPreallocatedBundle = true;
3778 Check(BU.Inputs.size() == 1,
3779 "Expected exactly one preallocated bundle operand", Call);
3780 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3781 Check(Input &&
3782 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3783 "\"preallocated\" argument must be a token from "
3784 "llvm.call.preallocated.setup",
3785 Call);
3786 } else if (Tag == LLVMContext::OB_gc_live) {
3787 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3788 FoundGCLiveBundle = true;
3789 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3790 Check(!FoundAttachedCallBundle,
3791 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3792 FoundAttachedCallBundle = true;
3793 verifyAttachedCallBundle(Call, BU);
3794 }
3795 }
3796
3797 // Verify that callee and callsite agree on whether to use pointer auth.
3798 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3799 "Direct call cannot have a ptrauth bundle", Call);
3800
3801 // Verify that each inlinable callsite of a debug-info-bearing function in a
3802 // debug-info-bearing function has a debug location attached to it. Failure to
3803 // do so causes assertion failures when the inliner sets up inline scope info
3804 // (Interposable functions are not inlinable, neither are functions without
3805 // definitions.)
3806 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3807 !Call.getCalledFunction()->isInterposable() &&
3808 !Call.getCalledFunction()->isDeclaration() &&
3809 Call.getCalledFunction()->getSubprogram())
3810 CheckDI(Call.getDebugLoc(),
3811 "inlinable function call in a function with "
3812 "debug info must have a !dbg location",
3813 Call);
3814
3815 if (Call.isInlineAsm())
3816 verifyInlineAsmCall(Call);
3817
3818 ConvergenceVerifyHelper.visit(Call);
3819
3820 visitInstruction(Call);
3821}
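// For illustration (@f is a placeholder): a call site the operand-bundle checks
// above reject, because at most one bundle of each audited kind may appear:
//
//   call void @f() [ "deopt"(i32 0), "deopt"(i32 1) ]   ; multiple deopt bundles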
3822
3823void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3824 StringRef Context) {
3825 Check(!Attrs.contains(Attribute::InAlloca),
3826 Twine("inalloca attribute not allowed in ") + Context);
3827 Check(!Attrs.contains(Attribute::InReg),
3828 Twine("inreg attribute not allowed in ") + Context);
3829 Check(!Attrs.contains(Attribute::SwiftError),
3830 Twine("swifterror attribute not allowed in ") + Context);
3831 Check(!Attrs.contains(Attribute::Preallocated),
3832 Twine("preallocated attribute not allowed in ") + Context);
3833 Check(!Attrs.contains(Attribute::ByRef),
3834 Twine("byref attribute not allowed in ") + Context);
3835}
3836
3837/// Two types are "congruent" if they are identical, or if they are both pointer
3838/// types with different pointee types and the same address space.
3839static bool isTypeCongruent(Type *L, Type *R) {
3840 if (L == R)
3841 return true;
3842 PointerType *PL = dyn_cast<PointerType>(L);
3843 PointerType *PR = dyn_cast<PointerType>(R);
3844 if (!PL || !PR)
3845 return false;
3846 return PL->getAddressSpace() == PR->getAddressSpace();
3847}
3848
3849 static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3850 static const Attribute::AttrKind ABIAttrs[] = {
3851 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3852 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3853 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3854 Attribute::ByRef};
3855 AttrBuilder Copy(C);
3856 for (auto AK : ABIAttrs) {
3857 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3858 if (Attr.isValid())
3859 Copy.addAttribute(Attr);
3860 }
3861
3862 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3863 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3864 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3865 Attrs.hasParamAttr(I, Attribute::ByRef)))
3866 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3867 return Copy;
3868}
3869
3870void Verifier::verifyMustTailCall(CallInst &CI) {
3871 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3872
3873 Function *F = CI.getParent()->getParent();
3874 FunctionType *CallerTy = F->getFunctionType();
3875 FunctionType *CalleeTy = CI.getFunctionType();
3876 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3877 "cannot guarantee tail call due to mismatched varargs", &CI);
3878 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3879 "cannot guarantee tail call due to mismatched return types", &CI);
3880
3881 // - The calling conventions of the caller and callee must match.
3882 Check(F->getCallingConv() == CI.getCallingConv(),
3883 "cannot guarantee tail call due to mismatched calling conv", &CI);
3884
3885 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3886 // or a pointer bitcast followed by a ret instruction.
3887 // - The ret instruction must return the (possibly bitcasted) value
3888 // produced by the call or void.
3889 Value *RetVal = &CI;
3890 Instruction *Next = CI.getNextNode();
3891
3892 // Handle the optional bitcast.
3893 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3894 Check(BI->getOperand(0) == RetVal,
3895 "bitcast following musttail call must use the call", BI);
3896 RetVal = BI;
3897 Next = BI->getNextNode();
3898 }
3899
3900 // Check the return.
3901 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3902 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3903 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3904 isa<UndefValue>(Ret->getReturnValue()),
3905 "musttail call result must be returned", Ret);
3906
3907 AttributeList CallerAttrs = F->getAttributes();
3908 AttributeList CalleeAttrs = CI.getAttributes();
3909 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3910 CI.getCallingConv() == CallingConv::Tail) {
3911 StringRef CCName =
3912 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3913
3914 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3915 // are allowed in swifttailcc call
3916 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3917 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3918 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3919 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3920 }
3921 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3922 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3923 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3924 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3925 }
3926 // - Varargs functions are not allowed
3927 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3928 " tail call for varargs function");
3929 return;
3930 }
3931
3932 // - The caller and callee prototypes must match. Pointer types of
3933 // parameters or return types may differ in pointee type, but not
3934 // address space.
3935 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3936 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3937 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3938 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3939 Check(
3940 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3941 "cannot guarantee tail call due to mismatched parameter types", &CI);
3942 }
3943 }
3944
3945 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3946 // returned, preallocated, and inalloca, must match.
3947 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3948 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3949 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3950 Check(CallerABIAttrs == CalleeABIAttrs,
3951 "cannot guarantee tail call due to mismatched ABI impacting "
3952 "function attributes",
3953 &CI, CI.getOperand(I));
3954 }
3955}
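// For illustration (names are placeholders): the shape verifyMustTailCall
// accepts; the musttail call immediately precedes a ret of its result, and the
// caller and callee agree on prototype and calling convention:
//
//   declare i32 @callee(i32)
//   define i32 @caller(i32 %x) {
//     %r = musttail call i32 @callee(i32 %x)
//     ret i32 %r
//   }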
3956
3957void Verifier::visitCallInst(CallInst &CI) {
3958 visitCallBase(CI);
3959
3960 if (CI.isMustTailCall())
3961 verifyMustTailCall(CI);
3962}
3963
3964void Verifier::visitInvokeInst(InvokeInst &II) {
3965 visitCallBase(II);
3966
3967 // Verify that the first non-PHI instruction of the unwind destination is an
3968 // exception handling instruction.
3969 Check(
3970 II.getUnwindDest()->isEHPad(),
3971 "The unwind destination does not have an exception handling instruction!",
3972 &II);
3973
3974 visitTerminator(II);
3975}
3976
3977/// visitUnaryOperator - Check the argument to the unary operator.
3978///
3979void Verifier::visitUnaryOperator(UnaryOperator &U) {
3980 Check(U.getType() == U.getOperand(0)->getType(),
3981 "Unary operators must have same type for"
3982 "operands and result!",
3983 &U);
3984
3985 switch (U.getOpcode()) {
3986 // Check that floating-point arithmetic operators are only used with
3987 // floating-point operands.
3988 case Instruction::FNeg:
3989 Check(U.getType()->isFPOrFPVectorTy(),
3990 "FNeg operator only works with float types!", &U);
3991 break;
3992 default:
3993 llvm_unreachable("Unknown UnaryOperator opcode!");
3994 }
3995
3996 visitInstruction(U);
3997}
3998
3999/// visitBinaryOperator - Check that both arguments to the binary operator are
4000/// of the same type!
4001///
4002void Verifier::visitBinaryOperator(BinaryOperator &B) {
4003 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4004 "Both operands to a binary operator are not of the same type!", &B);
4005
4006 switch (B.getOpcode()) {
4007 // Check that integer arithmetic operators are only used with
4008 // integral operands.
4009 case Instruction::Add:
4010 case Instruction::Sub:
4011 case Instruction::Mul:
4012 case Instruction::SDiv:
4013 case Instruction::UDiv:
4014 case Instruction::SRem:
4015 case Instruction::URem:
4016 Check(B.getType()->isIntOrIntVectorTy(),
4017 "Integer arithmetic operators only work with integral types!", &B);
4018 Check(B.getType() == B.getOperand(0)->getType(),
4019 "Integer arithmetic operators must have same type "
4020 "for operands and result!",
4021 &B);
4022 break;
4023 // Check that floating-point arithmetic operators are only used with
4024 // floating-point operands.
4025 case Instruction::FAdd:
4026 case Instruction::FSub:
4027 case Instruction::FMul:
4028 case Instruction::FDiv:
4029 case Instruction::FRem:
4030 Check(B.getType()->isFPOrFPVectorTy(),
4031 "Floating-point arithmetic operators only work with "
4032 "floating-point types!",
4033 &B);
4034 Check(B.getType() == B.getOperand(0)->getType(),
4035 "Floating-point arithmetic operators must have same type "
4036 "for operands and result!",
4037 &B);
4038 break;
4039 // Check that logical operators are only used with integral operands.
4040 case Instruction::And:
4041 case Instruction::Or:
4042 case Instruction::Xor:
4043 Check(B.getType()->isIntOrIntVectorTy(),
4044 "Logical operators only work with integral types!", &B);
4045 Check(B.getType() == B.getOperand(0)->getType(),
4046 "Logical operators must have same type for operands and result!", &B);
4047 break;
4048 case Instruction::Shl:
4049 case Instruction::LShr:
4050 case Instruction::AShr:
4051 Check(B.getType()->isIntOrIntVectorTy(),
4052 "Shifts only work with integral types!", &B);
4053 Check(B.getType() == B.getOperand(0)->getType(),
4054 "Shift return type must be same as operands!", &B);
4055 break;
4056 default:
4057 llvm_unreachable("Unknown BinaryOperator opcode!");
4058 }
4059
4060 visitInstruction(B);
4061}
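// For illustration (names are placeholders): the opcode/type pairing enforced
// above; integer opcodes need integer (or integer vector) operands, FP opcodes
// need floating-point operands:
//
//   %a = add float %x, %y    ; rejected: use fadd for floating point
//   %b = fadd float %x, %y   ; accepted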
4062
4063void Verifier::visitICmpInst(ICmpInst &IC) {
4064 // Check that the operands are the same type
4065 Type *Op0Ty = IC.getOperand(0)->getType();
4066 Type *Op1Ty = IC.getOperand(1)->getType();
4067 Check(Op0Ty == Op1Ty,
4068 "Both operands to ICmp instruction are not of the same type!", &IC);
4069 // Check that the operands are the right type
4070 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4071 "Invalid operand types for ICmp instruction", &IC);
4072 // Check that the predicate is valid.
4073 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4074
4075 visitInstruction(IC);
4076}
4077
4078void Verifier::visitFCmpInst(FCmpInst &FC) {
4079 // Check that the operands are the same type
4080 Type *Op0Ty = FC.getOperand(0)->getType();
4081 Type *Op1Ty = FC.getOperand(1)->getType();
4082 Check(Op0Ty == Op1Ty,
4083 "Both operands to FCmp instruction are not of the same type!", &FC);
4084 // Check that the operands are the right type
4085 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4086 &FC);
4087 // Check that the predicate is valid.
4088 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4089
4090 visitInstruction(FC);
4091}
4092
4093void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4095 "Invalid extractelement operands!", &EI);
4096 visitInstruction(EI);
4097}
4098
4099void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4100 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4101 IE.getOperand(2)),
4102 "Invalid insertelement operands!", &IE);
4103 visitInstruction(IE);
4104}
4105
4106void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4107 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4108 SV.getShuffleMask()),
4109 "Invalid shufflevector operands!", &SV);
4110 visitInstruction(SV);
4111}
4112
4113void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4114 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4115
4116 Check(isa<PointerType>(TargetTy),
4117 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4118 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4119
4120 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4121 Check(!STy->isScalableTy(),
4122 "getelementptr cannot target structure that contains scalable vector"
4123 "type",
4124 &GEP);
4125 }
4126
4127 SmallVector<Value *, 16> Idxs(GEP.indices());
4128 Check(
4129 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4130 "GEP indexes must be integers", &GEP);
4131 Type *ElTy =
4132 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4133 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4134
4135 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4136
4137 Check(PtrTy && GEP.getResultElementType() == ElTy,
4138 "GEP is not of right type for indices!", &GEP, ElTy);
4139
4140 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4141 // Additional checks for vector GEPs.
4142 ElementCount GEPWidth = GEPVTy->getElementCount();
4143 if (GEP.getPointerOperandType()->isVectorTy())
4144 Check(
4145 GEPWidth ==
4146 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4147 "Vector GEP result width doesn't match operand's", &GEP);
4148 for (Value *Idx : Idxs) {
4149 Type *IndexTy = Idx->getType();
4150 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4151 ElementCount IndexWidth = IndexVTy->getElementCount();
4152 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4153 }
4154 Check(IndexTy->isIntOrIntVectorTy(),
4155 "All GEP indices should be of integer type");
4156 }
4157 }
4158
4159 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4160 "GEP address space doesn't match type", &GEP);
4161
4162 visitInstruction(GEP);
4163}
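// For illustration (names are placeholders): a vector GEP the width checks
// above reject, because the index vector length differs from the pointer
// vector length:
//
//   %p = getelementptr i32, <4 x ptr> %base, <2 x i64> %idx   ; rejected
//   %q = getelementptr i32, <4 x ptr> %base, <4 x i64> %idx   ; accepted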
4164
4165static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4166 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4167}
4168
4169/// Verify !range and !absolute_symbol metadata. These have the same
4170/// restrictions, except !absolute_symbol allows the full set.
4171void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4172 Type *Ty, RangeLikeMetadataKind Kind) {
4173 unsigned NumOperands = Range->getNumOperands();
4174 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4175 unsigned NumRanges = NumOperands / 2;
4176 Check(NumRanges >= 1, "It should have at least one range!", Range);
4177
4178 ConstantRange LastRange(1, true); // Dummy initial value
4179 for (unsigned i = 0; i < NumRanges; ++i) {
4180 ConstantInt *Low =
4181 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4182 Check(Low, "The lower limit must be an integer!", Low);
4183 ConstantInt *High =
4184 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4185 Check(High, "The upper limit must be an integer!", High);
4186
4187 Check(High->getType() == Low->getType(), "Range pair types must match!",
4188 &I);
4189
4190 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4191 Check(High->getType()->isIntegerTy(32),
4192 "noalias.addrspace type must be i32!", &I);
4193 } else {
4194 Check(High->getType() == Ty->getScalarType(),
4195 "Range types must match instruction type!", &I);
4196 }
4197
4198 APInt HighV = High->getValue();
4199 APInt LowV = Low->getValue();
4200
4201 // ConstantRange asserts if the ranges are the same except for the min/max
4202 // value. Leave the cases it tolerates for the empty range error below.
4203 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4204 "The upper and lower limits cannot be the same value", &I);
4205
4206 ConstantRange CurRange(LowV, HighV);
4207 Check(!CurRange.isEmptySet() &&
4208 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4209 !CurRange.isFullSet()),
4210 "Range must not be empty!", Range);
4211 if (i != 0) {
4212 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4213 "Intervals are overlapping", Range);
4214 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4215 Range);
4216 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4217 Range);
4218 }
4219 LastRange = ConstantRange(LowV, HighV);
4220 }
4221 if (NumRanges > 2) {
4222 APInt FirstLow =
4223 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4224 APInt FirstHigh =
4225 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4226 ConstantRange FirstRange(FirstLow, FirstHigh);
4227 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4228 "Intervals are overlapping", Range);
4229 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4230 Range);
4231 }
4232}
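// For illustration (names are placeholders): !range metadata satisfying the
// checks above; pairs of like-typed integers forming non-empty, ordered,
// non-overlapping, non-contiguous intervals:
//
//   %v = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 2, i8 10, i8 20}   ; allowed values in [0,2) and [10,20)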
4233
4234void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4235 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4236 "precondition violation");
4237 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4238}
4239
4240void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4241 Type *Ty) {
4242 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4243 "precondition violation");
4244 verifyRangeLikeMetadata(I, Range, Ty,
4245 RangeLikeMetadataKind::NoaliasAddrspace);
4246}
4247
4248void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4249 unsigned Size = DL.getTypeSizeInBits(Ty);
4250 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4251 Check(!(Size & (Size - 1)),
4252 "atomic memory access' operand must have a power-of-two size", Ty, I);
4253}
4254
4255void Verifier::visitLoadInst(LoadInst &LI) {
4256 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4257 Check(PTy, "Load operand must be a pointer.", &LI);
4258 Type *ElTy = LI.getType();
4259 if (MaybeAlign A = LI.getAlign()) {
4260 Check(A->value() <= Value::MaximumAlignment,
4261 "huge alignment values are unsupported", &LI);
4262 }
4263 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4264 if (LI.isAtomic()) {
4267 "Load cannot have Release ordering", &LI);
4268 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4269 "atomic load operand must have integer, pointer, or floating point "
4270 "type!",
4271 ElTy, &LI);
4272 checkAtomicMemAccessSize(ElTy, &LI);
4273 } else {
4275 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4276 }
4277
4278 visitInstruction(LI);
4279}
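// For illustration (names are placeholders): orderings the atomic-load checks
// above allow and reject:
//
//   %a = load atomic i32, ptr %p acquire, align 4   ; accepted
//   %b = load atomic i32, ptr %p release, align 4   ; rejected: release load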
4280
4281void Verifier::visitStoreInst(StoreInst &SI) {
4282 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4283 Check(PTy, "Store operand must be a pointer.", &SI);
4284 Type *ElTy = SI.getOperand(0)->getType();
4285 if (MaybeAlign A = SI.getAlign()) {
4286 Check(A->value() <= Value::MaximumAlignment,
4287 "huge alignment values are unsupported", &SI);
4288 }
4289 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4290 if (SI.isAtomic()) {
4291 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4292 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4293 "Store cannot have Acquire ordering", &SI);
4294 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4295 "atomic store operand must have integer, pointer, or floating point "
4296 "type!",
4297 ElTy, &SI);
4298 checkAtomicMemAccessSize(ElTy, &SI);
4299 } else {
4300 Check(SI.getSyncScopeID() == SyncScope::System,
4301 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4302 }
4303 visitInstruction(SI);
4304}
4305
4306/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4307void Verifier::verifySwiftErrorCall(CallBase &Call,
4308 const Value *SwiftErrorVal) {
4309 for (const auto &I : llvm::enumerate(Call.args())) {
4310 if (I.value() == SwiftErrorVal) {
4311 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4312 "swifterror value when used in a callsite should be marked "
4313 "with swifterror attribute",
4314 SwiftErrorVal, Call);
4315 }
4316 }
4317}
4318
4319void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4320 // Check that swifterror value is only used by loads, stores, or as
4321 // a swifterror argument.
4322 for (const User *U : SwiftErrorVal->users()) {
4323 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4324 isa<InvokeInst>(U),
4325 "swifterror value can only be loaded and stored from, or "
4326 "as a swifterror argument!",
4327 SwiftErrorVal, U);
4328 // If it is used by a store, check it is the second operand.
4329 if (auto StoreI = dyn_cast<StoreInst>(U))
4330 Check(StoreI->getOperand(1) == SwiftErrorVal,
4331 "swifterror value should be the second operand when used "
4332 "by stores",
4333 SwiftErrorVal, U);
4334 if (auto *Call = dyn_cast<CallBase>(U))
4335 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4336 }
4337}
4338
4339void Verifier::visitAllocaInst(AllocaInst &AI) {
4340 Type *Ty = AI.getAllocatedType();
4341 SmallPtrSet<Type*, 4> Visited;
4342 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4343 // Check if it's a target extension type that disallows being used on the
4344 // stack.
4346 "Alloca has illegal target extension type", &AI);
4348 "Alloca array size must have integer type", &AI);
4349 if (MaybeAlign A = AI.getAlign()) {
4350 Check(A->value() <= Value::MaximumAlignment,
4351 "huge alignment values are unsupported", &AI);
4352 }
4353
4354 if (AI.isSwiftError()) {
4355 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4357 "swifterror alloca must not be array allocation", &AI);
4358 verifySwiftErrorValue(&AI);
4359 }
4360
4361 visitInstruction(AI);
4362}
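// For illustration (names are placeholders): a swifterror alloca the checks
// above accept; it must allocate a single pointer, never an array:
//
//   %err = alloca swifterror ptr, align 8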
4363
4364void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4365 Type *ElTy = CXI.getOperand(1)->getType();
4366 Check(ElTy->isIntOrPtrTy(),
4367 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4368 checkAtomicMemAccessSize(ElTy, &CXI);
4369 visitInstruction(CXI);
4370}
4371
4372void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4374 "atomicrmw instructions cannot be unordered.", &RMWI);
4375 auto Op = RMWI.getOperation();
4376 Type *ElTy = RMWI.getOperand(1)->getType();
4377 if (Op == AtomicRMWInst::Xchg) {
4378 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4379 ElTy->isPointerTy(),
4380 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4381 " operand must have integer or floating point type!",
4382 &RMWI, ElTy);
4383 } else if (AtomicRMWInst::isFPOperation(Op)) {
4384 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4385 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4386 " operand must have floating-point or fixed vector of floating-point "
4387 "type!",
4388 &RMWI, ElTy);
4389 } else {
4390 Check(ElTy->isIntegerTy(),
4391 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4392 " operand must have integer type!",
4393 &RMWI, ElTy);
4394 }
4395 checkAtomicMemAccessSize(ElTy, &RMWI);
4397 "Invalid binary operation!", &RMWI);
4398 visitInstruction(RMWI);
4399}
4400
4401void Verifier::visitFenceInst(FenceInst &FI) {
4402 const AtomicOrdering Ordering = FI.getOrdering();
4403 Check(Ordering == AtomicOrdering::Acquire ||
4404 Ordering == AtomicOrdering::Release ||
4405 Ordering == AtomicOrdering::AcquireRelease ||
4407 "fence instructions may only have acquire, release, acq_rel, or "
4408 "seq_cst ordering.",
4409 &FI);
4410 visitInstruction(FI);
4411}
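// For illustration: orderings the fence check above admits and rejects:
//
//   fence acquire      ; accepted
//   fence seq_cst      ; accepted
//   fence monotonic    ; rejected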
4412
4413void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4414 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4415 EVI.getIndices()) == EVI.getType(),
4416 "Invalid ExtractValueInst operands!", &EVI);
4417
4418 visitInstruction(EVI);
4419}
4420
4421void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4422 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4423 IVI.getIndices()) ==
4424 IVI.getOperand(1)->getType(),
4425 "Invalid InsertValueInst operands!", &IVI);
4426
4427 visitInstruction(IVI);
4428}
4429
4430static Value *getParentPad(Value *EHPad) {
4431 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4432 return FPI->getParentPad();
4433
4434 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4435}
4436
4437void Verifier::visitEHPadPredecessors(Instruction &I) {
4438 assert(I.isEHPad());
4439
4440 BasicBlock *BB = I.getParent();
4441 Function *F = BB->getParent();
4442
4443 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4444
4445 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4446 // The landingpad instruction defines its parent as a landing pad block. The
4447 // landing pad block may be branched to only by the unwind edge of an
4448 // invoke.
4449 for (BasicBlock *PredBB : predecessors(BB)) {
4450 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4451 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4452 "Block containing LandingPadInst must be jumped to "
4453 "only by the unwind edge of an invoke.",
4454 LPI);
4455 }
4456 return;
4457 }
4458 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4459 if (!pred_empty(BB))
4460 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4461 "Block containg CatchPadInst must be jumped to "
4462 "only by its catchswitch.",
4463 CPI);
4464 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4465 "Catchswitch cannot unwind to one of its catchpads",
4466 CPI->getCatchSwitch(), CPI);
4467 return;
4468 }
4469
4470 // Verify that each pred has a legal terminator with a legal to/from EH
4471 // pad relationship.
4472 Instruction *ToPad = &I;
4473 Value *ToPadParent = getParentPad(ToPad);
4474 for (BasicBlock *PredBB : predecessors(BB)) {
4475 Instruction *TI = PredBB->getTerminator();
4476 Value *FromPad;
4477 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4478 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4479 "EH pad must be jumped to via an unwind edge", ToPad, II);
4480 auto *CalledFn =
4481 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4482 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4483 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4484 continue;
4485 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4486 FromPad = Bundle->Inputs[0];
4487 else
4488 FromPad = ConstantTokenNone::get(II->getContext());
4489 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4490 FromPad = CRI->getOperand(0);
4491 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4492 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4493 FromPad = CSI;
4494 } else {
4495 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4496 }
4497
4498 // The edge may exit from zero or more nested pads.
4499 SmallPtrSet<Value *, 8> Seen;
4500 for (;; FromPad = getParentPad(FromPad)) {
4501 Check(FromPad != ToPad,
4502 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4503 if (FromPad == ToPadParent) {
4504 // This is a legal unwind edge.
4505 break;
4506 }
4507 Check(!isa<ConstantTokenNone>(FromPad),
4508 "A single unwind edge may only enter one EH pad", TI);
4509 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4510 FromPad);
4511
4512 // This will be diagnosed on the corresponding instruction already. We
4513 // need the extra check here to make sure getParentPad() works.
4514 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4515 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4516 }
4517 }
4518}
4519
4520void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4521 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4522 // isn't a cleanup.
4523 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4524 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4525
4526 visitEHPadPredecessors(LPI);
4527
4528 if (!LandingPadResultTy)
4529 LandingPadResultTy = LPI.getType();
4530 else
4531 Check(LandingPadResultTy == LPI.getType(),
4532 "The landingpad instruction should have a consistent result type "
4533 "inside a function.",
4534 &LPI);
4535
4536 Function *F = LPI.getParent()->getParent();
4537 Check(F->hasPersonalityFn(),
4538 "LandingPadInst needs to be in a function with a personality.", &LPI);
4539
4540 // The landingpad instruction must be the first non-PHI instruction in the
4541 // block.
4542 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4543 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4544
4545 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4546 Constant *Clause = LPI.getClause(i);
4547 if (LPI.isCatch(i)) {
4548 Check(isa<PointerType>(Clause->getType()),
4549 "Catch operand does not have pointer type!", &LPI);
4550 } else {
4551 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4552 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4553 "Filter operand is not an array of constants!", &LPI);
4554 }
4555 }
4556
4557 visitInstruction(LPI);
4558}
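// For illustration (names are placeholders): a landing pad the clause checks
// above accept, inside a function that declares a personality; catch operands
// are pointers and filter operands are constant arrays:
//
//   %lp = landingpad { ptr, i32 }
//           catch ptr @_ZTIi
//           filter [1 x ptr] [ptr @_ZTIi]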
4559
4560void Verifier::visitResumeInst(ResumeInst &RI) {
4562 "ResumeInst needs to be in a function with a personality.", &RI);
4563
4564 if (!LandingPadResultTy)
4565 LandingPadResultTy = RI.getValue()->getType();
4566 else
4567 Check(LandingPadResultTy == RI.getValue()->getType(),
4568 "The resume instruction should have a consistent result type "
4569 "inside a function.",
4570 &RI);
4571
4572 visitTerminator(RI);
4573}
4574
4575void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4576 BasicBlock *BB = CPI.getParent();
4577
4578 Function *F = BB->getParent();
4579 Check(F->hasPersonalityFn(),
4580 "CatchPadInst needs to be in a function with a personality.", &CPI);
4581
4582 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4583 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4584 CPI.getParentPad());
4585
4586 // The catchpad instruction must be the first non-PHI instruction in the
4587 // block.
4588 Check(BB->getFirstNonPHI() == &CPI,
4589 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4590
4591   visitEHPadPredecessors(CPI);
4592   visitFuncletPadInst(CPI);
4593 }
4594
4595void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4596 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4597 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4598 CatchReturn.getOperand(0));
4599
4600 visitTerminator(CatchReturn);
4601}
4602
4603void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4604 BasicBlock *BB = CPI.getParent();
4605
4606 Function *F = BB->getParent();
4607 Check(F->hasPersonalityFn(),
4608 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4609
4610 // The cleanuppad instruction must be the first non-PHI instruction in the
4611 // block.
4612 Check(BB->getFirstNonPHI() == &CPI,
4613 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4614
4615 auto *ParentPad = CPI.getParentPad();
4616 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4617 "CleanupPadInst has an invalid parent.", &CPI);
4618
4619   visitEHPadPredecessors(CPI);
4620   visitFuncletPadInst(CPI);
4621 }
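// Illustrative sketch (placeholders for @g and the personality): a
// cleanuppad/cleanupret pair satisfying the checks above -- the pad is the
// first non-PHI instruction of its block, its parent pad is "none", and the
// cleanupret exits the pad:
//
//   define void @f() personality ptr @__CxxFrameHandler3 {
//   entry:
//     invoke void @g() to label %cont unwind label %cleanup
//   cleanup:
//     %cp = cleanuppad within none []
//     cleanupret from %cp unwind to caller
//   cont:
//     ret void
//   }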
4622
4623void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4624 User *FirstUser = nullptr;
4625 Value *FirstUnwindPad = nullptr;
4626   SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4627   SmallSet<FuncletPadInst *, 8> Seen;
4628 
4629 while (!Worklist.empty()) {
4630 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4631 Check(Seen.insert(CurrentPad).second,
4632 "FuncletPadInst must not be nested within itself", CurrentPad);
4633 Value *UnresolvedAncestorPad = nullptr;
4634 for (User *U : CurrentPad->users()) {
4635 BasicBlock *UnwindDest;
4636 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4637 UnwindDest = CRI->getUnwindDest();
4638 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4639 // We allow catchswitch unwind to caller to nest
4640 // within an outer pad that unwinds somewhere else,
4641 // because catchswitch doesn't have a nounwind variant.
4642 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4643 if (CSI->unwindsToCaller())
4644 continue;
4645 UnwindDest = CSI->getUnwindDest();
4646 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4647 UnwindDest = II->getUnwindDest();
4648 } else if (isa<CallInst>(U)) {
4649 // Calls which don't unwind may be found inside funclet
4650 // pads that unwind somewhere else. We don't *require*
4651 // such calls to be annotated nounwind.
4652 continue;
4653 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4654 // The unwind dest for a cleanup can only be found by
4655 // recursive search. Add it to the worklist, and we'll
4656 // search for its first use that determines where it unwinds.
4657 Worklist.push_back(CPI);
4658 continue;
4659 } else {
4660 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4661 continue;
4662 }
4663
4664 Value *UnwindPad;
4665 bool ExitsFPI;
4666 if (UnwindDest) {
4667 UnwindPad = UnwindDest->getFirstNonPHI();
4668 if (!cast<Instruction>(UnwindPad)->isEHPad())
4669 continue;
4670 Value *UnwindParent = getParentPad(UnwindPad);
4671 // Ignore unwind edges that don't exit CurrentPad.
4672 if (UnwindParent == CurrentPad)
4673 continue;
4674 // Determine whether the original funclet pad is exited,
4675 // and if we are scanning nested pads determine how many
4676 // of them are exited so we can stop searching their
4677 // children.
4678 Value *ExitedPad = CurrentPad;
4679 ExitsFPI = false;
4680 do {
4681 if (ExitedPad == &FPI) {
4682 ExitsFPI = true;
4683 // Now we can resolve any ancestors of CurrentPad up to
4684 // FPI, but not including FPI since we need to make sure
4685 // to check all direct users of FPI for consistency.
4686 UnresolvedAncestorPad = &FPI;
4687 break;
4688 }
4689 Value *ExitedParent = getParentPad(ExitedPad);
4690 if (ExitedParent == UnwindParent) {
4691 // ExitedPad is the ancestor-most pad which this unwind
4692 // edge exits, so we can resolve up to it, meaning that
4693 // ExitedParent is the first ancestor still unresolved.
4694 UnresolvedAncestorPad = ExitedParent;
4695 break;
4696 }
4697 ExitedPad = ExitedParent;
4698 } while (!isa<ConstantTokenNone>(ExitedPad));
4699 } else {
4700 // Unwinding to caller exits all pads.
4701 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4702 ExitsFPI = true;
4703 UnresolvedAncestorPad = &FPI;
4704 }
4705
4706 if (ExitsFPI) {
4707 // This unwind edge exits FPI. Make sure it agrees with other
4708 // such edges.
4709 if (FirstUser) {
4710 Check(UnwindPad == FirstUnwindPad,
4711 "Unwind edges out of a funclet "
4712 "pad must have the same unwind "
4713 "dest",
4714 &FPI, U, FirstUser);
4715 } else {
4716 FirstUser = U;
4717 FirstUnwindPad = UnwindPad;
4718 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4719 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4720 getParentPad(UnwindPad) == getParentPad(&FPI))
4721 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4722 }
4723 }
4724 // Make sure we visit all uses of FPI, but for nested pads stop as
4725 // soon as we know where they unwind to.
4726 if (CurrentPad != &FPI)
4727 break;
4728 }
4729 if (UnresolvedAncestorPad) {
4730 if (CurrentPad == UnresolvedAncestorPad) {
4731 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4732 // we've found an unwind edge that exits it, because we need to verify
4733 // all direct uses of FPI.
4734 assert(CurrentPad == &FPI);
4735 continue;
4736 }
4737 // Pop off the worklist any nested pads that we've found an unwind
4738 // destination for. The pads on the worklist are the uncles,
4739 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4740 // for all ancestors of CurrentPad up to but not including
4741 // UnresolvedAncestorPad.
4742 Value *ResolvedPad = CurrentPad;
4743 while (!Worklist.empty()) {
4744 Value *UnclePad = Worklist.back();
4745 Value *AncestorPad = getParentPad(UnclePad);
4746 // Walk ResolvedPad up the ancestor list until we either find the
4747 // uncle's parent or the last resolved ancestor.
4748 while (ResolvedPad != AncestorPad) {
4749 Value *ResolvedParent = getParentPad(ResolvedPad);
4750 if (ResolvedParent == UnresolvedAncestorPad) {
4751 break;
4752 }
4753 ResolvedPad = ResolvedParent;
4754 }
4755 // If the resolved ancestor search didn't find the uncle's parent,
4756 // then the uncle is not yet resolved.
4757 if (ResolvedPad != AncestorPad)
4758 break;
4759 // This uncle is resolved, so pop it from the worklist.
4760 Worklist.pop_back();
4761 }
4762 }
4763 }
4764
4765 if (FirstUnwindPad) {
4766 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4767 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4768 Value *SwitchUnwindPad;
4769 if (SwitchUnwindDest)
4770 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4771 else
4772 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4773 Check(SwitchUnwindPad == FirstUnwindPad,
4774 "Unwind edges out of a catch must have the same unwind dest as "
4775 "the parent catchswitch",
4776 &FPI, FirstUser, CatchSwitch);
4777 }
4778 }
4779
4780 visitInstruction(FPI);
4781}
4782
4783void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4784 BasicBlock *BB = CatchSwitch.getParent();
4785
4786 Function *F = BB->getParent();
4787 Check(F->hasPersonalityFn(),
4788 "CatchSwitchInst needs to be in a function with a personality.",
4789 &CatchSwitch);
4790
4791 // The catchswitch instruction must be the first non-PHI instruction in the
4792 // block.
4793 Check(BB->getFirstNonPHI() == &CatchSwitch,
4794 "CatchSwitchInst not the first non-PHI instruction in the block.",
4795 &CatchSwitch);
4796
4797 auto *ParentPad = CatchSwitch.getParentPad();
4798 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4799 "CatchSwitchInst has an invalid parent.", ParentPad);
4800
4801 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4802 Instruction *I = UnwindDest->getFirstNonPHI();
4803 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4804 "CatchSwitchInst must unwind to an EH block which is not a "
4805 "landingpad.",
4806 &CatchSwitch);
4807
4808 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4809 if (getParentPad(I) == ParentPad)
4810 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4811 }
4812
4813 Check(CatchSwitch.getNumHandlers() != 0,
4814 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4815
4816 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4817 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4818 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4819 }
4820
4821 visitEHPadPredecessors(CatchSwitch);
4822 visitTerminator(CatchSwitch);
4823}
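// Illustrative fragment (reached via an invoke unwind edge; operand values
// are placeholders): a catchswitch with one catchpad handler, as required by
// the checks above -- the handler block starts with a catchpad whose parent
// is the catchswitch:
//
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ptr null, i32 64, ptr null]
//     catchret from %cp to label %cont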
4824
4825void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4826 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4827 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4828 CRI.getOperand(0));
4829
4830 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4831 Instruction *I = UnwindDest->getFirstNonPHI();
4832 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4833 "CleanupReturnInst must unwind to an EH block which is not a "
4834 "landingpad.",
4835 &CRI);
4836 }
4837
4838 visitTerminator(CRI);
4839}
4840
4841void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4842 Instruction *Op = cast<Instruction>(I.getOperand(i));
4843   // If we have an invalid invoke, don't try to compute the dominance.
4844 // We already reject it in the invoke specific checks and the dominance
4845 // computation doesn't handle multiple edges.
4846 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4847 if (II->getNormalDest() == II->getUnwindDest())
4848 return;
4849 }
4850
4851 // Quick check whether the def has already been encountered in the same block.
4852 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4853 // uses are defined to happen on the incoming edge, not at the instruction.
4854 //
4855 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4856 // wrapping an SSA value, assert that we've already encountered it. See
4857 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4858 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4859 return;
4860
4861 const Use &U = I.getOperandUse(i);
4862 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4863}
4864
4865void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4866 Check(I.getType()->isPointerTy(),
4867 "dereferenceable, dereferenceable_or_null "
4868 "apply only to pointer types",
4869 &I);
4870 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4871 "dereferenceable, dereferenceable_or_null apply only to load"
4872 " and inttoptr instructions, use attributes for calls or invokes",
4873 &I);
4874 Check(MD->getNumOperands() == 1,
4875 "dereferenceable, dereferenceable_or_null "
4876 "take one operand!",
4877 &I);
4878 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4879 Check(CI && CI->getType()->isIntegerTy(64),
4880 "dereferenceable, "
4881 "dereferenceable_or_null metadata value must be an i64!",
4882 &I);
4883}
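// Illustrative sketch: !dereferenceable on a load of a pointer; the single
// metadata operand is an i64 byte count, as required above:
//
//   %p = load ptr, ptr %q, !dereferenceable !0
//   ...
//   !0 = !{i64 8}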
4884
4885void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4886 Check(MD->getNumOperands() >= 2,
4887 "!prof annotations should have no less than 2 operands", MD);
4888
4889 // Check first operand.
4890 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4891 Check(isa<MDString>(MD->getOperand(0)),
4892 "expected string with name of the !prof annotation", MD);
4893 MDString *MDS = cast<MDString>(MD->getOperand(0));
4894 StringRef ProfName = MDS->getString();
4895
4896 // Check consistency of !prof branch_weights metadata.
4897 if (ProfName == "branch_weights") {
4898 unsigned NumBranchWeights = getNumBranchWeights(*MD);
4899 if (isa<InvokeInst>(&I)) {
4900 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
4901 "Wrong number of InvokeInst branch_weights operands", MD);
4902 } else {
4903 unsigned ExpectedNumOperands = 0;
4904 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4905 ExpectedNumOperands = BI->getNumSuccessors();
4906 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4907 ExpectedNumOperands = SI->getNumSuccessors();
4908 else if (isa<CallInst>(&I))
4909 ExpectedNumOperands = 1;
4910 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4911 ExpectedNumOperands = IBI->getNumDestinations();
4912 else if (isa<SelectInst>(&I))
4913 ExpectedNumOperands = 2;
4914 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4915 ExpectedNumOperands = CI->getNumSuccessors();
4916 else
4917 CheckFailed("!prof branch_weights are not allowed for this instruction",
4918 MD);
4919
4920 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
4921 MD);
4922 }
4923 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
4924 ++i) {
4925 auto &MDO = MD->getOperand(i);
4926 Check(MDO, "second operand should not be null", MD);
4927 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4928             "!prof branch_weights operand is not a const int");
4929 }
4930 }
4931}
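// Illustrative sketch: !prof branch_weights on a conditional branch; the
// weight count matches the two successors and each weight is a constant int,
// as the checks above require:
//
//   br i1 %cond, label %then, label %else, !prof !0
//   ...
//   !0 = !{!"branch_weights", i32 90, i32 10}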
4932
4933void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4934 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4935 bool ExpectedInstTy =
4936 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4937 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4938 I, MD);
4939 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4940 // only be found as DbgAssignIntrinsic operands.
4941 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4942 for (auto *User : AsValue->users()) {
4943 CheckDI(isa<DbgAssignIntrinsic>(User),
4944 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4945 MD, User);
4946 // All of the dbg.assign intrinsics should be in the same function as I.
4947 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4948 CheckDI(DAI->getFunction() == I.getFunction(),
4949 "dbg.assign not in same function as inst", DAI, &I);
4950 }
4951 }
4952 for (DbgVariableRecord *DVR :
4953 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4954 CheckDI(DVR->isDbgAssign(),
4955 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4956 CheckDI(DVR->getFunction() == I.getFunction(),
4957 "DVRAssign not in same function as inst", DVR, &I);
4958 }
4959}
4960
4961void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4962   Check(canInstructionHaveMMRAs(I),
4963         "!mmra metadata attached to unexpected instruction kind", I, MD);
4964
4965 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4966 // list of tags such as !2 in the following example:
4967 // !0 = !{!"a", !"b"}
4968 // !1 = !{!"c", !"d"}
4969 // !2 = !{!0, !1}
4970 if (MMRAMetadata::isTagMD(MD))
4971 return;
4972
4973 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4974 for (const MDOperand &MDOp : MD->operands())
4975 Check(MMRAMetadata::isTagMD(MDOp.get()),
4976 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4977}
4978
4979void Verifier::visitCallStackMetadata(MDNode *MD) {
4980 // Call stack metadata should consist of a list of at least 1 constant int
4981 // (representing a hash of the location).
4982 Check(MD->getNumOperands() >= 1,
4983 "call stack metadata should have at least 1 operand", MD);
4984
4985 for (const auto &Op : MD->operands())
4986 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4987 "call stack metadata operand should be constant integer", Op);
4988}
4989
4990void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4991 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4992 Check(MD->getNumOperands() >= 1,
4993 "!memprof annotations should have at least 1 metadata operand "
4994 "(MemInfoBlock)",
4995 MD);
4996
4997 // Check each MIB
4998 for (auto &MIBOp : MD->operands()) {
4999 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5000 // The first operand of an MIB should be the call stack metadata.
5001     // The rest of the operands should be MDString tags, and there should be
5002 // at least one.
5003 Check(MIB->getNumOperands() >= 2,
5004 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5005
5006 // Check call stack metadata (first operand).
5007 Check(MIB->getOperand(0) != nullptr,
5008 "!memprof MemInfoBlock first operand should not be null", MIB);
5009 Check(isa<MDNode>(MIB->getOperand(0)),
5010 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5011 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5012 visitCallStackMetadata(StackMD);
5013
5014 // The next set of 1 or more operands should be MDString.
5015 unsigned I = 1;
5016 for (; I < MIB->getNumOperands(); ++I) {
5017 if (!isa<MDString>(MIB->getOperand(I))) {
5018 Check(I > 1,
5019 "!memprof MemInfoBlock second operand should be an MDString",
5020 MIB);
5021 break;
5022 }
5023 }
5024
5025 // Any remaining should be MDNode that are pairs of integers
5026 for (; I < MIB->getNumOperands(); ++I) {
5027 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5028 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5029 MIB);
5030 Check(OpNode->getNumOperands() == 2,
5031 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5032 "operands",
5033 MIB);
5034 // Check that all of Op's operands are ConstantInt.
5035 Check(llvm::all_of(OpNode->operands(),
5036 [](const MDOperand &Op) {
5037 return mdconst::hasa<ConstantInt>(Op);
5038 }),
5039 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5040 "ConstantInt operands",
5041 MIB);
5042 }
5043 }
5044}
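// Illustrative sketch (simplified; the frame ids and tag are placeholders):
// a !memprof attachment with one MemInfoBlock whose first operand is call
// stack metadata (constant ints) followed by an MDString tag, matching the
// shape checked above:
//
//   %call = call ptr @malloc(i64 8), !memprof !0, !callsite !3
//   ...
//   !0 = !{!1}
//   !1 = !{!2, !"notcold"}
//   !2 = !{i64 123, i64 456}
//   !3 = !{i64 123}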
5045
5046void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5047 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5048 // Verify the partial callstack annotated from memprof profiles. This callsite
5049 // is a part of a profiled allocation callstack.
5050 visitCallStackMetadata(MD);
5051}
5052
5053void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5054 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5055 Check(Annotation->getNumOperands() >= 1,
5056 "annotation must have at least one operand");
5057 for (const MDOperand &Op : Annotation->operands()) {
5058 bool TupleOfStrings =
5059 isa<MDTuple>(Op.get()) &&
5060 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5061 return isa<MDString>(Annotation.get());
5062 });
5063 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5064 "operands must be a string or a tuple of strings");
5065 }
5066}
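// Illustrative sketch: an !annotation attachment whose operands are strings
// (a tuple of strings is also accepted by the check above):
//
//   store i32 0, ptr %p, !annotation !0
//   ...
//   !0 = !{!"auto-init"}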
5067
5068void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5069 unsigned NumOps = MD->getNumOperands();
5070 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5071 MD);
5072 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5073 "first scope operand must be self-referential or string", MD);
5074 if (NumOps == 3)
5075 Check(isa<MDString>(MD->getOperand(2)),
5076 "third scope operand must be string (if used)", MD);
5077
5078 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5079 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5080
5081 unsigned NumDomainOps = Domain->getNumOperands();
5082 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5083 "domain must have one or two operands", Domain);
5084 Check(Domain->getOperand(0).get() == Domain ||
5085 isa<MDString>(Domain->getOperand(0)),
5086 "first domain operand must be self-referential or string", Domain);
5087 if (NumDomainOps == 2)
5088 Check(isa<MDString>(Domain->getOperand(1)),
5089 "second domain operand must be string (if used)", Domain);
5090}
5091
5092void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5093 for (const MDOperand &Op : MD->operands()) {
5094 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5095 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5096 visitAliasScopeMetadata(OpMD);
5097 }
5098}
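// Illustrative sketch: a scope list as used by !alias.scope and !noalias --
// each scope's first operand is self-referential, its second is a domain
// node, and an optional third is a string name:
//
//   %v = load i32, ptr %p, !alias.scope !2
//   ...
//   !0 = distinct !{!0, !"domain"}
//   !1 = distinct !{!1, !0, !"scope"}
//   !2 = !{!1}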
5099
5100void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5101 auto IsValidAccessScope = [](const MDNode *MD) {
5102 return MD->getNumOperands() == 0 && MD->isDistinct();
5103 };
5104
5105 // It must be either an access scope itself...
5106 if (IsValidAccessScope(MD))
5107 return;
5108
5109 // ...or a list of access scopes.
5110 for (const MDOperand &Op : MD->operands()) {
5111 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5112 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5113 Check(IsValidAccessScope(OpMD),
5114 "Access scope list contains invalid access scope", MD);
5115 }
5116}
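// Illustrative sketch: an access group is a distinct, operand-less node; a
// list of such nodes is also accepted by the check above:
//
//   %v = load i32, ptr %p, !llvm.access.group !0
//   ...
//   !0 = distinct !{}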
5117
5118/// verifyInstruction - Verify that an instruction is well formed.
5119///
5120void Verifier::visitInstruction(Instruction &I) {
5121 BasicBlock *BB = I.getParent();
5122 Check(BB, "Instruction not embedded in basic block!", &I);
5123
5124 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5125 for (User *U : I.users()) {
5126 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5127 "Only PHI nodes may reference their own value!", &I);
5128 }
5129 }
5130
5131 // Check that void typed values don't have names
5132 Check(!I.getType()->isVoidTy() || !I.hasName(),
5133 "Instruction has a name, but provides a void value!", &I);
5134
5135 // Check that the return value of the instruction is either void or a legal
5136 // value type.
5137 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5138 "Instruction returns a non-scalar type!", &I);
5139
5140 // Check that the instruction doesn't produce metadata. Calls are already
5141 // checked against the callee type.
5142 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5143 "Invalid use of metadata!", &I);
5144
5145 // Check that all uses of the instruction, if they are instructions
5146 // themselves, actually have parent basic blocks. If the use is not an
5147 // instruction, it is an error!
5148 for (Use &U : I.uses()) {
5149 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5150 Check(Used->getParent() != nullptr,
5151 "Instruction referencing"
5152 " instruction not embedded in a basic block!",
5153 &I, Used);
5154 else {
5155 CheckFailed("Use of instruction is not an instruction!", U);
5156 return;
5157 }
5158 }
5159
5160 // Get a pointer to the call base of the instruction if it is some form of
5161 // call.
5162 const CallBase *CBI = dyn_cast<CallBase>(&I);
5163
5164 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5165 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5166
5167 // Check to make sure that only first-class-values are operands to
5168 // instructions.
5169 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5170 Check(false, "Instruction operands must be first-class values!", &I);
5171 }
5172
5173 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5174 // This code checks whether the function is used as the operand of a
5175 // clang_arc_attachedcall operand bundle.
5176 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5177 int Idx) {
5178         return CBI && CBI->isOperandBundleOfType(
5179                           LLVMContext::OB_clang_arc_attachedcall, Idx);
5180       };
5181
5182 // Check to make sure that the "address of" an intrinsic function is never
5183 // taken. Ignore cases where the address of the intrinsic function is used
5184 // as the argument of operand bundle "clang.arc.attachedcall" as those
5185 // cases are handled in verifyAttachedCallBundle.
5186 Check((!F->isIntrinsic() ||
5187 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5188 IsAttachedCallOperand(F, CBI, i)),
5189 "Cannot take the address of an intrinsic!", &I);
5190 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5191 F->getIntrinsicID() == Intrinsic::donothing ||
5192 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5193 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5194 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5195 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5196 F->getIntrinsicID() == Intrinsic::coro_resume ||
5197 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5198 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5199 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5200 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5201 F->getIntrinsicID() ==
5202 Intrinsic::experimental_patchpoint_void ||
5203 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5204 F->getIntrinsicID() == Intrinsic::fake_use ||
5205 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5206 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5207 IsAttachedCallOperand(F, CBI, i),
5208 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5209 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5210 &I);
5211 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5212 &M, F, F->getParent());
5213 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5214 Check(OpBB->getParent() == BB->getParent(),
5215 "Referring to a basic block in another function!", &I);
5216 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5217 Check(OpArg->getParent() == BB->getParent(),
5218 "Referring to an argument in another function!", &I);
5219 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5220 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5221 &M, GV, GV->getParent());
5222 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5223 Check(OpInst->getFunction() == BB->getParent(),
5224 "Referring to an instruction in another function!", &I);
5225 verifyDominatesUse(I, i);
5226 } else if (isa<InlineAsm>(I.getOperand(i))) {
5227 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5228 "Cannot take the address of an inline asm!", &I);
5229 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5230 visitConstantExprsRecursively(CPA);
5231 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5232 if (CE->getType()->isPtrOrPtrVectorTy()) {
5233 // If we have a ConstantExpr pointer, we need to see if it came from an
5234 // illegal bitcast.
5235 visitConstantExprsRecursively(CE);
5236 }
5237 }
5238 }
5239
5240 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5241 Check(I.getType()->isFPOrFPVectorTy(),
5242 "fpmath requires a floating point result!", &I);
5243 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5244 if (ConstantFP *CFP0 =
5245 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5246 const APFloat &Accuracy = CFP0->getValueAPF();
5247 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5248 "fpmath accuracy must have float type", &I);
5249 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5250 "fpmath accuracy not a positive number!", &I);
5251 } else {
5252 Check(false, "invalid fpmath accuracy!", &I);
5253 }
5254 }
5255
5256 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5257 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5258 "Ranges are only for loads, calls and invokes!", &I);
5259 visitRangeMetadata(I, Range, I.getType());
5260 }
5261
5262 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5263 Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5264 isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5265 "noalias.addrspace are only for memory operations!", &I);
5266 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5267 }
5268
5269 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5270 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5271 "invariant.group metadata is only for loads and stores", &I);
5272 }
5273
5274 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5275 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5276 &I);
5277 Check(isa<LoadInst>(I),
5278 "nonnull applies only to load instructions, use attributes"
5279 " for calls or invokes",
5280 &I);
5281 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5282 }
5283
5284 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5285 visitDereferenceableMetadata(I, MD);
5286
5287 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5288 visitDereferenceableMetadata(I, MD);
5289
5290 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5291 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5292
5293 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5294 visitAliasScopeListMetadata(MD);
5295 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5296 visitAliasScopeListMetadata(MD);
5297
5298 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5299 visitAccessGroupMetadata(MD);
5300
5301 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5302 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5303 &I);
5304 Check(isa<LoadInst>(I),
5305 "align applies only to load instructions, "
5306 "use attributes for calls or invokes",
5307 &I);
5308 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5309 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5310 Check(CI && CI->getType()->isIntegerTy(64),
5311 "align metadata value must be an i64!", &I);
5312 uint64_t Align = CI->getZExtValue();
5313 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5314 &I);
5315     Check(Align <= Value::MaximumAlignment,
5316           "alignment is larger than implementation-defined limit", &I);
5317 }
5318
5319 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5320 visitProfMetadata(I, MD);
5321
5322 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5323 visitMemProfMetadata(I, MD);
5324
5325 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5326 visitCallsiteMetadata(I, MD);
5327
5328 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5329 visitDIAssignIDMetadata(I, MD);
5330
5331 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5332 visitMMRAMetadata(I, MMRA);
5333
5334 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5335 visitAnnotationMetadata(Annotation);
5336
5337 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5338 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5339 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5340 }
5341
5342 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5343 verifyFragmentExpression(*DII);
5344 verifyNotEntryValue(*DII);
5345 }
5346
5347   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5348   I.getAllMetadata(MDs);
5349 for (auto Attachment : MDs) {
5350 unsigned Kind = Attachment.first;
5351 auto AllowLocs =
5352 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5353 ? AreDebugLocsAllowed::Yes
5354 : AreDebugLocsAllowed::No;
5355 visitMDNode(*Attachment.second, AllowLocs);
5356 }
5357
5358 InstsInThisBlock.insert(&I);
5359}
5360
5361/// Allow intrinsics to be verified in different ways.
5362void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5363 Function *IF = Call.getCalledFunction();
5364 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5365 IF);
5366
5367 // Verify that the intrinsic prototype lines up with what the .td files
5368 // describe.
5369 FunctionType *IFTy = IF->getFunctionType();
5370 bool IsVarArg = IFTy->isVarArg();
5371
5372   SmallVector<Intrinsic::IITDescriptor, 8> Table;
5373   getIntrinsicInfoTableEntries(ID, Table);
5374   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5375 
5376   // Walk the descriptors to extract overloaded types.
5377   SmallVector<Type *, 4> ArgTys;
5378   Intrinsic::MatchIntrinsicTypesResult Res =
5379       Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5380   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5381         "Intrinsic has incorrect return type!", IF);
5382   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5383         "Intrinsic has incorrect argument type!", IF);
5384
5385   // Verify if the intrinsic call matches the vararg property.
5386   if (IsVarArg)
5387     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5388           "Intrinsic was not defined with variable arguments!", IF);
5389   else
5390     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5391           "Callsite was not defined with variable arguments!", IF);
5392
5393 // All descriptors should be absorbed by now.
5394 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5395
5396 // Now that we have the intrinsic ID and the actual argument types (and we
5397 // know they are legal for the intrinsic!) get the intrinsic name through the
5398 // usual means. This allows us to verify the mangling of argument types into
5399 // the name.
5400 const std::string ExpectedName =
5401 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5402 Check(ExpectedName == IF->getName(),
5403 "Intrinsic name not mangled correctly for type arguments! "
5404 "Should be: " +
5405 ExpectedName,
5406 IF);
5407
5408 // If the intrinsic takes MDNode arguments, verify that they are either global
5409 // or are local to *this* function.
5410 for (Value *V : Call.args()) {
5411 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5412 visitMetadataAsValue(*MD, Call.getCaller());
5413 if (auto *Const = dyn_cast<Constant>(V))
5414 Check(!Const->getType()->isX86_AMXTy(),
5415 "const x86_amx is not allowed in argument!");
5416 }
5417
5418 switch (ID) {
5419 default:
5420 break;
5421 case Intrinsic::assume: {
5422 for (auto &Elem : Call.bundle_op_infos()) {
5423 unsigned ArgCount = Elem.End - Elem.Begin;
5424 // Separate storage assumptions are special insofar as they're the only
5425 // operand bundles allowed on assumes that aren't parameter attributes.
5426 if (Elem.Tag->getKey() == "separate_storage") {
5427 Check(ArgCount == 2,
5428 "separate_storage assumptions should have 2 arguments", Call);
5429 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5430 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5431 "arguments to separate_storage assumptions should be pointers",
5432 Call);
5433 return;
5434 }
5435 Check(Elem.Tag->getKey() == "ignore" ||
5436 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5437 "tags must be valid attribute names", Call);
5438       Attribute::AttrKind Kind =
5439           Attribute::getAttrKindFromName(Elem.Tag->getKey());
5440 if (Kind == Attribute::Alignment) {
5441 Check(ArgCount <= 3 && ArgCount >= 2,
5442 "alignment assumptions should have 2 or 3 arguments", Call);
5443 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5444 "first argument should be a pointer", Call);
5445 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5446 "second argument should be an integer", Call);
5447 if (ArgCount == 3)
5448 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5449 "third argument should be an integer if present", Call);
5450 return;
5451 }
5452 Check(ArgCount <= 2, "too many arguments", Call);
5453 if (Kind == Attribute::None)
5454 break;
5455 if (Attribute::isIntAttrKind(Kind)) {
5456 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5457 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5458 "the second argument should be a constant integral value", Call);
5459 } else if (Attribute::canUseAsParamAttr(Kind)) {
5460 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5461 } else if (Attribute::canUseAsFnAttr(Kind)) {
5462 Check((ArgCount) == 0, "this attribute has no argument", Call);
5463 }
5464 }
5465 break;
5466 }
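  // Illustrative sketch for the llvm.assume bundle checks above (%p and %q
  // are placeholders): an alignment assumption takes a pointer plus an
  // integer alignment (and optional offset), while a parameter-attribute
  // bundle such as "nonnull" takes a single argument:
  //
  //   call void @llvm.assume(i1 true)
  //       [ "align"(ptr %p, i64 16), "nonnull"(ptr %q) ]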
5467 case Intrinsic::ucmp:
5468 case Intrinsic::scmp: {
5469 Type *SrcTy = Call.getOperand(0)->getType();
5470 Type *DestTy = Call.getType();
5471
5472 Check(DestTy->getScalarSizeInBits() >= 2,
5473 "result type must be at least 2 bits wide", Call);
5474
5475 bool IsDestTypeVector = DestTy->isVectorTy();
5476 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5477 "ucmp/scmp argument and result types must both be either vector or "
5478 "scalar types",
5479 Call);
5480 if (IsDestTypeVector) {
5481 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5482 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5483 Check(SrcVecLen == DestVecLen,
5484 "return type and arguments must have the same number of "
5485 "elements",
5486 Call);
5487 }
5488 break;
5489 }
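  // Illustrative sketch for the ucmp/scmp checks above: a scalar three-way
  // signed comparison whose result type is at least 2 bits wide:
  //
  //   %r = call i8 @llvm.scmp.i8.i32(i32 %a, i32 %b)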
5490 case Intrinsic::coro_id: {
5491 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5492 if (isa<ConstantPointerNull>(InfoArg))
5493 break;
5494 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5495 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5496 "info argument of llvm.coro.id must refer to an initialized "
5497 "constant");
5498 Constant *Init = GV->getInitializer();
5499 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5500 "info argument of llvm.coro.id must refer to either a struct or "
5501 "an array");
5502 break;
5503 }
5504 case Intrinsic::is_fpclass: {
5505 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5506 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5507 "unsupported bits for llvm.is.fpclass test mask");
5508 break;
5509 }
5510 case Intrinsic::fptrunc_round: {
5511 // Check the rounding mode
5512 Metadata *MD = nullptr;
5513 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5514 if (MAV)
5515 MD = MAV->getMetadata();
5516
5517 Check(MD != nullptr, "missing rounding mode argument", Call);
5518
5519 Check(isa<MDString>(MD),
5520 ("invalid value for llvm.fptrunc.round metadata operand"
5521 " (the operand should be a string)"),
5522 MD);
5523
5524 std::optional<RoundingMode> RoundMode =
5525 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5526 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5527 "unsupported rounding mode argument", Call);
5528 break;
5529 }
5530#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5531#include "llvm/IR/VPIntrinsics.def"
5532#undef BEGIN_REGISTER_VP_INTRINSIC
5533 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5534 break;
5535#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5536 case Intrinsic::INTRINSIC:
5537#include "llvm/IR/ConstrainedOps.def"
5538#undef INSTRUCTION
5539 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5540 break;
5541 case Intrinsic::dbg_declare: // llvm.dbg.declare
5542 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5543 "invalid llvm.dbg.declare intrinsic call 1", Call);
5544 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5545 break;
5546 case Intrinsic::dbg_value: // llvm.dbg.value
5547 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5548 break;
5549 case Intrinsic::dbg_assign: // llvm.dbg.assign
5550 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5551 break;
5552 case Intrinsic::dbg_label: // llvm.dbg.label
5553 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5554 break;
5555 case Intrinsic::memcpy:
5556 case Intrinsic::memcpy_inline:
5557 case Intrinsic::memmove:
5558 case Intrinsic::memset:
5559 case Intrinsic::memset_inline:
5560 case Intrinsic::experimental_memset_pattern: {
5561 break;
5562 }
5563 case Intrinsic::memcpy_element_unordered_atomic:
5564 case Intrinsic::memmove_element_unordered_atomic:
5565 case Intrinsic::memset_element_unordered_atomic: {
5566 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5567
5568 ConstantInt *ElementSizeCI =
5569 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5570 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5571 Check(ElementSizeVal.isPowerOf2(),
5572 "element size of the element-wise atomic memory intrinsic "
5573 "must be a power of 2",
5574 Call);
5575
5576 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5577 return Alignment && ElementSizeVal.ule(Alignment->value());
5578 };
5579 Check(IsValidAlignment(AMI->getDestAlign()),
5580 "incorrect alignment of the destination argument", Call);
5581 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5582 Check(IsValidAlignment(AMT->getSourceAlign()),
5583 "incorrect alignment of the source argument", Call);
5584 }
5585 break;
5586 }
5587 case Intrinsic::call_preallocated_setup: {
5588 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5589 Check(NumArgs != nullptr,
5590 "llvm.call.preallocated.setup argument must be a constant");
5591 bool FoundCall = false;
5592 for (User *U : Call.users()) {
5593 auto *UseCall = dyn_cast<CallBase>(U);
5594 Check(UseCall != nullptr,
5595 "Uses of llvm.call.preallocated.setup must be calls");
5596 const Function *Fn = UseCall->getCalledFunction();
5597 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5598 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5599 Check(AllocArgIndex != nullptr,
5600 "llvm.call.preallocated.alloc arg index must be a constant");
5601 auto AllocArgIndexInt = AllocArgIndex->getValue();
5602 Check(AllocArgIndexInt.sge(0) &&
5603 AllocArgIndexInt.slt(NumArgs->getValue()),
5604 "llvm.call.preallocated.alloc arg index must be between 0 and "
5605 "corresponding "
5606 "llvm.call.preallocated.setup's argument count");
5607 } else if (Fn && Fn->getIntrinsicID() ==
5608 Intrinsic::call_preallocated_teardown) {
5609 // nothing to do
5610 } else {
5611 Check(!FoundCall, "Can have at most one call corresponding to a "
5612 "llvm.call.preallocated.setup");
5613 FoundCall = true;
5614 size_t NumPreallocatedArgs = 0;
5615 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5616 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5617 ++NumPreallocatedArgs;
5618 }
5619 }
5620 Check(NumPreallocatedArgs != 0,
5621 "cannot use preallocated intrinsics on a call without "
5622 "preallocated arguments");
5623 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5624 "llvm.call.preallocated.setup arg size must be equal to number "
5625 "of preallocated arguments "
5626 "at call site",
5627 Call, *UseCall);
5628 // getOperandBundle() cannot be called if more than one of the operand
5629 // bundle exists. There is already a check elsewhere for this, so skip
5630 // here if we see more than one.
5631 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5632 1) {
5633 return;
5634 }
5635 auto PreallocatedBundle =
5636 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5637 Check(PreallocatedBundle,
5638 "Use of llvm.call.preallocated.setup outside intrinsics "
5639 "must be in \"preallocated\" operand bundle");
5640 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5641 "preallocated bundle must have token from corresponding "
5642 "llvm.call.preallocated.setup");
5643 }
5644 }
5645 break;
5646 }
5647 case Intrinsic::call_preallocated_arg: {
5648 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5649 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5650 Intrinsic::call_preallocated_setup,
5651 "llvm.call.preallocated.arg token argument must be a "
5652 "llvm.call.preallocated.setup");
5653 Check(Call.hasFnAttr(Attribute::Preallocated),
5654 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5655 "call site attribute");
5656 break;
5657 }
5658 case Intrinsic::call_preallocated_teardown: {
5659 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5660 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5661 Intrinsic::call_preallocated_setup,
5662 "llvm.call.preallocated.teardown token argument must be a "
5663 "llvm.call.preallocated.setup");
5664 break;
5665 }
5666 case Intrinsic::gcroot:
5667 case Intrinsic::gcwrite:
5668 case Intrinsic::gcread:
5669 if (ID == Intrinsic::gcroot) {
5670 AllocaInst *AI =
5671 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5672 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5673 Check(isa<Constant>(Call.getArgOperand(1)),
5674 "llvm.gcroot parameter #2 must be a constant.", Call);
5675 if (!AI->getAllocatedType()->isPointerTy()) {
5676 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5677 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5678 "or argument #2 must be a non-null constant.",
5679 Call);
5680 }
5681 }
5682
5683 Check(Call.getParent()->getParent()->hasGC(),
5684 "Enclosing function does not use GC.", Call);
5685 break;
5686 case Intrinsic::init_trampoline:
5687 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5688 "llvm.init_trampoline parameter #2 must resolve to a function.",
5689 Call);
5690 break;
5691 case Intrinsic::prefetch:
5692 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5693 "rw argument to llvm.prefetch must be 0-1", Call);
5694 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5695 "locality argument to llvm.prefetch must be 0-3", Call);
5696 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5697 "cache type argument to llvm.prefetch must be 0-1", Call);
5698 break;
5699 case Intrinsic::stackprotector:
5700 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5701 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5702 break;
5703 case Intrinsic::localescape: {
5704 BasicBlock *BB = Call.getParent();
5705 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5706 Call);
5707 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5708 Call);
5709 for (Value *Arg : Call.args()) {
5710 if (isa<ConstantPointerNull>(Arg))
5711 continue; // Null values are allowed as placeholders.
5712 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5713 Check(AI && AI->isStaticAlloca(),
5714 "llvm.localescape only accepts static allocas", Call);
5715 }
5716 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5717 SawFrameEscape = true;
5718 break;
5719 }
5720 case Intrinsic::localrecover: {
5721 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5722 Function *Fn = dyn_cast<Function>(FnArg);
5723 Check(Fn && !Fn->isDeclaration(),
5724 "llvm.localrecover first "
5725 "argument must be function defined in this module",
5726 Call);
5727 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5728 auto &Entry = FrameEscapeInfo[Fn];
5729 Entry.second = unsigned(
5730 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5731 break;
5732 }
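  // Illustrative sketch for the two cases above (@parent and %fp are
  // placeholders): localescape lists static allocas in the parent's entry
  // block, and localrecover names the parent function plus an escaped slot
  // index:
  //
  //   ; in @parent's entry block:
  //   %buf = alloca i32
  //   call void (...) @llvm.localescape(ptr %buf)
  //
  //   ; in a helper function:
  //   %p = call ptr @llvm.localrecover(ptr @parent, ptr %fp, i32 0)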
5733
5734 case Intrinsic::experimental_gc_statepoint:
5735 if (auto *CI = dyn_cast<CallInst>(&Call))
5736 Check(!CI->isInlineAsm(),
5737 "gc.statepoint support for inline assembly unimplemented", CI);
5738 Check(Call.getParent()->getParent()->hasGC(),
5739 "Enclosing function does not use GC.", Call);
5740
5741 verifyStatepoint(Call);
5742 break;
5743 case Intrinsic::experimental_gc_result: {
5744 Check(Call.getParent()->getParent()->hasGC(),
5745 "Enclosing function does not use GC.", Call);
5746
5747 auto *Statepoint = Call.getArgOperand(0);
5748 if (isa<UndefValue>(Statepoint))
5749 break;
5750
5751 // Are we tied to a statepoint properly?
5752 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5753 const Function *StatepointFn =
5754 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5755 Check(StatepointFn && StatepointFn->isDeclaration() &&
5756 StatepointFn->getIntrinsicID() ==
5757 Intrinsic::experimental_gc_statepoint,
5758 "gc.result operand #1 must be from a statepoint", Call,
5759 Call.getArgOperand(0));
5760
5761 // Check that result type matches wrapped callee.
5762 auto *TargetFuncType =
5763 cast<FunctionType>(StatepointCall->getParamElementType(2));
5764 Check(Call.getType() == TargetFuncType->getReturnType(),
5765 "gc.result result type does not match wrapped callee", Call);
5766 break;
5767 }
5768 case Intrinsic::experimental_gc_relocate: {
5769 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5770
5771 Check(isa<PointerType>(Call.getType()->getScalarType()),
5772 "gc.relocate must return a pointer or a vector of pointers", Call);
5773
5774 // Check that this relocate is correctly tied to the statepoint
5775
5776 // This is case for relocate on the unwinding path of an invoke statepoint
5777 if (LandingPadInst *LandingPad =
5778 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5779
5780 const BasicBlock *InvokeBB =
5781 LandingPad->getParent()->getUniquePredecessor();
5782
5783 // Landingpad relocates should have only one predecessor with invoke
5784 // statepoint terminator
5785 Check(InvokeBB, "safepoints should have unique landingpads",
5786 LandingPad->getParent());
5787 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5788 InvokeBB);
5789 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5790 "gc relocate should be linked to a statepoint", InvokeBB);
5791 } else {
5792 // In all other cases relocate should be tied to the statepoint directly.
5793 // This covers relocates on a normal return path of invoke statepoint and
5794 // relocates of a call statepoint.
5795 auto *Token = Call.getArgOperand(0);
5796 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5797 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5798 }
5799
5800 // Verify rest of the relocate arguments.
5801 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5802
5803 // Both the base and derived must be piped through the safepoint.
5804 Value *Base = Call.getArgOperand(1);
5805 Check(isa<ConstantInt>(Base),
5806 "gc.relocate operand #2 must be integer offset", Call);
5807
5808 Value *Derived = Call.getArgOperand(2);
5809 Check(isa<ConstantInt>(Derived),
5810 "gc.relocate operand #3 must be integer offset", Call);
5811
5812 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5813 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5814
5815 // Check the bounds
5816 if (isa<UndefValue>(StatepointCall))
5817 break;
5818 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5819 .getOperandBundle(LLVMContext::OB_gc_live)) {
5820 Check(BaseIndex < Opt->Inputs.size(),
5821 "gc.relocate: statepoint base index out of bounds", Call);
5822 Check(DerivedIndex < Opt->Inputs.size(),
5823 "gc.relocate: statepoint derived index out of bounds", Call);
5824 }
5825
5826 // Relocated value must be either a pointer type or vector-of-pointer type,
5827 // but gc_relocate does not need to return the same pointer type as the
5828 // relocated pointer. It can be casted to the correct type later if it's
5829 // desired. However, they must have the same address space and 'vectorness'
5830 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5831 auto *ResultType = Call.getType();
5832 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5833 auto *BaseType = Relocate.getBasePtr()->getType();
5834
5835 Check(BaseType->isPtrOrPtrVectorTy(),
5836 "gc.relocate: relocated value must be a pointer", Call);
5837 Check(DerivedType->isPtrOrPtrVectorTy(),
5838 "gc.relocate: relocated value must be a pointer", Call);
5839
5840 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5841 "gc.relocate: vector relocates to vector and pointer to pointer",
5842 Call);
5843 Check(
5844 ResultType->getPointerAddressSpace() ==
5845 DerivedType->getPointerAddressSpace(),
5846 "gc.relocate: relocating a pointer shouldn't change its address space",
5847 Call);
5848
5849 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5850 Check(GC, "gc.relocate: calling function must have GCStrategy",
5851 Call.getFunction());
5852 if (GC) {
5853 auto isGCPtr = [&GC](Type *PTy) {
5854 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5855 };
5856 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5857 Check(isGCPtr(BaseType),
5858 "gc.relocate: relocated value must be a gc pointer", Call);
5859 Check(isGCPtr(DerivedType),
5860 "gc.relocate: relocated value must be a gc pointer", Call);
5861 }
5862 break;
5863 }
5864 case Intrinsic::experimental_patchpoint: {
5865 if (Call.getCallingConv() == CallingConv::AnyReg) {
5866 Check(Call.getType()->isSingleValueType(),
5867 "patchpoint: invalid return type used with anyregcc", Call);
5868 }
5869 break;
5870 }
5871 case Intrinsic::eh_exceptioncode:
5872 case Intrinsic::eh_exceptionpointer: {
5873 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5874 "eh.exceptionpointer argument must be a catchpad", Call);
5875 break;
5876 }
5877 case Intrinsic::get_active_lane_mask: {
5878 Check(Call.getType()->isVectorTy(),
5879 "get_active_lane_mask: must return a "
5880 "vector",
5881 Call);
5882 auto *ElemTy = Call.getType()->getScalarType();
5883 Check(ElemTy->isIntegerTy(1),
5884 "get_active_lane_mask: element type is not "
5885 "i1",
5886 Call);
5887 break;
5888 }
5889 case Intrinsic::experimental_get_vector_length: {
5890 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5891 Check(!VF->isNegative() && !VF->isZero(),
5892 "get_vector_length: VF must be positive", Call);
5893 break;
5894 }
5895 case Intrinsic::masked_load: {
5896 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5897 Call);
5898
5899 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5900 Value *Mask = Call.getArgOperand(2);
5901 Value *PassThru = Call.getArgOperand(3);
5902 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5903 Call);
5904 Check(Alignment->getValue().isPowerOf2(),
5905 "masked_load: alignment must be a power of 2", Call);
5906 Check(PassThru->getType() == Call.getType(),
5907 "masked_load: pass through and return type must match", Call);
5908 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5909 cast<VectorType>(Call.getType())->getElementCount(),
5910 "masked_load: vector mask must be same length as return", Call);
5911 break;
5912 }
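  // Illustrative sketch for the masked_load checks above: the mask length
  // matches the result vector, the pass-through matches the return type, and
  // the alignment is a power of two:
  //
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //                                                  <4 x i1> %m, <4 x i32> %pt)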
5913 case Intrinsic::masked_store: {
5914 Value *Val = Call.getArgOperand(0);
5915 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5916 Value *Mask = Call.getArgOperand(3);
5917 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5918 Call);
5919 Check(Alignment->getValue().isPowerOf2(),
5920 "masked_store: alignment must be a power of 2", Call);
5921 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5922 cast<VectorType>(Val->getType())->getElementCount(),
5923 "masked_store: vector mask must be same length as value", Call);
5924 break;
5925 }
5926
5927 case Intrinsic::masked_gather: {
5928 const APInt &Alignment =
5929 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5930 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5931 "masked_gather: alignment must be 0 or a power of 2", Call);
5932 break;
5933 }
5934 case Intrinsic::masked_scatter: {
5935 const APInt &Alignment =
5936 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5937 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5938 "masked_scatter: alignment must be 0 or a power of 2", Call);
5939 break;
5940 }
5941
5942 case Intrinsic::experimental_guard: {
5943 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5944 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5945 "experimental_guard must have exactly one "
5946 "\"deopt\" operand bundle");
5947 break;
5948 }
5949
5950 case Intrinsic::experimental_deoptimize: {
5951 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5952 Call);
5953 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5954 "experimental_deoptimize must have exactly one "
5955 "\"deopt\" operand bundle");
5956 Check(Call.getType() == Call.getFunction()->getReturnType(),
5957 "experimental_deoptimize return type must match caller return type");
5958
5959 if (isa<CallInst>(Call)) {
5960 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5961 Check(RI,
5962 "calls to experimental_deoptimize must be followed by a return");
5963
5964 if (!Call.getType()->isVoidTy() && RI)
5965 Check(RI->getReturnValue() == &Call,
5966 "calls to experimental_deoptimize must be followed by a return "
5967 "of the value computed by experimental_deoptimize");
5968 }
5969
5970 break;
5971 }
5972 case Intrinsic::vastart: {
5973 Check(Call.getFunction()->isVarArg(),
5974 "va_start called in a non-varargs function");
5975 break;
5976 }
5977 case Intrinsic::vector_reduce_and:
5978 case Intrinsic::vector_reduce_or:
5979 case Intrinsic::vector_reduce_xor:
5980 case Intrinsic::vector_reduce_add:
5981 case Intrinsic::vector_reduce_mul:
5982 case Intrinsic::vector_reduce_smax:
5983 case Intrinsic::vector_reduce_smin:
5984 case Intrinsic::vector_reduce_umax:
5985 case Intrinsic::vector_reduce_umin: {
5986 Type *ArgTy = Call.getArgOperand(0)->getType();
5987 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5988 "Intrinsic has incorrect argument type!");
5989 break;
5990 }
5991 case Intrinsic::vector_reduce_fmax:
5992 case Intrinsic::vector_reduce_fmin: {
5993 Type *ArgTy = Call.getArgOperand(0)->getType();
5994 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5995 "Intrinsic has incorrect argument type!");
5996 break;
5997 }
5998 case Intrinsic::vector_reduce_fadd:
5999 case Intrinsic::vector_reduce_fmul: {
6000 // Unlike the other reductions, the first argument is a start value. The
6001 // second argument is the vector to be reduced.
6002 Type *ArgTy = Call.getArgOperand(1)->getType();
6003 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6004 "Intrinsic has incorrect argument type!");
6005 break;
6006 }
6007 case Intrinsic::smul_fix:
6008 case Intrinsic::smul_fix_sat:
6009 case Intrinsic::umul_fix:
6010 case Intrinsic::umul_fix_sat:
6011 case Intrinsic::sdiv_fix:
6012 case Intrinsic::sdiv_fix_sat:
6013 case Intrinsic::udiv_fix:
6014 case Intrinsic::udiv_fix_sat: {
6015 Value *Op1 = Call.getArgOperand(0);
6016 Value *Op2 = Call.getArgOperand(1);
6017 Check(Op1->getType()->isIntOrIntVectorTy(),
6018 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6019 "vector of ints");
6020 Check(Op2->getType()->isIntOrIntVectorTy(),
6021 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6022 "vector of ints");
6023
6024 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6025 Check(Op3->getType()->isIntegerTy(),
6026 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6027 Check(Op3->getBitWidth() <= 32,
6028 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6029
6030 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6031 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6032 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6033 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6034 "the operands");
6035 } else {
6036 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6037 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6038 "to the width of the operands");
6039 }
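// Editorial illustration: with i32 operands, llvm.smul.fix and llvm.sdiv.fix
// accept scales 0..31, while llvm.umul.fix and llvm.udiv.fix also accept a
// scale of 32, matching the two checks above.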
6040 break;
6041 }
6042 case Intrinsic::lrint:
6043 case Intrinsic::llrint:
6044 case Intrinsic::lround:
6045 case Intrinsic::llround: {
6046 Type *ValTy = Call.getArgOperand(0)->getType();
6047 Type *ResultTy = Call.getType();
6048 auto *VTy = dyn_cast<VectorType>(ValTy);
6049 auto *RTy = dyn_cast<VectorType>(ResultTy);
6050 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6051 ExpectedName + ": argument must be floating-point or vector "
6052 "of floating-points, and result must be integer or "
6053 "vector of integers",
6054 &Call);
6055 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6056 ExpectedName + ": argument and result disagree on vector use", &Call);
6057 if (VTy) {
6058 Check(VTy->getElementCount() == RTy->getElementCount(),
6059 ExpectedName + ": argument must be same length as result", &Call);
6060 }
6061 break;
6062 }
6063 case Intrinsic::bswap: {
6064 Type *Ty = Call.getType();
6065 unsigned Size = Ty->getScalarSizeInBits();
6066 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6067 break;
6068 }
6069 case Intrinsic::invariant_start: {
6070 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6071 Check(InvariantSize &&
6072 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6073 "invariant_start parameter must be -1, 0 or a positive number",
6074 &Call);
6075 break;
6076 }
6077 case Intrinsic::matrix_multiply:
6078 case Intrinsic::matrix_transpose:
6079 case Intrinsic::matrix_column_major_load:
6080 case Intrinsic::matrix_column_major_store: {
6081 Function *IF = Call.getCalledFunction();
6082 ConstantInt *Stride = nullptr;
6083 ConstantInt *NumRows;
6084 ConstantInt *NumColumns;
6085 VectorType *ResultTy;
6086 Type *Op0ElemTy = nullptr;
6087 Type *Op1ElemTy = nullptr;
6088 switch (ID) {
6089 case Intrinsic::matrix_multiply: {
6090 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6091 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6092 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6093 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6094 ->getNumElements() ==
6095 NumRows->getZExtValue() * N->getZExtValue(),
6096 "First argument of a matrix operation does not match specified "
6097 "shape!");
6098 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6099 ->getNumElements() ==
6100 N->getZExtValue() * NumColumns->getZExtValue(),
6101 "Second argument of a matrix operation does not match specified "
6102 "shape!");
6103
6104 ResultTy = cast<VectorType>(Call.getType());
6105 Op0ElemTy =
6106 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6107 Op1ElemTy =
6108 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6109 break;
6110 }
6111 case Intrinsic::matrix_transpose:
6112 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6113 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6114 ResultTy = cast<VectorType>(Call.getType());
6115 Op0ElemTy =
6116 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6117 break;
6118 case Intrinsic::matrix_column_major_load: {
6119 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6120 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6121 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6122 ResultTy = cast<VectorType>(Call.getType());
6123 break;
6124 }
6125 case Intrinsic::matrix_column_major_store: {
6126 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6127 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6128 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6129 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6130 Op0ElemTy =
6131 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6132 break;
6133 }
6134 default:
6135 llvm_unreachable("unexpected intrinsic");
6136 }
6137
6138 Check(ResultTy->getElementType()->isIntegerTy() ||
6139 ResultTy->getElementType()->isFloatingPointTy(),
6140 "Result type must be an integer or floating-point type!", IF);
6141
6142 if (Op0ElemTy)
6143 Check(ResultTy->getElementType() == Op0ElemTy,
6144 "Vector element type mismatch of the result and first operand "
6145 "vector!",
6146 IF);
6147
6148 if (Op1ElemTy)
6149 Check(ResultTy->getElementType() == Op1ElemTy,
6150 "Vector element type mismatch of the result and second operand "
6151 "vector!",
6152 IF);
6153
6154 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6155 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6156 "Result of a matrix operation does not fit in the returned vector!");
6157
6158 if (Stride)
6159 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6160 "Stride must be greater or equal than the number of rows!", IF);
6161
6162 break;
6163 }
6164 case Intrinsic::vector_splice: {
6165 VectorType *VecTy = cast<VectorType>(Call.getType());
6166 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6167 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6168 if (Call.getParent() && Call.getParent()->getParent()) {
6169 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6170 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6171 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6172 }
6173 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6174 (Idx >= 0 && Idx < KnownMinNumElements),
6175 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6176 "known minimum number of elements in the vector. For scalable "
6177 "vectors the minimum number of elements is determined from "
6178 "vscale_range.",
6179 &Call);
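// Editorial illustration: splicing two <4 x i32> vectors (absent a
// vscale_range attribute) accepts indices -4..3; for <vscale x 4 x i32> in a
// function with vscale_range(2,2) the known minimum becomes 8, so -8..7 is
// accepted.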
6180 break;
6181 }
6182 case Intrinsic::stepvector: {
6183 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6184 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6185 VecTy->getScalarSizeInBits() >= 8,
6186 "stepvector only supported for vectors of integers "
6187 "with a bitwidth of at least 8.",
6188 &Call);
6189 break;
6190 }
6191 case Intrinsic::experimental_vector_match: {
6192 Value *Op1 = Call.getArgOperand(0);
6193 Value *Op2 = Call.getArgOperand(1);
6194 Value *Mask = Call.getArgOperand(2);
6195
6196 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6197 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6198 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6199
6200 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6201 Check(isa<FixedVectorType>(Op2Ty),
6202 "Second operand must be a fixed length vector.", &Call);
6203 Check(Op1Ty->getElementType()->isIntegerTy(),
6204 "First operand must be a vector of integers.", &Call);
6205 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6206 "First two operands must have the same element type.", &Call);
6207 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6208 "First operand and mask must have the same number of elements.",
6209 &Call);
6210 Check(MaskTy->getElementType()->isIntegerTy(1),
6211 "Mask must be a vector of i1's.", &Call);
6212 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6213 &Call);
6214 break;
6215 }
6216 case Intrinsic::vector_insert: {
6217 Value *Vec = Call.getArgOperand(0);
6218 Value *SubVec = Call.getArgOperand(1);
6219 Value *Idx = Call.getArgOperand(2);
6220 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6221
6222 VectorType *VecTy = cast<VectorType>(Vec->getType());
6223 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6224
6225 ElementCount VecEC = VecTy->getElementCount();
6226 ElementCount SubVecEC = SubVecTy->getElementCount();
6227 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6228 "vector_insert parameters must have the same element "
6229 "type.",
6230 &Call);
6231 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6232 "vector_insert index must be a constant multiple of "
6233 "the subvector's known minimum vector length.");
6234
6235 // If this insertion is not the 'mixed' case where a fixed vector is
6236 // inserted into a scalable vector, ensure that the insertion of the
6237 // subvector does not overrun the parent vector.
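// Editorial illustration: inserting <2 x i64> into <4 x i64> is accepted only
// at indices 0 and 2, whereas inserting a fixed <2 x i64> into a scalable
// <vscale x 2 x i64> is the 'mixed' case and skips this bound check.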
6238 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6239 Check(IdxN < VecEC.getKnownMinValue() &&
6240 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6241 "subvector operand of vector_insert would overrun the "
6242 "vector being inserted into.");
6243 }
6244 break;
6245 }
6246 case Intrinsic::vector_extract: {
6247 Value *Vec = Call.getArgOperand(0);
6248 Value *Idx = Call.getArgOperand(1);
6249 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6250
6251 VectorType *ResultTy = cast<VectorType>(Call.getType());
6252 VectorType *VecTy = cast<VectorType>(Vec->getType());
6253
6254 ElementCount VecEC = VecTy->getElementCount();
6255 ElementCount ResultEC = ResultTy->getElementCount();
6256
6257 Check(ResultTy->getElementType() == VecTy->getElementType(),
6258 "vector_extract result must have the same element "
6259 "type as the input vector.",
6260 &Call);
6261 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6262 "vector_extract index must be a constant multiple of "
6263 "the result type's known minimum vector length.");
6264
6265 // If this extraction is not the 'mixed' case where a fixed vector is
6266 // extracted from a scalable vector, ensure that the extraction does not
6267 // overrun the parent vector.
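// Editorial illustration: extracting <4 x i32> from <8 x i32> is accepted
// only at indices 0 and 4; extracting a fixed subvector from a scalable
// vector is the 'mixed' case and skips this bound check.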
6268 if (VecEC.isScalable() == ResultEC.isScalable()) {
6269 Check(IdxN < VecEC.getKnownMinValue() &&
6270 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6271 "vector_extract would overrun.");
6272 }
6273 break;
6274 }
6275 case Intrinsic::experimental_vector_partial_reduce_add: {
6276 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6277 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6278
6279 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6280 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6281
6282 Check((VecWidth % AccWidth) == 0,
6283 "Invalid vector widths for partial "
6284 "reduction. The width of the input vector "
6285 "must be a positive integer multiple of "
6286 "the width of the accumulator vector.");
6287 break;
6288 }
6289 case Intrinsic::experimental_noalias_scope_decl: {
6290 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6291 break;
6292 }
6293 case Intrinsic::preserve_array_access_index:
6294 case Intrinsic::preserve_struct_access_index:
6295 case Intrinsic::aarch64_ldaxr:
6296 case Intrinsic::aarch64_ldxr:
6297 case Intrinsic::arm_ldaex:
6298 case Intrinsic::arm_ldrex: {
6299 Type *ElemTy = Call.getParamElementType(0);
6300 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6301 &Call);
6302 break;
6303 }
6304 case Intrinsic::aarch64_stlxr:
6305 case Intrinsic::aarch64_stxr:
6306 case Intrinsic::arm_stlex:
6307 case Intrinsic::arm_strex: {
6308 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6309 Check(ElemTy,
6310 "Intrinsic requires elementtype attribute on second argument.",
6311 &Call);
6312 break;
6313 }
6314 case Intrinsic::aarch64_prefetch: {
6315 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6316 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6317 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6318 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6319 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6320 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6321 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6322 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6323 break;
6324 }
6325 case Intrinsic::callbr_landingpad: {
6326 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6327 Check(CBR, "intrinsic requires callbr operand", &Call);
6328 if (!CBR)
6329 break;
6330
6331 const BasicBlock *LandingPadBB = Call.getParent();
6332 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6333 if (!PredBB) {
6334 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6335 break;
6336 }
6337 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6338 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6339 &Call);
6340 break;
6341 }
6342 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6343 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6344 "block in indirect destination list",
6345 &Call);
6346 const Instruction &First = *LandingPadBB->begin();
6347 Check(&First == &Call, "No other instructions may precede intrinsic",
6348 &Call);
6349 break;
6350 }
6351 case Intrinsic::amdgcn_cs_chain: {
6352 auto CallerCC = Call.getCaller()->getCallingConv();
6353 switch (CallerCC) {
6354 case CallingConv::AMDGPU_CS:
6355 case CallingConv::AMDGPU_CS_Chain:
6356 case CallingConv::AMDGPU_CS_ChainPreserve:
6357 break;
6358 default:
6359 CheckFailed("Intrinsic can only be used from functions with the "
6360 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6361 "calling conventions",
6362 &Call);
6363 break;
6364 }
6365
6366 Check(Call.paramHasAttr(2, Attribute::InReg),
6367 "SGPR arguments must have the `inreg` attribute", &Call);
6368 Check(!Call.paramHasAttr(3, Attribute::InReg),
6369 "VGPR arguments must not have the `inreg` attribute", &Call);
6370 break;
6371 }
6372 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6373 auto CallerCC = Call.getCaller()->getCallingConv();
6374 switch (CallerCC) {
6375 case CallingConv::AMDGPU_CS_Chain:
6376 case CallingConv::AMDGPU_CS_ChainPreserve:
6377 break;
6378 default:
6379 CheckFailed("Intrinsic can only be used from functions with the "
6380 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6381 "calling conventions",
6382 &Call);
6383 break;
6384 }
6385
6386 unsigned InactiveIdx = 1;
6387 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6388 "Value for inactive lanes must not have the `inreg` attribute",
6389 &Call);
6390 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6391 "Value for inactive lanes must be a function argument", &Call);
6392 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6393 "Value for inactive lanes must be a VGPR function argument", &Call);
6394 break;
6395 }
6396 case Intrinsic::amdgcn_s_prefetch_data: {
6397 Check(
6398 AMDGPU::isFlatGlobalAddrSpace(
6399 Call.getArgOperand(0)->getType()->getPointerAddressSpace()),
6400 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6401 break;
6402 }
6403 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6404 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6405 Value *Src0 = Call.getArgOperand(0);
6406 Value *Src1 = Call.getArgOperand(1);
6407
6408 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6409 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6410 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6411 Call.getArgOperand(3));
6412 Check(BLGP <= 4, "invalid value for blgp format", Call,
6413 Call.getArgOperand(4));
6414
6415 // AMDGPU::MFMAScaleFormats values
6416 auto getFormatNumRegs = [](unsigned FormatVal) {
6417 switch (FormatVal) {
6418 case 0:
6419 case 1:
6420 return 8u;
6421 case 2:
6422 case 3:
6423 return 6u;
6424 case 4:
6425 return 4u;
6426 default:
6427 llvm_unreachable("invalid format value");
6428 }
6429 };
6430
6431 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6432 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6433 return false;
6434 unsigned NumElts = Ty->getNumElements();
6435 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6436 };
6437
6438 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6439 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6440 Check(isValidSrcASrcBVector(Src0Ty),
6441 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6442 Check(isValidSrcASrcBVector(Src1Ty),
6443 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6444
6445 // Permit excess registers for the format.
6446 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6447 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6448 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6449 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6450 break;
6451 }
6452 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6453 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6454 Value *V = Call.getArgOperand(0);
6455 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6456 Check(RegCount % 8 == 0,
6457 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6458 Check((RegCount >= 24 && RegCount <= 256),
6459 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6460 break;
6461 }
6462 case Intrinsic::experimental_convergence_entry:
6463 case Intrinsic::experimental_convergence_anchor:
6464 break;
6465 case Intrinsic::experimental_convergence_loop:
6466 break;
6467 case Intrinsic::ptrmask: {
6468 Type *Ty0 = Call.getArgOperand(0)->getType();
6469 Type *Ty1 = Call.getArgOperand(1)->getType();
6470 Check(Ty0->isPtrOrPtrVectorTy(),
6471 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6472 "of pointers",
6473 &Call);
6474 Check(
6475 Ty0->isVectorTy() == Ty1->isVectorTy(),
6476 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6477 &Call);
6478 if (Ty0->isVectorTy())
6479 Check(cast<VectorType>(Ty0)->getElementCount() ==
6480 cast<VectorType>(Ty1)->getElementCount(),
6481 "llvm.ptrmask intrinsic arguments must have the same number of "
6482 "elements",
6483 &Call);
6484 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6485 "llvm.ptrmask intrinsic second argument bitwidth must match "
6486 "pointer index type size of first argument",
6487 &Call);
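// Editorial illustration: on a typical 64-bit target a plain 'ptr' in address
// space 0 has a 64-bit index type, so the mask must be an i64, e.g.
//   %a = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -8)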
6488 break;
6489 }
6490 case Intrinsic::threadlocal_address: {
6491 const Value &Arg0 = *Call.getArgOperand(0);
6492 Check(isa<GlobalValue>(Arg0),
6493 "llvm.threadlocal.address first argument must be a GlobalValue");
6494 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6495 "llvm.threadlocal.address operand isThreadLocal() must be true");
6496 break;
6497 }
6498 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cta:
6499 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cluster:
6500 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_gpu:
6501 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_sys: {
6502 unsigned size = cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue();
6503 Check(size == 128, "The only supported value for size operand is 128");
6504 break;
6505 }
6506 };
6507
6508 // Verify that there aren't any unmediated control transfers between funclets.
6509 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6510 Function *F = Call.getParent()->getParent();
6511 if (F->hasPersonalityFn() &&
6512 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6513 // Run EH funclet coloring on-demand and cache results for other intrinsic
6514 // calls in this function
6515 if (BlockEHFuncletColors.empty())
6516 BlockEHFuncletColors = colorEHFunclets(*F);
6517
6518 // Check for catch-/cleanup-pad in first funclet block
6519 bool InEHFunclet = false;
6520 BasicBlock *CallBB = Call.getParent();
6521 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6522 assert(CV.size() > 0 && "Uncolored block");
6523 for (BasicBlock *ColorFirstBB : CV)
6524 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6525 InEHFunclet = true;
6526
6527 // Check for funclet operand bundle
6528 bool HasToken = false;
6529 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6530 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6531 HasToken = true;
6532
6533 // This would cause silent code truncation in WinEHPrepare
6534 if (InEHFunclet)
6535 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6536 }
6537 }
6538}
6539
6540/// Carefully grab the subprogram from a local scope.
6541///
6542/// This carefully grabs the subprogram from a local scope, avoiding the
6543 /// built-in assertions that would typically fire.
6544 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6545 if (!LocalScope)
6546 return nullptr;
6547
6548 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6549 return SP;
6550
6551 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6552 return getSubprogram(LB->getRawScope());
6553
6554 // Just return null; broken scope chains are checked elsewhere.
6555 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6556 return nullptr;
6557}
6558
6559void Verifier::visit(DbgLabelRecord &DLR) {
6560 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6561 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6562
6563 // Ignore broken !dbg attachments; they're checked elsewhere.
6564 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6565 if (!isa<DILocation>(N))
6566 return;
6567
6568 BasicBlock *BB = DLR.getParent();
6569 Function *F = BB ? BB->getParent() : nullptr;
6570
6571 // The scopes for variables and !dbg attachments must agree.
6572 DILabel *Label = DLR.getLabel();
6573 DILocation *Loc = DLR.getDebugLoc();
6574 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6575
6576 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6577 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6578 if (!LabelSP || !LocSP)
6579 return;
6580
6581 CheckDI(LabelSP == LocSP,
6582 "mismatched subprogram between #dbg_label label and !dbg attachment",
6583 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6584 Loc->getScope()->getSubprogram());
6585}
6586
6587void Verifier::visit(DbgVariableRecord &DVR) {
6588 BasicBlock *BB = DVR.getParent();
6589 Function *F = BB->getParent();
6590
6591 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
6592 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
6593 DVR.getType() == DbgVariableRecord::LocationType::Assign,
6594 "invalid #dbg record type", &DVR, DVR.getType());
6595
6596 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6597 // DIArgList, or an empty MDNode (which is a legacy representation for an
6598 // "undef" location).
6599 auto *MD = DVR.getRawLocation();
6600 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6601 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6602 "invalid #dbg record address/value", &DVR, MD);
6603 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6604 visitValueAsMetadata(*VAM, F);
6605 else if (auto *AL = dyn_cast<DIArgList>(MD))
6606 visitDIArgList(*AL, F);
6607
6608 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6609 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6610 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6611
6612 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6613 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6614 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6615
6616 if (DVR.isDbgAssign()) {
6617 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6618 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6619 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6620 AreDebugLocsAllowed::No);
6621
6622 const auto *RawAddr = DVR.getRawAddress();
6623 // Similarly to the location above, the address for an assign
6624 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6625 // represents an undef address.
6626 CheckDI(
6627 isa<ValueAsMetadata>(RawAddr) ||
6628 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6629 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6630 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6631 visitValueAsMetadata(*VAM, F);
6632
6633 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6634 "invalid #dbg_assign address expression", &DVR,
6636 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6637
6638 // All of the linked instructions should be in the same function as DVR.
6639 for (Instruction *I : at::getAssignmentInsts(&DVR))
6640 CheckDI(DVR.getFunction() == I->getFunction(),
6641 "inst not in same function as #dbg_assign", I, &DVR);
6642 }
6643
6644 // This check is redundant with one in visitLocalVariable().
6645 DILocalVariable *Var = DVR.getVariable();
6646 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6647 Var->getRawType());
6648
6649 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6650 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6651 &DVR, DLNode);
6652 DILocation *Loc = DVR.getDebugLoc();
6653
6654 // The scopes for variables and !dbg attachments must agree.
6655 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6656 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6657 if (!VarSP || !LocSP)
6658 return; // Broken scope chains are checked elsewhere.
6659
6660 CheckDI(VarSP == LocSP,
6661 "mismatched subprogram between #dbg record variable and DILocation",
6662 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6663 Loc->getScope()->getSubprogram());
6664
6665 verifyFnArgs(DVR);
6666}
6667
6668void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6669 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6670 auto *RetTy = cast<VectorType>(VPCast->getType());
6671 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6672 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6673 "VP cast intrinsic first argument and result vector lengths must be "
6674 "equal",
6675 *VPCast);
6676
6677 switch (VPCast->getIntrinsicID()) {
6678 default:
6679 llvm_unreachable("Unknown VP cast intrinsic");
6680 case Intrinsic::vp_trunc:
6681 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6682 "llvm.vp.trunc intrinsic first argument and result element type "
6683 "must be integer",
6684 *VPCast);
6685 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6686 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6687 "larger than the bit size of the return type",
6688 *VPCast);
6689 break;
6690 case Intrinsic::vp_zext:
6691 case Intrinsic::vp_sext:
6692 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6693 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6694 "element type must be integer",
6695 *VPCast);
6696 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6697 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6698 "argument must be smaller than the bit size of the return type",
6699 *VPCast);
6700 break;
6701 case Intrinsic::vp_fptoui:
6702 case Intrinsic::vp_fptosi:
6703 case Intrinsic::vp_lrint:
6704 case Intrinsic::vp_llrint:
6705 Check(
6706 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6707 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6708 "type must be floating-point and result element type must be integer",
6709 *VPCast);
6710 break;
6711 case Intrinsic::vp_uitofp:
6712 case Intrinsic::vp_sitofp:
6713 Check(
6714 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6715 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6716 "type must be integer and result element type must be floating-point",
6717 *VPCast);
6718 break;
6719 case Intrinsic::vp_fptrunc:
6720 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6721 "llvm.vp.fptrunc intrinsic first argument and result element type "
6722 "must be floating-point",
6723 *VPCast);
6724 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6725 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6726 "larger than the bit size of the return type",
6727 *VPCast);
6728 break;
6729 case Intrinsic::vp_fpext:
6730 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6731 "llvm.vp.fpext intrinsic first argument and result element type "
6732 "must be floating-point",
6733 *VPCast);
6734 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6735 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6736 "smaller than the bit size of the return type",
6737 *VPCast);
6738 break;
6739 case Intrinsic::vp_ptrtoint:
6740 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6741 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6742 "pointer and result element type must be integer",
6743 *VPCast);
6744 break;
6745 case Intrinsic::vp_inttoptr:
6746 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6747 "llvm.vp.inttoptr intrinsic first argument element type must be "
6748 "integer and result element type must be pointer",
6749 *VPCast);
6750 break;
6751 }
6752 }
6753 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6754 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6755 Check(CmpInst::isFPPredicate(Pred),
6756 "invalid predicate for VP FP comparison intrinsic", &VPI);
6757 }
6758 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6759 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6760 Check(CmpInst::isIntPredicate(Pred),
6761 "invalid predicate for VP integer comparison intrinsic", &VPI);
6762 }
6763 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6764 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6765 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6766 "unsupported bits for llvm.vp.is.fpclass test mask");
6767 }
6768}
6769
6770void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6771 unsigned NumOperands = FPI.getNonMetadataArgCount();
6772 bool HasRoundingMD =
6774
6775 // Add the expected number of metadata operands.
6776 NumOperands += (1 + HasRoundingMD);
6777
6778 // Compare intrinsics carry an extra predicate metadata operand.
6779 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6780 NumOperands += 1;
6781 Check((FPI.arg_size() == NumOperands),
6782 "invalid arguments for constrained FP intrinsic", &FPI);
6783
6784 switch (FPI.getIntrinsicID()) {
6785 case Intrinsic::experimental_constrained_lrint:
6786 case Intrinsic::experimental_constrained_llrint: {
6787 Type *ValTy = FPI.getArgOperand(0)->getType();
6788 Type *ResultTy = FPI.getType();
6789 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6790 "Intrinsic does not support vectors", &FPI);
6791 break;
6792 }
6793
6794 case Intrinsic::experimental_constrained_lround:
6795 case Intrinsic::experimental_constrained_llround: {
6796 Type *ValTy = FPI.getArgOperand(0)->getType();
6797 Type *ResultTy = FPI.getType();
6798 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6799 "Intrinsic does not support vectors", &FPI);
6800 break;
6801 }
6802
6803 case Intrinsic::experimental_constrained_fcmp:
6804 case Intrinsic::experimental_constrained_fcmps: {
6805 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6806 Check(CmpInst::isFPPredicate(Pred),
6807 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6808 break;
6809 }
6810
6811 case Intrinsic::experimental_constrained_fptosi:
6812 case Intrinsic::experimental_constrained_fptoui: {
6813 Value *Operand = FPI.getArgOperand(0);
6814 ElementCount SrcEC;
6815 Check(Operand->getType()->isFPOrFPVectorTy(),
6816 "Intrinsic first argument must be floating point", &FPI);
6817 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6818 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6819 }
6820
6821 Operand = &FPI;
6822 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6823 "Intrinsic first argument and result disagree on vector use", &FPI);
6824 Check(Operand->getType()->isIntOrIntVectorTy(),
6825 "Intrinsic result must be an integer", &FPI);
6826 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6827 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6828 "Intrinsic first argument and result vector lengths must be equal",
6829 &FPI);
6830 }
6831 break;
6832 }
6833
6834 case Intrinsic::experimental_constrained_sitofp:
6835 case Intrinsic::experimental_constrained_uitofp: {
6836 Value *Operand = FPI.getArgOperand(0);
6837 ElementCount SrcEC;
6838 Check(Operand->getType()->isIntOrIntVectorTy(),
6839 "Intrinsic first argument must be integer", &FPI);
6840 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6841 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6842 }
6843
6844 Operand = &FPI;
6845 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6846 "Intrinsic first argument and result disagree on vector use", &FPI);
6847 Check(Operand->getType()->isFPOrFPVectorTy(),
6848 "Intrinsic result must be a floating point", &FPI);
6849 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6850 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6851 "Intrinsic first argument and result vector lengths must be equal",
6852 &FPI);
6853 }
6854 break;
6855 }
6856
6857 case Intrinsic::experimental_constrained_fptrunc:
6858 case Intrinsic::experimental_constrained_fpext: {
6859 Value *Operand = FPI.getArgOperand(0);
6860 Type *OperandTy = Operand->getType();
6861 Value *Result = &FPI;
6862 Type *ResultTy = Result->getType();
6863 Check(OperandTy->isFPOrFPVectorTy(),
6864 "Intrinsic first argument must be FP or FP vector", &FPI);
6865 Check(ResultTy->isFPOrFPVectorTy(),
6866 "Intrinsic result must be FP or FP vector", &FPI);
6867 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6868 "Intrinsic first argument and result disagree on vector use", &FPI);
6869 if (OperandTy->isVectorTy()) {
6870 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6871 cast<VectorType>(ResultTy)->getElementCount(),
6872 "Intrinsic first argument and result vector lengths must be equal",
6873 &FPI);
6874 }
6875 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6876 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6877 "Intrinsic first argument's type must be larger than result type",
6878 &FPI);
6879 } else {
6880 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6881 "Intrinsic first argument's type must be smaller than result type",
6882 &FPI);
6883 }
6884 break;
6885 }
6886
6887 default:
6888 break;
6889 }
6890
6891 // If a non-metadata argument is passed in a metadata slot then the
6892 // error will be caught earlier when the incorrect argument doesn't
6893 // match the specification in the intrinsic call table. Thus, no
6894 // argument type check is needed here.
6895
6896 Check(FPI.getExceptionBehavior().has_value(),
6897 "invalid exception behavior argument", &FPI);
6898 if (HasRoundingMD) {
6899 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6900 &FPI);
6901 }
6902}
6903
6904void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6905 auto *MD = DII.getRawLocation();
6906 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6907 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6908 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6909 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6910 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6911 DII.getRawVariable());
6912 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6913 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6914 DII.getRawExpression());
6915
6916 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6917 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6918 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6919 DAI->getRawAssignID());
6920 const auto *RawAddr = DAI->getRawAddress();
6921 CheckDI(
6922 isa<ValueAsMetadata>(RawAddr) ||
6923 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6924 "invalid llvm.dbg.assign intrinsic address", &DII,
6925 DAI->getRawAddress());
6926 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6927 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6928 DAI->getRawAddressExpression());
6929 // All of the linked instructions should be in the same function as DII.
6930 for (Instruction *I : at::getAssignmentInsts(DAI))
6931 CheckDI(DAI->getFunction() == I->getFunction(),
6932 "inst not in same function as dbg.assign", I, DAI);
6933 }
6934
6935 // Ignore broken !dbg attachments; they're checked elsewhere.
6936 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6937 if (!isa<DILocation>(N))
6938 return;
6939
6940 BasicBlock *BB = DII.getParent();
6941 Function *F = BB ? BB->getParent() : nullptr;
6942
6943 // The scopes for variables and !dbg attachments must agree.
6944 DILocalVariable *Var = DII.getVariable();
6945 DILocation *Loc = DII.getDebugLoc();
6946 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6947 &DII, BB, F);
6948
6949 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6950 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6951 if (!VarSP || !LocSP)
6952 return; // Broken scope chains are checked elsewhere.
6953
6954 CheckDI(VarSP == LocSP,
6955 "mismatched subprogram between llvm.dbg." + Kind +
6956 " variable and !dbg attachment",
6957 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6958 Loc->getScope()->getSubprogram());
6959
6960 // This check is redundant with one in visitLocalVariable().
6961 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6962 Var->getRawType());
6963 verifyFnArgs(DII);
6964}
6965
6966void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6967 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6968 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6969 DLI.getRawLabel());
6970
6971 // Ignore broken !dbg attachments; they're checked elsewhere.
6972 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6973 if (!isa<DILocation>(N))
6974 return;
6975
6976 BasicBlock *BB = DLI.getParent();
6977 Function *F = BB ? BB->getParent() : nullptr;
6978
6979 // The scopes for variables and !dbg attachments must agree.
6980 DILabel *Label = DLI.getLabel();
6981 DILocation *Loc = DLI.getDebugLoc();
6982 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6983 BB, F);
6984
6985 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6986 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6987 if (!LabelSP || !LocSP)
6988 return;
6989
6990 CheckDI(LabelSP == LocSP,
6991 "mismatched subprogram between llvm.dbg." + Kind +
6992 " label and !dbg attachment",
6993 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6994 Loc->getScope()->getSubprogram());
6995}
6996
6997void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6998 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6999 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
7000
7001 // We don't know whether this intrinsic verified correctly.
7002 if (!V || !E || !E->isValid())
7003 return;
7004
7005 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7006 auto Fragment = E->getFragmentInfo();
7007 if (!Fragment)
7008 return;
7009
7010 // The frontend helps out GDB by emitting the members of local anonymous
7011 // unions as artificial local variables with shared storage. When SROA splits
7012 // the storage for artificial local variables that are smaller than the entire
7013 // union, the overhang piece will be outside of the allotted space for the
7014 // variable and this check fails.
7015 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7016 if (V->isArtificial())
7017 return;
7018
7019 verifyFragmentExpression(*V, *Fragment, &I);
7020}
7021void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7022 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7023 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7024
7025 // We don't know whether this intrinsic verified correctly.
7026 if (!V || !E || !E->isValid())
7027 return;
7028
7029 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7030 auto Fragment = E->getFragmentInfo();
7031 if (!Fragment)
7032 return;
7033
7034 // The frontend helps out GDB by emitting the members of local anonymous
7035 // unions as artificial local variables with shared storage. When SROA splits
7036 // the storage for artificial local variables that are smaller than the entire
7037 // union, the overhang piece will be outside of the allotted space for the
7038 // variable and this check fails.
7039 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7040 if (V->isArtificial())
7041 return;
7042
7043 verifyFragmentExpression(*V, *Fragment, &DVR);
7044}
7045
7046template <typename ValueOrMetadata>
7047 void Verifier::verifyFragmentExpression(const DIVariable &V,
7048 DIExpression::FragmentInfo Fragment,
7049 ValueOrMetadata *Desc) {
7050 // If there's no size, the type is broken, but that should be checked
7051 // elsewhere.
7052 auto VarSize = V.getSizeInBits();
7053 if (!VarSize)
7054 return;
7055
7056 unsigned FragSize = Fragment.SizeInBits;
7057 unsigned FragOffset = Fragment.OffsetInBits;
7058 CheckDI(FragSize + FragOffset <= *VarSize,
7059 "fragment is larger than or outside of variable", Desc, &V);
7060 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7061}
7062
7063void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
7064 // This function does not take the scope of noninlined function arguments into
7065 // account. Don't run it if the current function is nodebug, because it may
7066 // contain inlined debug intrinsics.
7067 if (!HasDebugInfo)
7068 return;
7069
7070 // For performance reasons only check non-inlined ones.
7071 if (I.getDebugLoc()->getInlinedAt())
7072 return;
7073
7074 DILocalVariable *Var = I.getVariable();
7075 CheckDI(Var, "dbg intrinsic without variable");
7076
7077 unsigned ArgNo = Var->getArg();
7078 if (!ArgNo)
7079 return;
7080
7081 // Verify there are no duplicate function argument debug info entries.
7082 // These will cause hard-to-debug assertions in the DWARF backend.
7083 if (DebugFnArgs.size() < ArgNo)
7084 DebugFnArgs.resize(ArgNo, nullptr);
7085
7086 auto *Prev = DebugFnArgs[ArgNo - 1];
7087 DebugFnArgs[ArgNo - 1] = Var;
7088 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
7089 Prev, Var);
7090}
7091void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7092 // This function does not take the scope of noninlined function arguments into
7093 // account. Don't run it if the current function is nodebug, because it may
7094 // contain inlined debug intrinsics.
7095 if (!HasDebugInfo)
7096 return;
7097
7098 // For performance reasons only check non-inlined ones.
7099 if (DVR.getDebugLoc()->getInlinedAt())
7100 return;
7101
7102 DILocalVariable *Var = DVR.getVariable();
7103 CheckDI(Var, "#dbg record without variable");
7104
7105 unsigned ArgNo = Var->getArg();
7106 if (!ArgNo)
7107 return;
7108
7109 // Verify there are no duplicate function argument debug info entries.
7110 // These will cause hard-to-debug assertions in the DWARF backend.
7111 if (DebugFnArgs.size() < ArgNo)
7112 DebugFnArgs.resize(ArgNo, nullptr);
7113
7114 auto *Prev = DebugFnArgs[ArgNo - 1];
7115 DebugFnArgs[ArgNo - 1] = Var;
7116 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7117 Prev, Var);
7118}
7119
7120void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
7121 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
7122
7123 // We don't know whether this intrinsic verified correctly.
7124 if (!E || !E->isValid())
7125 return;
7126
7127 if (isa<ValueAsMetadata>(I.getRawLocation())) {
7128 Value *VarValue = I.getVariableLocationOp(0);
7129 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7130 return;
7131 // We allow EntryValues for swift async arguments, as they have an
7132 // ABI-guarantee to be turned into a specific register.
7133 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7134 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7135 return;
7136 }
7137
7138 CheckDI(!E->isEntryValue(),
7139 "Entry values are only allowed in MIR unless they target a "
7140 "swiftasync Argument",
7141 &I);
7142}
7143void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7144 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7145
7146 // We don't know whether this intrinsic verified correctly.
7147 if (!E || !E->isValid())
7148 return;
7149
7150 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7151 Value *VarValue = DVR.getVariableLocationOp(0);
7152 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7153 return;
7154 // We allow EntryValues for swift async arguments, as they have an
7155 // ABI-guarantee to be turned into a specific register.
7156 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7157 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7158 return;
7159 }
7160
7161 CheckDI(!E->isEntryValue(),
7162 "Entry values are only allowed in MIR unless they target a "
7163 "swiftasync Argument",
7164 &DVR);
7165}
7166
7167void Verifier::verifyCompileUnits() {
7168 // When more than one Module is imported into the same context, such as during
7169 // an LTO build before linking the modules, ODR type uniquing may cause types
7170 // to point to a different CU. This check does not make sense in this case.
7171 if (M.getContext().isODRUniquingDebugTypes())
7172 return;
7173 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7174 SmallPtrSet<const Metadata *, 2> Listed;
7175 if (CUs)
7176 Listed.insert(CUs->op_begin(), CUs->op_end());
7177 for (const auto *CU : CUVisited)
7178 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7179 CUVisited.clear();
7180}
7181
7182void Verifier::verifyDeoptimizeCallingConvs() {
7183 if (DeoptimizeDeclarations.empty())
7184 return;
7185
7186 const Function *First = DeoptimizeDeclarations[0];
7187 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7188 Check(First->getCallingConv() == F->getCallingConv(),
7189 "All llvm.experimental.deoptimize declarations must have the same "
7190 "calling convention",
7191 First, F);
7192 }
7193}
7194
7195void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7196 const OperandBundleUse &BU) {
7197 FunctionType *FTy = Call.getFunctionType();
7198
7199 Check((FTy->getReturnType()->isPointerTy() ||
7200 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7201 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7202 "function returning a pointer or a non-returning function that has a "
7203 "void return type",
7204 Call);
7205
7206 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7207 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7208 "an argument",
7209 Call);
7210
7211 auto *Fn = cast<Function>(BU.Inputs.front());
7212 Intrinsic::ID IID = Fn->getIntrinsicID();
7213
7214 if (IID) {
7215 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7216 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7217 "invalid function argument", Call);
7218 } else {
7219 StringRef FnName = Fn->getName();
7220 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7221 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7222 "invalid function argument", Call);
7223 }
7224}
7225
7226void Verifier::verifyNoAliasScopeDecl() {
7227 if (NoAliasScopeDecls.empty())
7228 return;
7229
7230 // Only a single scope may be declared at a time.
7231 for (auto *II : NoAliasScopeDecls) {
7232 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7233 "Not a llvm.experimental.noalias.scope.decl ?");
7234 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7235 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7236 Check(ScopeListMV != nullptr,
7237 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7238 "argument",
7239 II);
7240
7241 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7242 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7243 Check(ScopeListMD->getNumOperands() == 1,
7244 "!id.scope.list must point to a list with a single scope", II);
7245 visitAliasScopeListMetadata(ScopeListMD);
7246 }
7247
7248 // Only check the domination rule when requested. Once all passes have been
7249 // adapted this option can go away.
7250 if (!VerifyNoAliasScopeDomination)
7251 return;
7252
7253 // Now sort the intrinsics based on the scope MDNode so that declarations of
7254 // the same scopes are next to each other.
7255 auto GetScope = [](IntrinsicInst *II) {
7256 const auto *ScopeListMV = cast<MetadataAsValue>(
7257 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7258 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7259 };
7260
7261 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7262 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7263 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7264 return GetScope(Lhs) < GetScope(Rhs);
7265 };
7266
7267 llvm::sort(NoAliasScopeDecls, Compare);
7268
7269 // Go over the intrinsics and check that for the same scope, they are not
7270 // dominating each other.
7271 auto ItCurrent = NoAliasScopeDecls.begin();
7272 while (ItCurrent != NoAliasScopeDecls.end()) {
7273 auto CurScope = GetScope(*ItCurrent);
7274 auto ItNext = ItCurrent;
7275 do {
7276 ++ItNext;
7277 } while (ItNext != NoAliasScopeDecls.end() &&
7278 GetScope(*ItNext) == CurScope);
7279
7280 // [ItCurrent, ItNext) represents the declarations for the same scope.
7281 // Ensure they are not dominating each other, but only if it is not too
7282 // expensive.
7283 if (ItNext - ItCurrent < 32)
7284 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7285 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7286 if (I != J)
7287 Check(!DT.dominates(I, J),
7288 "llvm.experimental.noalias.scope.decl dominates another one "
7289 "with the same scope",
7290 I);
7291 ItCurrent = ItNext;
7292 }
7293}
7294
7295//===----------------------------------------------------------------------===//
7296// Implement the public interfaces to this file...
7297//===----------------------------------------------------------------------===//
7298
7299 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7300 Function &F = const_cast<Function &>(f);
7301
7302 // Don't use a raw_null_ostream. Printing IR is expensive.
7303 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7304
7305 // Note that this function's return value is inverted from what you would
7306 // expect of a function called "verify".
7307 return !V.verify(F);
7308}
7309
7310 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7311 bool *BrokenDebugInfo) {
7312 // Don't use a raw_null_ostream. Printing IR is expensive.
7313 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7314
7315 bool Broken = false;
7316 for (const Function &F : M)
7317 Broken |= !V.verify(F);
7318
7319 Broken |= !V.verify();
7320 if (BrokenDebugInfo)
7321 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7322 // Note that this function's return value is inverted from what you would
7323 // expect of a function called "verify".
7324 return Broken;
7325}
7326
7327namespace {
7328
7329struct VerifierLegacyPass : public FunctionPass {
7330 static char ID;
7331
7332 std::unique_ptr<Verifier> V;
7333 bool FatalErrors = true;
7334
7335 VerifierLegacyPass() : FunctionPass(ID) {
7336 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7337 }
7338 explicit VerifierLegacyPass(bool FatalErrors)
7339 : FunctionPass(ID),
7340 FatalErrors(FatalErrors) {
7341 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7342 }
7343
7344 bool doInitialization(Module &M) override {
7345 V = std::make_unique<Verifier>(
7346 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7347 return false;
7348 }
7349
7350 bool runOnFunction(Function &F) override {
7351 if (!V->verify(F) && FatalErrors) {
7352 errs() << "in function " << F.getName() << '\n';
7353 report_fatal_error("Broken function found, compilation aborted!");
7354 }
7355 return false;
7356 }
7357
7358 bool doFinalization(Module &M) override {
7359 bool HasErrors = false;
7360 for (Function &F : M)
7361 if (F.isDeclaration())
7362 HasErrors |= !V->verify(F);
7363
7364 HasErrors |= !V->verify();
7365 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7366 report_fatal_error("Broken module found, compilation aborted!");
7367 return false;
7368 }
7369
7370 void getAnalysisUsage(AnalysisUsage &AU) const override {
7371 AU.setPreservesAll();
7372 }
7373};
7374
7375} // end anonymous namespace
7376
7377/// Helper to issue failure from the TBAA verification
7378template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7379 if (Diagnostic)
7380 return Diagnostic->CheckFailed(Args...);
7381}
7382
7383#define CheckTBAA(C, ...) \
7384 do { \
7385 if (!(C)) { \
7386 CheckFailed(__VA_ARGS__); \
7387 return false; \
7388 } \
7389 } while (false)
7390
7391/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7392/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7393/// struct-type node describing an aggregate data structure (like a struct).
7394TBAAVerifier::TBAABaseNodeSummary
7395TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7396 bool IsNewFormat) {
7397 if (BaseNode->getNumOperands() < 2) {
7398 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7399 return {true, ~0u};
7400 }
7401
7402 auto Itr = TBAABaseNodes.find(BaseNode);
7403 if (Itr != TBAABaseNodes.end())
7404 return Itr->second;
7405
7406 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7407 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7408 (void)InsertResult;
7409 assert(InsertResult.second && "We just checked!");
7410 return Result;
7411}
7412
7413TBAAVerifier::TBAABaseNodeSummary
7414TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7415 bool IsNewFormat) {
7416 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7417
7418 if (BaseNode->getNumOperands() == 2) {
7419 // Scalar nodes can only be accessed at offset 0.
7420 return isValidScalarTBAANode(BaseNode)
7421 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7422 : InvalidNode;
7423 }
7424
7425 if (IsNewFormat) {
7426 if (BaseNode->getNumOperands() % 3 != 0) {
7427 CheckFailed("Access tag nodes must have the number of operands that is a "
7428 "multiple of 3!", BaseNode);
7429 return InvalidNode;
7430 }
7431 } else {
7432 if (BaseNode->getNumOperands() % 2 != 1) {
7433 CheckFailed("Struct tag nodes must have an odd number of operands!",
7434 BaseNode);
7435 return InvalidNode;
7436 }
7437 }
7438
7439 // Check the type size field.
7440 if (IsNewFormat) {
7441 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7442 BaseNode->getOperand(1));
7443 if (!TypeSizeNode) {
7444 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7445 return InvalidNode;
7446 }
7447 }
7448
7449 // Check the type name field. In the new format it can be anything.
7450 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7451 CheckFailed("Struct tag nodes have a string as their first operand",
7452 BaseNode);
7453 return InvalidNode;
7454 }
7455
7456 bool Failed = false;
7457
7458 std::optional<APInt> PrevOffset;
7459 unsigned BitWidth = ~0u;
7460
7461 // We've already checked that BaseNode is not a degenerate root node with one
7462 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7463 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7464 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7465 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7466 Idx += NumOpsPerField) {
7467 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7468 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7469 if (!isa<MDNode>(FieldTy)) {
7470 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7471 Failed = true;
7472 continue;
7473 }
7474
7475 auto *OffsetEntryCI =
7476 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7477 if (!OffsetEntryCI) {
7478 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7479 Failed = true;
7480 continue;
7481 }
7482
7483 if (BitWidth == ~0u)
7484 BitWidth = OffsetEntryCI->getBitWidth();
7485
7486 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7487 CheckFailed(
7488 "Bitwidth between the offsets and struct type entries must match", &I,
7489 BaseNode);
7490 Failed = true;
7491 continue;
7492 }
7493
7494 // NB! As far as I can tell, we generate a non-strictly increasing offset
7495 // sequence only from structs that have zero size bit fields. When
7496 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7497 // pick the field lexically the latest in struct type metadata node. This
7498 // mirrors the actual behavior of the alias analysis implementation.
7499 bool IsAscending =
7500 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7501
7502 if (!IsAscending) {
7503 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7504 Failed = true;
7505 }
7506
7507 PrevOffset = OffsetEntryCI->getValue();
7508
7509 if (IsNewFormat) {
7510 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7511 BaseNode->getOperand(Idx + 2));
7512 if (!MemberSizeNode) {
7513 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7514 Failed = true;
7515 continue;
7516 }
7517 }
7518 }
7519
7520 return Failed ? InvalidNode
7521 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7522}
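The loop above walks a struct type node's field descriptors: (type, offset) pairs in the old struct-path format, (type, offset, size) triples in the new format, requiring MDNode field types, constant offsets of one consistent bit width, and non-decreasing offsets. As a rough, self-contained sketch of metadata that satisfies these checks (it assumes the usual MDBuilder helpers; the function buildStructTypeNode is invented for illustration), an old-format struct type node for a struct with two ints can be built like this:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include <cstdint>
#include <utility>

// Sketch: an old-format struct-path type node for struct S { int a; int b; },
// i.e. !{!"S", !int, i64 0, !int, i64 4}.
static llvm::MDNode *buildStructTypeNode(llvm::LLVMContext &Ctx) {
  llvm::MDBuilder MDB(Ctx);
  llvm::MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
  llvm::MDNode *Char = MDB.createTBAAScalarTypeNode("omnipotent char", Root);
  llvm::MDNode *Int  = MDB.createTBAAScalarTypeNode("int", Char);
  // Fields are (type node, byte offset) pairs; offsets must not decrease.
  std::pair<llvm::MDNode *, uint64_t> Fields[] = {{Int, 0}, {Int, 4}};
  return MDB.createTBAAStructTypeNode("S", Fields);
}

The resulting node has an MDString name in operand 0, an odd operand count, and offsets 0 and 4 in increasing order, so verifyTBAABaseNodeImpl would report it as valid with a 64-bit offset width.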
7523
7524static bool IsRootTBAANode(const MDNode *MD) {
7525 return MD->getNumOperands() < 2;
7526}
7527
7528static bool IsScalarTBAANodeImpl(const MDNode *MD,
7529 SmallPtrSetImpl<const MDNode *> &Visited) {
7530 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7531 return false;
7532
7533 if (!isa<MDString>(MD->getOperand(0)))
7534 return false;
7535
7536 if (MD->getNumOperands() == 3) {
7537 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7538 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7539 return false;
7540 }
7541
7542 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7543 return Parent && Visited.insert(Parent).second &&
7544 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7545}
7546
7547bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7548 auto ResultIt = TBAAScalarNodes.find(MD);
7549 if (ResultIt != TBAAScalarNodes.end())
7550 return ResultIt->second;
7551
7552 SmallPtrSet<const MDNode *, 4> Visited;
7553 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7554 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7555 (void)InsertResult;
7556 assert(InsertResult.second && "Just checked!");
7557
7558 return Result;
7559}
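isValidScalarTBAANode caches the answer of IsScalarTBAANodeImpl, which accepts nodes of the form !{!"name", !parent} (optionally with a trailing zero offset) whose parent chain, followed through operand 1, reaches a root node with fewer than two operands and contains no cycles. A simplified model of that walk, assuming only the LLVM metadata API shown; the helper looksLikeScalarChain is invented and is deliberately looser than the real check:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"

// Simplified walk: follow operand 1 upward while each node is named by an
// MDString, refusing cycles, and accept once a root-like node (< 2 operands)
// is reached. Unlike the real check, this skips the zero-offset test on
// three-operand nodes and would accept a bare root as its own starting point.
static bool looksLikeScalarChain(const llvm::MDNode *N) {
  llvm::SmallPtrSet<const llvm::MDNode *, 8> Seen;
  while (N && N->getNumOperands() >= 2) {
    if (!llvm::isa<llvm::MDString>(N->getOperand(0)) || !Seen.insert(N).second)
      return false;
    N = llvm::dyn_cast_or_null<llvm::MDNode>(N->getOperand(1));
  }
  return N != nullptr; // a null parent means malformed, not a root
}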
7560
7561/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7562/// Offset in place to be the offset within the field node returned.
7563///
7564/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7565MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7566 const MDNode *BaseNode,
7567 APInt &Offset,
7568 bool IsNewFormat) {
7569 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7570
7571 // Scalar nodes have only one possible "field" -- their parent in the access
7572 // hierarchy. Offset must be zero at this point, but our caller is supposed
7573 // to check that.
7574 if (BaseNode->getNumOperands() == 2)
7575 return cast<MDNode>(BaseNode->getOperand(1));
7576
7577 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7578 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7579 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7580 Idx += NumOpsPerField) {
7581 auto *OffsetEntryCI =
7582 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7583 if (OffsetEntryCI->getValue().ugt(Offset)) {
7584 if (Idx == FirstFieldOpNo) {
7585 CheckFailed("Could not find TBAA parent in struct type node", &I,
7586 BaseNode, &Offset);
7587 return nullptr;
7588 }
7589
7590 unsigned PrevIdx = Idx - NumOpsPerField;
7591 auto *PrevOffsetEntryCI =
7592 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7593 Offset -= PrevOffsetEntryCI->getValue();
7594 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7595 }
7596 }
7597
7598 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7599 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7600 BaseNode->getOperand(LastIdx + 1));
7601 Offset -= LastOffsetEntryCI->getValue();
7602 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7603}
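Given a struct type node and an absolute offset, the lookup above selects the last field whose start offset is less than or equal to the query offset and rewrites the offset to be relative to that field; a query offset smaller than the first field's offset is rejected. A small stand-alone model of that arithmetic on plain integers (the field "nodes" are just indices here, and findFieldAndRebase is an invented name):

#include <cstdint>
#include <vector>

// Toy model of the offset handling: pick the last field whose start offset is
// <= Offset, then make Offset relative to that field's start. Returns -1 when
// the offset lies before the first field (the "could not find parent" case).
static int findFieldAndRebase(const std::vector<uint64_t> &FieldOffsets,
                              uint64_t &Offset) {
  int Found = -1;
  for (size_t I = 0; I < FieldOffsets.size(); ++I)
    if (FieldOffsets[I] <= Offset)
      Found = static_cast<int>(I);
  if (Found >= 0)
    Offset -= FieldOffsets[Found];
  return Found;
}

For fields starting at 0 and 4, a query offset of 6 selects the second field and leaves a residual offset of 2 inside it, which mirrors what happens when visitTBAAMetadata descends one level of the struct path.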
7604
7605static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7606 if (!Type || Type->getNumOperands() < 3)
7607 return false;
7608
7609 // In the new format type nodes shall have a reference to the parent type as
7610 // its first operand.
7611 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7612}
7613
7614bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7615 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7616 &I, MD);
7617
7618 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7619 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7620 isa<AtomicCmpXchgInst>(I),
7621 "This instruction shall not have a TBAA access tag!", &I);
7622
7623 bool IsStructPathTBAA =
7624 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7625
7626 CheckTBAA(IsStructPathTBAA,
7627 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7628 &I);
7629
7630 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7631 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7632
7633 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7634
7635 if (IsNewFormat) {
7636 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7637 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7638 } else {
7639 CheckTBAA(MD->getNumOperands() < 5,
7640 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7641 }
7642
7643 // Check the access size field.
7644 if (IsNewFormat) {
7645 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7646 MD->getOperand(3));
7647 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7648 }
7649
7650 // Check the immutability flag.
7651 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7652 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7653 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7654 MD->getOperand(ImmutabilityFlagOpNo));
7655 CheckTBAA(IsImmutableCI,
7656 "Immutability tag on struct tag metadata must be a constant", &I,
7657 MD);
7658 CheckTBAA(
7659 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7660 "Immutability part of the struct tag metadata must be either 0 or 1",
7661 &I, MD);
7662 }
7663
7664 CheckTBAA(BaseNode && AccessType,
7665 "Malformed struct tag metadata: base and access-type "
7666 "should be non-null and point to Metadata nodes",
7667 &I, MD, BaseNode, AccessType);
7668
7669 if (!IsNewFormat) {
7670 CheckTBAA(isValidScalarTBAANode(AccessType),
7671 "Access type node must be a valid scalar type", &I, MD,
7672 AccessType);
7673 }
7674
7675 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7676 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7677
7678 APInt Offset = OffsetCI->getValue();
7679 bool SeenAccessTypeInPath = false;
7680
7681 SmallPtrSet<MDNode *, 4> StructPath;
7682
7683 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7684 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7685 IsNewFormat)) {
7686 if (!StructPath.insert(BaseNode).second) {
7687 CheckFailed("Cycle detected in struct path", &I, MD);
7688 return false;
7689 }
7690
7691 bool Invalid;
7692 unsigned BaseNodeBitWidth;
7693 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7694 IsNewFormat);
7695
7696 // If the base node is invalid in itself, then we've already printed all the
7697 // errors we wanted to print.
7698 if (Invalid)
7699 return false;
7700
7701 SeenAccessTypeInPath |= BaseNode == AccessType;
7702
7703 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7704 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7705 &I, MD, &Offset);
7706
7707 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7708 (BaseNodeBitWidth == 0 && Offset == 0) ||
7709 (IsNewFormat && BaseNodeBitWidth == ~0u),
7710 "Access bit-width not the same as description bit-width", &I, MD,
7711 BaseNodeBitWidth, Offset.getBitWidth());
7712
7713 if (IsNewFormat && SeenAccessTypeInPath)
7714 break;
7715 }
7716
7717 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7718 MD);
7719 return true;
7720}
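Putting the pieces together: an access tag is !{!base, !access, i64 offset} (plus an optional immutability flag), and visitTBAAMetadata walks from the base node toward the access type, re-basing the offset at every step and insisting that the access type is reached with a zero residual offset. A hedged end-to-end sketch using the standard MDBuilder and verifier entry points; the function storeWithTBAA and the IR it builds are invented for illustration:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <utility>

// Sketch: build void f(ptr) containing a store tagged !tbaa !{!S, !int, i64 4}
// and let the verifier (which runs TBAAVerifier::visitTBAAMetadata) check it.
static void storeWithTBAA(llvm::Module &M) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::IRBuilder<> B(Ctx);
  llvm::MDBuilder MDB(Ctx);

  llvm::MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
  llvm::MDNode *Char = MDB.createTBAAScalarTypeNode("omnipotent char", Root);
  llvm::MDNode *Int  = MDB.createTBAAScalarTypeNode("int", Char);
  std::pair<llvm::MDNode *, uint64_t> Fields[] = {{Int, 0}, {Int, 4}};
  llvm::MDNode *S   = MDB.createTBAAStructTypeNode("S", Fields);
  // Access tag: base type !S, access type !int, offset 4 (the second field).
  llvm::MDNode *Tag = MDB.createTBAAStructTagNode(S, Int, 4);

  auto *FT = llvm::FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false);
  auto *F  = llvm::Function::Create(FT, llvm::Function::ExternalLinkage, "f", M);
  auto *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  llvm::Value *FieldPtr =
      B.CreateConstInBoundsGEP1_64(B.getInt8Ty(), F->getArg(0), 4);
  llvm::StoreInst *SI = B.CreateStore(B.getInt32(0), FieldPtr);
  SI->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
  B.CreateRetVoid();

  // verifyFunction returns true when something (TBAA included) is broken.
  if (llvm::verifyFunction(*F, &llvm::errs()))
    llvm::errs() << "broken IR or TBAA\n";
}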
7721
7722char VerifierLegacyPass::ID = 0;
7723INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7724
7725FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7726 return new VerifierLegacyPass(FatalErrors);
7727}
7728
7729AnalysisKey VerifierAnalysis::Key;
7730VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7731 ModuleAnalysisManager &) {
7732 Result Res;
7733 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7734 return Res;
7735}
7736
7737VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7738 FunctionAnalysisManager &) {
7739 return { llvm::verifyFunction(F, &dbgs()), false };
7740}
7741
7742PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7743 auto Res = AM.getResult<VerifierAnalysis>(M);
7744 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7745 report_fatal_error("Broken module found, compilation aborted!");
7746
7747 return PreservedAnalyses::all();
7748}
7749
7750PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7751 auto res = AM.getResult<VerifierAnalysis>(F);
7752 if (res.IRBroken && FatalErrors)
7753 report_fatal_error("Broken function found, compilation aborted!");
7754
7755 return PreservedAnalyses::all();
7756}
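Both pass wrappers simply consume the VerifierAnalysis result and, when FatalErrors is set, abort compilation on broken IR; the same checks are reachable outside a pass pipeline through llvm::verifyModule and llvm::verifyFunction. A minimal stand-alone sketch (the module here is an empty placeholder named "demo"):

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("demo", Ctx); // empty placeholder module, trivially well-formed

  bool BrokenDebugInfo = false;
  // Returns true if the module is broken; diagnostics go to the given stream.
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    llvm::errs() << "module is broken\n";
  else if (BrokenDebugInfo)
    llvm::errs() << "debug info is broken (it can be stripped and recovered)\n";
  return 0;
}

Within the new pass manager the equivalent is adding VerifierPass() to a ModulePassManager or FunctionPassManager at the points where the IR should be checked.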
Definition: Verifier.cpp:143