Verifier.cpp (LLVM 20.0.0git)
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for basic
10// correctness checking of input to the system (see the usage sketch below).
11//
12// Note that this does not provide full `Java style' security and verification;
13// instead, it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify, e.g., that shifts & logicals only happen on integral types.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
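// A minimal usage sketch, assuming only the public entry points declared in
// llvm/IR/Verifier.h (verifyModule() and verifyFunction(), both of which
// return true when broken IR is found). The helper name `moduleIsWellFormed`
// is hypothetical and used purely for illustration:
//
//   #include "llvm/IR/Module.h"
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   static bool moduleIsWellFormed(const llvm::Module &M) {
//     // verifyModule() prints diagnostics to the given stream and returns
//     // true when the module violates any of the rules listed above.
//     return !llvm::verifyModule(M, &llvm::errs());
//   }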
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
58#include "llvm/ADT/SmallSet.h"
61#include "llvm/ADT/StringRef.h"
62#include "llvm/ADT/Twine.h"
64#include "llvm/IR/Argument.h"
66#include "llvm/IR/Attributes.h"
67#include "llvm/IR/BasicBlock.h"
68#include "llvm/IR/CFG.h"
69#include "llvm/IR/CallingConv.h"
70#include "llvm/IR/Comdat.h"
71#include "llvm/IR/Constant.h"
74#include "llvm/IR/Constants.h"
76#include "llvm/IR/DataLayout.h"
77#include "llvm/IR/DebugInfo.h"
79#include "llvm/IR/DebugLoc.h"
81#include "llvm/IR/Dominators.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
85#include "llvm/IR/GlobalAlias.h"
86#include "llvm/IR/GlobalValue.h"
88#include "llvm/IR/InlineAsm.h"
89#include "llvm/IR/InstVisitor.h"
90#include "llvm/IR/InstrTypes.h"
91#include "llvm/IR/Instruction.h"
94#include "llvm/IR/Intrinsics.h"
95#include "llvm/IR/IntrinsicsAArch64.h"
96#include "llvm/IR/IntrinsicsAMDGPU.h"
97#include "llvm/IR/IntrinsicsARM.h"
98#include "llvm/IR/IntrinsicsNVPTX.h"
99#include "llvm/IR/IntrinsicsWebAssembly.h"
100#include "llvm/IR/LLVMContext.h"
102#include "llvm/IR/Metadata.h"
103#include "llvm/IR/Module.h"
105#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
123#include <algorithm>
124#include <cassert>
125#include <cstdint>
126#include <memory>
127#include <optional>
128#include <string>
129#include <utility>
130
131using namespace llvm;
132
134 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
135 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
136 "scopes are not dominating"));
137
138namespace llvm {
139
140struct VerifierSupport {
141 raw_ostream *OS;
142 const Module &M;
143 ModuleSlotTracker MST;
144 Triple TT;
145 const DataLayout &DL;
146 LLVMContext &Context;
147
148 /// Track the brokenness of the module while recursively visiting.
149 bool Broken = false;
150 /// Broken debug info can be "recovered" from by stripping the debug info.
151 bool BrokenDebugInfo = false;
152 /// Whether to treat broken debug info as an error.
153 bool TreatBrokenDebugInfoAsError = true;
154
155 explicit VerifierSupport(raw_ostream *OS, const Module &M)
156 : OS(OS), M(M), MST(&M), TT(Triple::normalize(M.getTargetTriple())),
157 DL(M.getDataLayout()), Context(M.getContext()) {}
158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
186 void Write(DbgVariableRecord::LocationType Type) {
187 switch (Type) {
188 case DbgVariableRecord::LocationType::Value:
189 *OS << "value";
190 break;
191 case DbgVariableRecord::LocationType::Declare:
192 *OS << "declare";
193 break;
194 case DbgVariableRecord::LocationType::Assign:
195 *OS << "assign";
196 break;
197 case DbgVariableRecord::LocationType::End:
198 *OS << "end";
199 break;
200 case DbgVariableRecord::LocationType::Any:
201 *OS << "any";
202 break;
203 };
204 }
205
206 void Write(const Metadata *MD) {
207 if (!MD)
208 return;
209 MD->print(*OS, MST, &M);
210 *OS << '\n';
211 }
212
213 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
214 Write(MD.get());
215 }
216
217 void Write(const NamedMDNode *NMD) {
218 if (!NMD)
219 return;
220 NMD->print(*OS, MST);
221 *OS << '\n';
222 }
223
224 void Write(Type *T) {
225 if (!T)
226 return;
227 *OS << ' ' << *T;
228 }
229
230 void Write(const Comdat *C) {
231 if (!C)
232 return;
233 *OS << *C;
234 }
235
236 void Write(const APInt *AI) {
237 if (!AI)
238 return;
239 *OS << *AI << '\n';
240 }
241
242 void Write(const unsigned i) { *OS << i << '\n'; }
243
244 // NOLINTNEXTLINE(readability-identifier-naming)
245 void Write(const Attribute *A) {
246 if (!A)
247 return;
248 *OS << A->getAsString() << '\n';
249 }
250
251 // NOLINTNEXTLINE(readability-identifier-naming)
252 void Write(const AttributeSet *AS) {
253 if (!AS)
254 return;
255 *OS << AS->getAsString() << '\n';
256 }
257
258 // NOLINTNEXTLINE(readability-identifier-naming)
259 void Write(const AttributeList *AL) {
260 if (!AL)
261 return;
262 AL->print(*OS);
263 }
264
265 void Write(Printable P) { *OS << P << '\n'; }
266
267 template <typename T> void Write(ArrayRef<T> Vs) {
268 for (const T &V : Vs)
269 Write(V);
270 }
271
272 template <typename T1, typename... Ts>
273 void WriteTs(const T1 &V1, const Ts &... Vs) {
274 Write(V1);
275 WriteTs(Vs...);
276 }
277
278 template <typename... Ts> void WriteTs() {}
279
280public:
281 /// A check failed, so print out the condition and the message.
282 ///
283 /// This provides a nice place to put a breakpoint if you want to see why
284 /// something is not correct.
285 void CheckFailed(const Twine &Message) {
286 if (OS)
287 *OS << Message << '\n';
288 Broken = true;
289 }
290
291 /// A check failed (with values to print).
292 ///
293 /// This calls the Message-only version so that the above is easier to set a
294 /// breakpoint on.
295 template <typename T1, typename... Ts>
296 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
297 CheckFailed(Message);
298 if (OS)
299 WriteTs(V1, Vs...);
300 }
301
302 /// A debug info check failed.
303 void DebugInfoCheckFailed(const Twine &Message) {
304 if (OS)
305 *OS << Message << '\n';
306 Broken |= TreatBrokenDebugInfoAsError;
307 BrokenDebugInfo = true;
308 }
309
310 /// A debug info check failed (with values to print).
311 template <typename T1, typename... Ts>
312 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
313 const Ts &... Vs) {
314 DebugInfoCheckFailed(Message);
315 if (OS)
316 WriteTs(V1, Vs...);
317 }
318};
319
320} // namespace llvm
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
336 SmallPtrSet<const Metadata *, 32> MDNodes;
337
338 /// Keep track which DISubprogram is attached to which function.
339 DenseMap<const Function *, const DISubprogram *> DISubprogramAttachments;
340
341 /// Track all DICompileUnits visited.
342 SmallPtrSet<const Metadata *, 2> CUVisited;
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
356 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
360 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
382 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 assert(F.getParent() == &M &&
403 "An instance of this class only works with a specific module!");
404
405 // First ensure the function is well-enough formed to compute dominance
406 // information, and directly compute a dominance tree. We don't rely on the
407 // pass manager to provide this as it isolates us from a potentially
408 // out-of-date dominator tree and makes it significantly more complex to run
409 // this code outside of a pass manager.
410 // FIXME: It's really gross that we have to cast away constness here.
411 if (!F.empty())
412 DT.recalculate(const_cast<Function &>(F));
413
414 for (const BasicBlock &BB : F) {
415 if (!BB.empty() && BB.back().isTerminator())
416 continue;
417
418 if (OS) {
419 *OS << "Basic Block in function '" << F.getName()
420 << "' does not have terminator!\n";
421 BB.printAsOperand(*OS, true, MST);
422 *OS << "\n";
423 }
424 return false;
425 }
426
427 auto FailureCB = [this](const Twine &Message) {
428 this->CheckFailed(Message);
429 };
430 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
431
432 Broken = false;
433 // FIXME: We strip const here because the inst visitor strips const.
434 visit(const_cast<Function &>(F));
435 verifySiblingFuncletUnwinds();
436
437 if (ConvergenceVerifyHelper.sawTokens())
438 ConvergenceVerifyHelper.verify(DT);
439
440 InstsInThisBlock.clear();
441 DebugFnArgs.clear();
442 LandingPadResultTy = nullptr;
443 SawFrameEscape = false;
444 SiblingFuncletInfo.clear();
445 verifyNoAliasScopeDecl();
446 NoAliasScopeDecls.clear();
447
448 return !Broken;
449 }
450
451 /// Verify the module that this instance of \c Verifier was initialized with.
452 bool verify() {
453 Broken = false;
454
455 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
456 for (const Function &F : M)
457 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
458 DeoptimizeDeclarations.push_back(&F);
459
460 // Now that we've visited every function, verify that we never asked to
461 // recover a frame index that wasn't escaped.
462 verifyFrameRecoverIndices();
463 for (const GlobalVariable &GV : M.globals())
464 visitGlobalVariable(GV);
465
466 for (const GlobalAlias &GA : M.aliases())
467 visitGlobalAlias(GA);
468
469 for (const GlobalIFunc &GI : M.ifuncs())
470 visitGlobalIFunc(GI);
471
472 for (const NamedMDNode &NMD : M.named_metadata())
473 visitNamedMDNode(NMD);
474
475 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
476 visitComdat(SMEC.getValue());
477
478 visitModuleFlags();
479 visitModuleIdents();
480 visitModuleCommandLines();
481
482 verifyCompileUnits();
483
484 verifyDeoptimizeCallingConvs();
485 DISubprogramAttachments.clear();
486 return !Broken;
487 }
488
489private:
490 /// Whether a metadata node is allowed to be, or contain, a DILocation.
491 enum class AreDebugLocsAllowed { No, Yes };
492
493 /// Metadata that should be treated as a range, with slightly different
494 /// requirements.
495 enum class RangeLikeMetadataKind {
496 Range, // MD_range
497 AbsoluteSymbol, // MD_absolute_symbol
498 NoaliasAddrspace // MD_noalias_addrspace
499 };
500
501 // Verification methods...
502 void visitGlobalValue(const GlobalValue &GV);
503 void visitGlobalVariable(const GlobalVariable &GV);
504 void visitGlobalAlias(const GlobalAlias &GA);
505 void visitGlobalIFunc(const GlobalIFunc &GI);
506 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
507 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
508 const GlobalAlias &A, const Constant &C);
509 void visitNamedMDNode(const NamedMDNode &NMD);
510 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
511 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
512 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
513 void visitDIArgList(const DIArgList &AL, Function *F);
514 void visitComdat(const Comdat &C);
515 void visitModuleIdents();
516 void visitModuleCommandLines();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
519 DenseMap<const MDString *, const MDNode *> &SeenIDs,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallStackMetadata(MDNode *MD);
531 void visitMemProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
533 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
534 void visitMMRAMetadata(Instruction &I, MDNode *MD);
535 void visitAnnotationMetadata(MDNode *Annotation);
536 void visitAliasScopeMetadata(const MDNode *MD);
537 void visitAliasScopeListMetadata(const MDNode *MD);
538 void visitAccessGroupMetadata(const MDNode *MD);
539
540 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
541#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
542#include "llvm/IR/Metadata.def"
543 void visitDIScope(const DIScope &N);
544 void visitDIVariable(const DIVariable &N);
545 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
546 void visitDITemplateParameter(const DITemplateParameter &N);
547
548 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
549
550 void visit(DbgLabelRecord &DLR);
551 void visit(DbgVariableRecord &DVR);
552 // InstVisitor overrides...
553 using InstVisitor<Verifier>::visit;
554 void visitDbgRecords(Instruction &I);
555 void visit(Instruction &I);
556
557 void visitTruncInst(TruncInst &I);
558 void visitZExtInst(ZExtInst &I);
559 void visitSExtInst(SExtInst &I);
560 void visitFPTruncInst(FPTruncInst &I);
561 void visitFPExtInst(FPExtInst &I);
562 void visitFPToUIInst(FPToUIInst &I);
563 void visitFPToSIInst(FPToSIInst &I);
564 void visitUIToFPInst(UIToFPInst &I);
565 void visitSIToFPInst(SIToFPInst &I);
566 void visitIntToPtrInst(IntToPtrInst &I);
567 void visitPtrToIntInst(PtrToIntInst &I);
568 void visitBitCastInst(BitCastInst &I);
569 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
570 void visitPHINode(PHINode &PN);
571 void visitCallBase(CallBase &Call);
572 void visitUnaryOperator(UnaryOperator &U);
573 void visitBinaryOperator(BinaryOperator &B);
574 void visitICmpInst(ICmpInst &IC);
575 void visitFCmpInst(FCmpInst &FC);
576 void visitExtractElementInst(ExtractElementInst &EI);
577 void visitInsertElementInst(InsertElementInst &EI);
578 void visitShuffleVectorInst(ShuffleVectorInst &EI);
579 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
580 void visitCallInst(CallInst &CI);
581 void visitInvokeInst(InvokeInst &II);
582 void visitGetElementPtrInst(GetElementPtrInst &GEP);
583 void visitLoadInst(LoadInst &LI);
584 void visitStoreInst(StoreInst &SI);
585 void verifyDominatesUse(Instruction &I, unsigned i);
586 void visitInstruction(Instruction &I);
587 void visitTerminator(Instruction &I);
588 void visitBranchInst(BranchInst &BI);
589 void visitReturnInst(ReturnInst &RI);
590 void visitSwitchInst(SwitchInst &SI);
591 void visitIndirectBrInst(IndirectBrInst &BI);
592 void visitCallBrInst(CallBrInst &CBI);
593 void visitSelectInst(SelectInst &SI);
594 void visitUserOp1(Instruction &I);
595 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
596 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
597 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
598 void visitVPIntrinsic(VPIntrinsic &VPI);
599 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
600 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
601 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
602 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
603 void visitFenceInst(FenceInst &FI);
604 void visitAllocaInst(AllocaInst &AI);
605 void visitExtractValueInst(ExtractValueInst &EVI);
606 void visitInsertValueInst(InsertValueInst &IVI);
607 void visitEHPadPredecessors(Instruction &I);
608 void visitLandingPadInst(LandingPadInst &LPI);
609 void visitResumeInst(ResumeInst &RI);
610 void visitCatchPadInst(CatchPadInst &CPI);
611 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
612 void visitCleanupPadInst(CleanupPadInst &CPI);
613 void visitFuncletPadInst(FuncletPadInst &FPI);
614 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
615 void visitCleanupReturnInst(CleanupReturnInst &CRI);
616
617 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
618 void verifySwiftErrorValue(const Value *SwiftErrorVal);
619 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
620 void verifyMustTailCall(CallInst &CI);
621 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
622 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
623 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
624 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
625 const Value *V);
626 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
627 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
628 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
629
630 void visitConstantExprsRecursively(const Constant *EntryC);
631 void visitConstantExpr(const ConstantExpr *CE);
632 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
633 void verifyInlineAsmCall(const CallBase &Call);
634 void verifyStatepoint(const CallBase &Call);
635 void verifyFrameRecoverIndices();
636 void verifySiblingFuncletUnwinds();
637
638 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
639 void verifyFragmentExpression(const DbgVariableRecord &I);
640 template <typename ValueOrMetadata>
641 void verifyFragmentExpression(const DIVariable &V,
642 DIExpression::FragmentInfo Fragment,
643 ValueOrMetadata *Desc);
644 void verifyFnArgs(const DbgVariableIntrinsic &I);
645 void verifyFnArgs(const DbgVariableRecord &DVR);
646 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
647 void verifyNotEntryValue(const DbgVariableRecord &I);
648
649 /// Module-level debug info verification...
650 void verifyCompileUnits();
651
652 /// Module-level verification that all @llvm.experimental.deoptimize
653 /// declarations share the same calling convention.
654 void verifyDeoptimizeCallingConvs();
655
656 void verifyAttachedCallBundle(const CallBase &Call,
657 const OperandBundleUse &BU);
658
659 /// Verify the llvm.experimental.noalias.scope.decl declarations
660 void verifyNoAliasScopeDecl();
661};
662
663} // end anonymous namespace
664
665/// We know that cond should be true, if not print an error message.
666#define Check(C, ...) \
667 do { \
668 if (!(C)) { \
669 CheckFailed(__VA_ARGS__); \
670 return; \
671 } \
672 } while (false)
673
674/// We know that a debug info condition should be true, if not print
675/// an error message.
676#define CheckDI(C, ...) \
677 do { \
678 if (!(C)) { \
679 DebugInfoCheckFailed(__VA_ARGS__); \
680 return; \
681 } \
682 } while (false)
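// A note on the failure model these macros encode, with a usage sketch: a
// failed Check() records the problem through CheckFailed() and returns from
// the current visit method, so one malformed construct does not cascade into
// follow-on diagnostics, while verification of the rest of the module still
// proceeds (verify() only reports the accumulated Broken flag at the end).
// The call shape is always the condition first, then a message and any values
// to dump, as in this check taken from visitGlobalValue() below:
//
//   Check(Associated->getNumOperands() == 1,
//         "associated metadata must have one operand", &GV, Associated);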
683
684void Verifier::visitDbgRecords(Instruction &I) {
685 if (!I.DebugMarker)
686 return;
687 CheckDI(I.DebugMarker->MarkedInstr == &I,
688 "Instruction has invalid DebugMarker", &I);
689 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
690 "PHI Node must not have any attached DbgRecords", &I);
691 for (DbgRecord &DR : I.getDbgRecordRange()) {
692 CheckDI(DR.getMarker() == I.DebugMarker,
693 "DbgRecord had invalid DebugMarker", &I, &DR);
694 if (auto *Loc =
695 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
696 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
697 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
698 visit(*DVR);
699 // These have to appear after `visit` for consistency with existing
700 // intrinsic behaviour.
701 verifyFragmentExpression(*DVR);
702 verifyNotEntryValue(*DVR);
703 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
704 visit(*DLR);
705 }
706 }
707}
708
709void Verifier::visit(Instruction &I) {
710 visitDbgRecords(I);
711 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
712 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
713 InstVisitor<Verifier>::visit(I);
714}
715
716// Helper to iterate over indirect users. By returning false, the callback can ask not to descend into the current user's users.
717static void forEachUser(const Value *User,
718 SmallPtrSetImpl<const Value *> &Visited,
719 llvm::function_ref<bool(const Value *)> Callback) {
720 if (!Visited.insert(User).second)
721 return;
722
723 SmallVector<const Value *> WorkList;
724 append_range(WorkList, User->materialized_users());
725 while (!WorkList.empty()) {
726 const Value *Cur = WorkList.pop_back_val();
727 if (!Visited.insert(Cur).second)
728 continue;
729 if (Callback(Cur))
730 append_range(WorkList, Cur->materialized_users());
731 }
732}
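// Usage sketch for the traversal contract (the real call site is in
// visitGlobalValue() below; the callback body here is hypothetical): the
// callback returns true to keep descending into the users of the current
// value and false to prune that subtree, while the remaining worklist is
// still drained.
//
//   SmallPtrSet<const Value *, 32> Seen;
//   forEachUser(&GV, Seen, [](const Value *Cur) {
//     return !isa<Instruction>(Cur); // do not walk past instructions
//   });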
733
734void Verifier::visitGlobalValue(const GlobalValue &GV) {
736 "Global is external, but doesn't have external or weak linkage!", &GV);
737
738 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
739
740 if (MaybeAlign A = GO->getAlign()) {
741 Check(A->value() <= Value::MaximumAlignment,
742 "huge alignment values are unsupported", GO);
743 }
744
745 if (const MDNode *Associated =
746 GO->getMetadata(LLVMContext::MD_associated)) {
747 Check(Associated->getNumOperands() == 1,
748 "associated metadata must have one operand", &GV, Associated);
749 const Metadata *Op = Associated->getOperand(0).get();
750 Check(Op, "associated metadata must have a global value", GO, Associated);
751
752 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
753 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
754 if (VM) {
755 Check(isa<PointerType>(VM->getValue()->getType()),
756 "associated value must be pointer typed", GV, Associated);
757
758 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
759 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
760 "associated metadata must point to a GlobalObject", GO, Stripped);
761 Check(Stripped != GO,
762 "global values should not associate to themselves", GO,
763 Associated);
764 }
765 }
766
767 // FIXME: Why is getMetadata on GlobalValue protected?
768 if (const MDNode *AbsoluteSymbol =
769 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
770 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
771 DL.getIntPtrType(GO->getType()),
772 RangeLikeMetadataKind::AbsoluteSymbol);
773 }
774 }
775
776 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
777 "Only global variables can have appending linkage!", &GV);
778
779 if (GV.hasAppendingLinkage()) {
780 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
781 Check(GVar && GVar->getValueType()->isArrayTy(),
782 "Only global arrays can have appending linkage!", GVar);
783 }
784
785 if (GV.isDeclarationForLinker())
786 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
787
788 if (GV.hasDLLExportStorageClass()) {
790 "dllexport GlobalValue must have default or protected visibility",
791 &GV);
792 }
793 if (GV.hasDLLImportStorageClass()) {
795 "dllimport GlobalValue must have default visibility", &GV);
796 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
797 &GV);
798
799 Check((GV.isDeclaration() &&
800 (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
801 GV.hasAvailableExternallyLinkage(),
802 "Global is marked as dllimport, but not external", &GV);
803 }
804
805 if (GV.isImplicitDSOLocal())
806 Check(GV.isDSOLocal(),
807 "GlobalValue with local linkage or non-default "
808 "visibility must be dso_local!",
809 &GV);
810
811 if (GV.isTagged()) {
812 Check(!GV.hasSection(), "tagged GlobalValue must not be in section.", &GV);
813 }
814
815 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
816 if (const Instruction *I = dyn_cast<Instruction>(V)) {
817 if (!I->getParent() || !I->getParent()->getParent())
818 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
819 I);
820 else if (I->getParent()->getParent()->getParent() != &M)
821 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
822 I->getParent()->getParent(),
823 I->getParent()->getParent()->getParent());
824 return false;
825 } else if (const Function *F = dyn_cast<Function>(V)) {
826 if (F->getParent() != &M)
827 CheckFailed("Global is used by function in a different module", &GV, &M,
828 F, F->getParent());
829 return false;
830 }
831 return true;
832 });
833}
834
835void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
836 Type *GVType = GV.getValueType();
837
838 if (GV.hasInitializer()) {
839 Check(GV.getInitializer()->getType() == GVType,
840 "Global variable initializer type does not match global "
841 "variable type!",
842 &GV);
843 // If the global has common linkage, it must have a zero initializer and
844 // cannot be constant.
845 if (GV.hasCommonLinkage()) {
847 "'common' global must have a zero initializer!", &GV);
848 Check(!GV.isConstant(), "'common' global may not be marked constant!",
849 &GV);
850 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
851 }
852 }
853
854 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
855 GV.getName() == "llvm.global_dtors")) {
857 "invalid linkage for intrinsic global variable", &GV);
859 "invalid uses of intrinsic global variable", &GV);
860
861 // Don't worry about emitting an error for it not being an array;
862 // visitGlobalValue will complain on appending non-array.
863 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
864 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
865 PointerType *FuncPtrTy =
866 PointerType::get(Context, DL.getProgramAddressSpace());
867 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
868 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
869 STy->getTypeAtIndex(1) == FuncPtrTy,
870 "wrong type for intrinsic global variable", &GV);
871 Check(STy->getNumElements() == 3,
872 "the third field of the element type is mandatory, "
873 "specify ptr null to migrate from the obsoleted 2-field form");
874 Type *ETy = STy->getTypeAtIndex(2);
875 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
876 &GV);
877 }
878 }
879
880 if (GV.hasName() && (GV.getName() == "llvm.used" ||
881 GV.getName() == "llvm.compiler.used")) {
883 "invalid linkage for intrinsic global variable", &GV);
885 "invalid uses of intrinsic global variable", &GV);
886
887 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
888 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
889 Check(PTy, "wrong type for intrinsic global variable", &GV);
890 if (GV.hasInitializer()) {
891 const Constant *Init = GV.getInitializer();
892 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
893 Check(InitArray, "wrong initializer for intrinsic global variable",
894 Init);
895 for (Value *Op : InitArray->operands()) {
896 Value *V = Op->stripPointerCasts();
897 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
898 isa<GlobalAlias>(V),
899 Twine("invalid ") + GV.getName() + " member", V);
900 Check(V->hasName(),
901 Twine("members of ") + GV.getName() + " must be named", V);
902 }
903 }
904 }
905 }
906
907 // Visit any debug info attachments.
908 SmallVector<MDNode *, 1> MDs;
909 GV.getMetadata(LLVMContext::MD_dbg, MDs);
910 for (auto *MD : MDs) {
911 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
912 visitDIGlobalVariableExpression(*GVE);
913 else
914 CheckDI(false, "!dbg attachment of global variable must be a "
915 "DIGlobalVariableExpression");
916 }
917
918 // Scalable vectors cannot be global variables, since we don't know
919 // the runtime size.
920 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
921
922 // Check if it is or contains a target extension type that disallows being
923 // used as a global.
925 "Global @" + GV.getName() + " has illegal target extension type",
926 GVType);
927
928 if (!GV.hasInitializer()) {
929 visitGlobalValue(GV);
930 return;
931 }
932
933 // Walk any aggregate initializers looking for bitcasts between address spaces
934 visitConstantExprsRecursively(GV.getInitializer());
935
936 visitGlobalValue(GV);
937}
938
939void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
940 SmallPtrSet<const GlobalAlias*, 4> Visited;
941 Visited.insert(&GA);
942 visitAliaseeSubExpr(Visited, GA, C);
943}
944
945void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
946 const GlobalAlias &GA, const Constant &C) {
947 if (GA.hasAvailableExternallyLinkage()) {
948 Check(isa<GlobalValue>(C) &&
949 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
950 "available_externally alias must point to available_externally "
951 "global value",
952 &GA);
953 }
954 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
955 if (!GA.hasAvailableExternallyLinkage()) {
956 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
957 &GA);
958 }
959
960 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
961 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
962
963 Check(!GA2->isInterposable(),
964 "Alias cannot point to an interposable alias", &GA);
965 } else {
966 // Only continue verifying subexpressions of GlobalAliases.
967 // Do not recurse into global initializers.
968 return;
969 }
970 }
971
972 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
973 visitConstantExprsRecursively(CE);
974
975 for (const Use &U : C.operands()) {
976 Value *V = &*U;
977 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
978 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
979 else if (const auto *C2 = dyn_cast<Constant>(V))
980 visitAliaseeSubExpr(Visited, GA, *C2);
981 }
982}
983
984void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
986 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
987 "weak_odr, external, or available_externally linkage!",
988 &GA);
989 const Constant *Aliasee = GA.getAliasee();
990 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
991 Check(GA.getType() == Aliasee->getType(),
992 "Alias and aliasee types should match!", &GA);
993
994 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
995 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
996
997 visitAliaseeSubExpr(GA, *Aliasee);
998
999 visitGlobalValue(GA);
1000}
1001
1002void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1004 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1005 "weak_odr, or external linkage!",
1006 &GI);
1007 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1008 // is a Function definition.
1009 const Function *Resolver = GI.getResolverFunction();
1010 Check(Resolver, "IFunc must have a Function resolver", &GI);
1011 Check(!Resolver->isDeclarationForLinker(),
1012 "IFunc resolver must be a definition", &GI);
1013
1014 // Check that the immediate resolver operand (prior to any bitcasts) has the
1015 // correct type.
1016 const Type *ResolverTy = GI.getResolver()->getType();
1017
1018 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1019 "IFunc resolver must return a pointer", &GI);
1020
1021 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1022 "IFunc resolver has incorrect type", &GI);
1023}
1024
1025void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1026 // There used to be various other llvm.dbg.* nodes, but we don't support
1027 // upgrading them and we want to reserve the namespace for future uses.
1028 if (NMD.getName().starts_with("llvm.dbg."))
1029 CheckDI(NMD.getName() == "llvm.dbg.cu",
1030 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1031 for (const MDNode *MD : NMD.operands()) {
1032 if (NMD.getName() == "llvm.dbg.cu")
1033 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1034
1035 if (!MD)
1036 continue;
1037
1038 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1039 }
1040}
1041
1042void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1043 // Only visit each node once. Metadata can be mutually recursive, so this
1044 // avoids infinite recursion here, as well as being an optimization.
1045 if (!MDNodes.insert(&MD).second)
1046 return;
1047
1048 Check(&MD.getContext() == &Context,
1049 "MDNode context does not match Module context!", &MD);
1050
1051 switch (MD.getMetadataID()) {
1052 default:
1053 llvm_unreachable("Invalid MDNode subclass");
1054 case Metadata::MDTupleKind:
1055 break;
1056#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1057 case Metadata::CLASS##Kind: \
1058 visit##CLASS(cast<CLASS>(MD)); \
1059 break;
1060#include "llvm/IR/Metadata.def"
1061 }
1062
1063 for (const Metadata *Op : MD.operands()) {
1064 if (!Op)
1065 continue;
1066 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1067 &MD, Op);
1068 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1069 "DILocation not allowed within this metadata node", &MD, Op);
1070 if (auto *N = dyn_cast<MDNode>(Op)) {
1071 visitMDNode(*N, AllowLocs);
1072 continue;
1073 }
1074 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1075 visitValueAsMetadata(*V, nullptr);
1076 continue;
1077 }
1078 }
1079
1080 // Check these last, so we diagnose problems in operands first.
1081 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1082 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1083}
1084
1085void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1086 Check(MD.getValue(), "Expected valid value", &MD);
1087 Check(!MD.getValue()->getType()->isMetadataTy(),
1088 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1089
1090 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1091 if (!L)
1092 return;
1093
1094 Check(F, "function-local metadata used outside a function", L);
1095
1096 // If this was an instruction, bb, or argument, verify that it is in the
1097 // function that we expect.
1098 Function *ActualF = nullptr;
1099 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1100 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1101 ActualF = I->getParent()->getParent();
1102 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1103 ActualF = BB->getParent();
1104 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1105 ActualF = A->getParent();
1106 assert(ActualF && "Unimplemented function local metadata case!");
1107
1108 Check(ActualF == F, "function-local metadata used in wrong function", L);
1109}
1110
1111void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1112 for (const ValueAsMetadata *VAM : AL.getArgs())
1113 visitValueAsMetadata(*VAM, F);
1114}
1115
1116void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1117 Metadata *MD = MDV.getMetadata();
1118 if (auto *N = dyn_cast<MDNode>(MD)) {
1119 visitMDNode(*N, AreDebugLocsAllowed::No);
1120 return;
1121 }
1122
1123 // Only visit each node once. Metadata can be mutually recursive, so this
1124 // avoids infinite recursion here, as well as being an optimization.
1125 if (!MDNodes.insert(MD).second)
1126 return;
1127
1128 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1129 visitValueAsMetadata(*V, F);
1130
1131 if (auto *AL = dyn_cast<DIArgList>(MD))
1132 visitDIArgList(*AL, F);
1133}
1134
1135static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1136static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1137static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1138
1139void Verifier::visitDILocation(const DILocation &N) {
1140 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1141 "location requires a valid scope", &N, N.getRawScope());
1142 if (auto *IA = N.getRawInlinedAt())
1143 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1144 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1145 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1146}
1147
1148void Verifier::visitGenericDINode(const GenericDINode &N) {
1149 CheckDI(N.getTag(), "invalid tag", &N);
1150}
1151
1152void Verifier::visitDIScope(const DIScope &N) {
1153 if (auto *F = N.getRawFile())
1154 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1155}
1156
1157void Verifier::visitDISubrange(const DISubrange &N) {
1158 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1159 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1160 "Subrange can have any one of count or upperBound", &N);
1161 auto *CBound = N.getRawCountNode();
1162 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1163 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1164 "Count must be signed constant or DIVariable or DIExpression", &N);
1165 auto Count = N.getCount();
1166 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1167 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1168 "invalid subrange count", &N);
1169 auto *LBound = N.getRawLowerBound();
1170 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1171 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1172 "LowerBound must be signed constant or DIVariable or DIExpression",
1173 &N);
1174 auto *UBound = N.getRawUpperBound();
1175 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1176 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1177 "UpperBound must be signed constant or DIVariable or DIExpression",
1178 &N);
1179 auto *Stride = N.getRawStride();
1180 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1181 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1182 "Stride must be signed constant or DIVariable or DIExpression", &N);
1183}
1184
1185void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1186 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1187 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1188 "GenericSubrange can have any one of count or upperBound", &N);
1189 auto *CBound = N.getRawCountNode();
1190 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1191 "Count must be signed constant or DIVariable or DIExpression", &N);
1192 auto *LBound = N.getRawLowerBound();
1193 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1194 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1195 "LowerBound must be signed constant or DIVariable or DIExpression",
1196 &N);
1197 auto *UBound = N.getRawUpperBound();
1198 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1199 "UpperBound must be signed constant or DIVariable or DIExpression",
1200 &N);
1201 auto *Stride = N.getRawStride();
1202 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1203 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1204 "Stride must be signed constant or DIVariable or DIExpression", &N);
1205}
1206
1207void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1208 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1209}
1210
1211void Verifier::visitDIBasicType(const DIBasicType &N) {
1212 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1213 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1214 N.getTag() == dwarf::DW_TAG_string_type,
1215 "invalid tag", &N);
1216}
1217
1218void Verifier::visitDIStringType(const DIStringType &N) {
1219 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1220 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1221 &N);
1222}
1223
1224void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1225 // Common scope checks.
1226 visitDIScope(N);
1227
1228 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1229 N.getTag() == dwarf::DW_TAG_pointer_type ||
1230 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1231 N.getTag() == dwarf::DW_TAG_reference_type ||
1232 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1233 N.getTag() == dwarf::DW_TAG_const_type ||
1234 N.getTag() == dwarf::DW_TAG_immutable_type ||
1235 N.getTag() == dwarf::DW_TAG_volatile_type ||
1236 N.getTag() == dwarf::DW_TAG_restrict_type ||
1237 N.getTag() == dwarf::DW_TAG_atomic_type ||
1238 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1239 N.getTag() == dwarf::DW_TAG_member ||
1240 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1241 N.getTag() == dwarf::DW_TAG_inheritance ||
1242 N.getTag() == dwarf::DW_TAG_friend ||
1243 N.getTag() == dwarf::DW_TAG_set_type ||
1244 N.getTag() == dwarf::DW_TAG_template_alias,
1245 "invalid tag", &N);
1246 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1247 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1248 N.getRawExtraData());
1249 }
1250
1251 if (N.getTag() == dwarf::DW_TAG_set_type) {
1252 if (auto *T = N.getRawBaseType()) {
1253 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1254 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1255 CheckDI(
1256 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1257 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1258 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1259 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1260 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1261 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1262 "invalid set base type", &N, T);
1263 }
1264 }
1265
1266 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1267 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1268 N.getRawBaseType());
1269
1270 if (N.getDWARFAddressSpace()) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1272 N.getTag() == dwarf::DW_TAG_reference_type ||
1273 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1274 "DWARF address space only applies to pointer or reference types",
1275 &N);
1276 }
1277}
1278
1279/// Detect mutually exclusive flags.
1280static bool hasConflictingReferenceFlags(unsigned Flags) {
1281 return ((Flags & DINode::FlagLValueReference) &&
1282 (Flags & DINode::FlagRValueReference)) ||
1283 ((Flags & DINode::FlagTypePassByValue) &&
1284 (Flags & DINode::FlagTypePassByReference));
1285}
1286
1287void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1288 auto *Params = dyn_cast<MDTuple>(&RawParams);
1289 CheckDI(Params, "invalid template params", &N, &RawParams);
1290 for (Metadata *Op : Params->operands()) {
1291 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1292 &N, Params, Op);
1293 }
1294}
1295
1296void Verifier::visitDICompositeType(const DICompositeType &N) {
1297 // Common scope checks.
1298 visitDIScope(N);
1299
1300 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1301 N.getTag() == dwarf::DW_TAG_structure_type ||
1302 N.getTag() == dwarf::DW_TAG_union_type ||
1303 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1304 N.getTag() == dwarf::DW_TAG_class_type ||
1305 N.getTag() == dwarf::DW_TAG_variant_part ||
1306 N.getTag() == dwarf::DW_TAG_namelist,
1307 "invalid tag", &N);
1308
1309 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1310 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1311 N.getRawBaseType());
1312
1313 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1314 "invalid composite elements", &N, N.getRawElements());
1315 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1316 N.getRawVTableHolder());
1318 "invalid reference flags", &N);
1319 unsigned DIBlockByRefStruct = 1 << 4;
1320 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1321 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1322
1323 if (N.isVector()) {
1324 const DINodeArray Elements = N.getElements();
1325 CheckDI(Elements.size() == 1 &&
1326 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1327 "invalid vector, expected one element of type subrange", &N);
1328 }
1329
1330 if (auto *Params = N.getRawTemplateParams())
1331 visitTemplateParams(N, *Params);
1332
1333 if (auto *D = N.getRawDiscriminator()) {
1334 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1335 "discriminator can only appear on variant part");
1336 }
1337
1338 if (N.getRawDataLocation()) {
1339 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1340 "dataLocation can only appear in array type");
1341 }
1342
1343 if (N.getRawAssociated()) {
1344 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1345 "associated can only appear in array type");
1346 }
1347
1348 if (N.getRawAllocated()) {
1349 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1350 "allocated can only appear in array type");
1351 }
1352
1353 if (N.getRawRank()) {
1354 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1355 "rank can only appear in array type");
1356 }
1357
1358 if (N.getTag() == dwarf::DW_TAG_array_type) {
1359 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1360 }
1361}
1362
1363void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1364 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1365 if (auto *Types = N.getRawTypeArray()) {
1366 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1367 for (Metadata *Ty : N.getTypeArray()->operands()) {
1368 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1369 }
1370 }
1372 "invalid reference flags", &N);
1373}
1374
1375void Verifier::visitDIFile(const DIFile &N) {
1376 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1377 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1378 if (Checksum) {
1379 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1380 "invalid checksum kind", &N);
1381 size_t Size;
1382 switch (Checksum->Kind) {
1383 case DIFile::CSK_MD5:
1384 Size = 32;
1385 break;
1386 case DIFile::CSK_SHA1:
1387 Size = 40;
1388 break;
1389 case DIFile::CSK_SHA256:
1390 Size = 64;
1391 break;
1392 }
1393 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1394 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1395 "invalid checksum", &N);
1396 }
1397}
1398
1399void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1400 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1401 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1402
1403 // Don't bother verifying the compilation directory or producer string
1404 // as those could be empty.
1405 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1406 N.getRawFile());
1407 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1408 N.getFile());
1409
1410 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1411 "invalid emission kind", &N);
1412
1413 if (auto *Array = N.getRawEnumTypes()) {
1414 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1415 for (Metadata *Op : N.getEnumTypes()->operands()) {
1416 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1417 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1418 "invalid enum type", &N, N.getEnumTypes(), Op);
1419 }
1420 }
1421 if (auto *Array = N.getRawRetainedTypes()) {
1422 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1423 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1424 CheckDI(
1425 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1426 !cast<DISubprogram>(Op)->isDefinition())),
1427 "invalid retained type", &N, Op);
1428 }
1429 }
1430 if (auto *Array = N.getRawGlobalVariables()) {
1431 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1432 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1433 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1434 "invalid global variable ref", &N, Op);
1435 }
1436 }
1437 if (auto *Array = N.getRawImportedEntities()) {
1438 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1439 for (Metadata *Op : N.getImportedEntities()->operands()) {
1440 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1441 &N, Op);
1442 }
1443 }
1444 if (auto *Array = N.getRawMacros()) {
1445 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1446 for (Metadata *Op : N.getMacros()->operands()) {
1447 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1448 }
1449 }
1450 CUVisited.insert(&N);
1451}
1452
1453void Verifier::visitDISubprogram(const DISubprogram &N) {
1454 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1455 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1456 if (auto *F = N.getRawFile())
1457 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1458 else
1459 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1460 if (auto *T = N.getRawType())
1461 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1462 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1463 N.getRawContainingType());
1464 if (auto *Params = N.getRawTemplateParams())
1465 visitTemplateParams(N, *Params);
1466 if (auto *S = N.getRawDeclaration())
1467 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1468 "invalid subprogram declaration", &N, S);
1469 if (auto *RawNode = N.getRawRetainedNodes()) {
1470 auto *Node = dyn_cast<MDTuple>(RawNode);
1471 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1472 for (Metadata *Op : Node->operands()) {
1473 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1474 isa<DIImportedEntity>(Op)),
1475 "invalid retained nodes, expected DILocalVariable, DILabel or "
1476 "DIImportedEntity",
1477 &N, Node, Op);
1478 }
1479 }
1481 "invalid reference flags", &N);
1482
1483 auto *Unit = N.getRawUnit();
1484 if (N.isDefinition()) {
1485 // Subprogram definitions (not part of the type hierarchy).
1486 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1487 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1488 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1489 // There's no good way to cross the CU boundary to insert a nested
1490 // DISubprogram definition in one CU into a type defined in another CU.
1491 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1492 if (CT && CT->getRawIdentifier() &&
1493 M.getContext().isODRUniquingDebugTypes())
1494 CheckDI(N.getDeclaration(),
1495 "definition subprograms cannot be nested within DICompositeType "
1496 "when enabling ODR",
1497 &N);
1498 } else {
1499 // Subprogram declarations (part of the type hierarchy).
1500 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1501 CheckDI(!N.getRawDeclaration(),
1502 "subprogram declaration must not have a declaration field");
1503 }
1504
1505 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1506 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1507 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1508 for (Metadata *Op : ThrownTypes->operands())
1509 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1510 Op);
1511 }
1512
1513 if (N.areAllCallsDescribed())
1514 CheckDI(N.isDefinition(),
1515 "DIFlagAllCallsDescribed must be attached to a definition");
1516}
1517
1518void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1519 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1520 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1521 "invalid local scope", &N, N.getRawScope());
1522 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1523 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1524}
1525
1526void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1527 visitDILexicalBlockBase(N);
1528
1529 CheckDI(N.getLine() || !N.getColumn(),
1530 "cannot have column info without line info", &N);
1531}
1532
1533void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1534 visitDILexicalBlockBase(N);
1535}
1536
1537void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1538 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1539 if (auto *S = N.getRawScope())
1540 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1541 if (auto *S = N.getRawDecl())
1542 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1543}
1544
1545void Verifier::visitDINamespace(const DINamespace &N) {
1546 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1547 if (auto *S = N.getRawScope())
1548 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1549}
1550
1551void Verifier::visitDIMacro(const DIMacro &N) {
1552 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1553 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1554 "invalid macinfo type", &N);
1555 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1556 if (!N.getValue().empty()) {
1557 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1558 }
1559}
1560
1561void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1562 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1563 "invalid macinfo type", &N);
1564 if (auto *F = N.getRawFile())
1565 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1566
1567 if (auto *Array = N.getRawElements()) {
1568 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1569 for (Metadata *Op : N.getElements()->operands()) {
1570 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1571 }
1572 }
1573}
1574
1575void Verifier::visitDIModule(const DIModule &N) {
1576 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1577 CheckDI(!N.getName().empty(), "anonymous module", &N);
1578}
1579
1580void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1581 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1582}
1583
1584void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1585 visitDITemplateParameter(N);
1586
1587 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1588 &N);
1589}
1590
1591void Verifier::visitDITemplateValueParameter(
1592 const DITemplateValueParameter &N) {
1593 visitDITemplateParameter(N);
1594
1595 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1596 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1597 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1598 "invalid tag", &N);
1599}
1600
1601void Verifier::visitDIVariable(const DIVariable &N) {
1602 if (auto *S = N.getRawScope())
1603 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1604 if (auto *F = N.getRawFile())
1605 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1606}
1607
1608void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1609 // Checks common to all variables.
1610 visitDIVariable(N);
1611
1612 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1613 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1614 // Check only if the global variable is not an extern declaration.
1615 if (N.isDefinition())
1616 CheckDI(N.getType(), "missing global variable type", &N);
1617 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1618 CheckDI(isa<DIDerivedType>(Member),
1619 "invalid static data member declaration", &N, Member);
1620 }
1621}
1622
1623void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1624 // Checks common to all variables.
1625 visitDIVariable(N);
1626
1627 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1628 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1629 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1630 "local variable requires a valid scope", &N, N.getRawScope());
1631 if (auto Ty = N.getType())
1632 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1633}
1634
1635void Verifier::visitDIAssignID(const DIAssignID &N) {
1636 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1637 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1638}
1639
1640void Verifier::visitDILabel(const DILabel &N) {
1641 if (auto *S = N.getRawScope())
1642 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1643 if (auto *F = N.getRawFile())
1644 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1645
1646 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1647 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1648 "label requires a valid scope", &N, N.getRawScope());
1649}
1650
1651void Verifier::visitDIExpression(const DIExpression &N) {
1652 CheckDI(N.isValid(), "invalid expression", &N);
1653}
1654
1655void Verifier::visitDIGlobalVariableExpression(
1656 const DIGlobalVariableExpression &GVE) {
1657 CheckDI(GVE.getVariable(), "missing variable");
1658 if (auto *Var = GVE.getVariable())
1659 visitDIGlobalVariable(*Var);
1660 if (auto *Expr = GVE.getExpression()) {
1661 visitDIExpression(*Expr);
1662 if (auto Fragment = Expr->getFragmentInfo())
1663 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1664 }
1665}
1666
1667void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1668 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1669 if (auto *T = N.getRawType())
1670 CheckDI(isType(T), "invalid type ref", &N, T);
1671 if (auto *F = N.getRawFile())
1672 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1673}
1674
1675void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1676 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1677 N.getTag() == dwarf::DW_TAG_imported_declaration,
1678 "invalid tag", &N);
1679 if (auto *S = N.getRawScope())
1680 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1681 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1682 N.getRawEntity());
1683}
1684
1685void Verifier::visitComdat(const Comdat &C) {
1686 // In COFF the Module is invalid if the GlobalValue has private linkage.
1687 // Entities with private linkage don't have entries in the symbol table.
1688 if (TT.isOSBinFormatCOFF())
1689 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1690 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1691 GV);
1692}
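// For illustration (placeholder names): a module targeting COFF is rejected by
// the check above when the global value matching a comdat's name has private
// linkage, e.g.
//
//   $sym = comdat any
//   @sym = private global i32 0, comdat   ; invalid on COFF targets; fine on ELF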
1693
1694void Verifier::visitModuleIdents() {
1695 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1696 if (!Idents)
1697 return;
1698
1699 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1700 // Scan each llvm.ident entry and make sure that this requirement is met.
1701 for (const MDNode *N : Idents->operands()) {
1702 Check(N->getNumOperands() == 1,
1703 "incorrect number of operands in llvm.ident metadata", N);
1704 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1705 ("invalid value for llvm.ident metadata entry operand"
1706 "(the operand should be a string)"),
1707 N->getOperand(0));
1708 }
1709}
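// As a reference for the checks above, a well-formed llvm.ident attachment has
// one single-string MDNode per entry; a minimal sketch (the version string is
// an arbitrary placeholder):
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 18.1.0 (example)"}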
1710
1711void Verifier::visitModuleCommandLines() {
1712 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1713 if (!CommandLines)
1714 return;
1715
1716 // llvm.commandline takes a list of metadata entries. Each entry has only one
1717 // string. Scan each llvm.commandline entry and make sure that this
1718 // requirement is met.
1719 for (const MDNode *N : CommandLines->operands()) {
1720 Check(N->getNumOperands() == 1,
1721 "incorrect number of operands in llvm.commandline metadata", N);
1722 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1723 ("invalid value for llvm.commandline metadata entry operand"
1724 "(the operand should be a string)"),
1725 N->getOperand(0));
1726 }
1727}
1728
1729void Verifier::visitModuleFlags() {
1730 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1731 if (!Flags) return;
1732
1733 // Scan each flag, and track the flags and requirements.
1734 DenseMap<const MDString*, const MDNode*> SeenIDs;
1735 SmallVector<const MDNode*, 16> Requirements;
1736 uint64_t PAuthABIPlatform = -1;
1737 uint64_t PAuthABIVersion = -1;
1738 for (const MDNode *MDN : Flags->operands()) {
1739 visitModuleFlag(MDN, SeenIDs, Requirements);
1740 if (MDN->getNumOperands() != 3)
1741 continue;
1742 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1743 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1744 if (const auto *PAP =
1745 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1746 PAuthABIPlatform = PAP->getZExtValue();
1747 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1748 if (const auto *PAV =
1749 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1750 PAuthABIVersion = PAV->getZExtValue();
1751 }
1752 }
1753 }
1754
1755 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1756 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1757 "'aarch64-elf-pauthabi-version' module flags must be present");
1758
1759 // Validate that the requirements in the module are valid.
1760 for (const MDNode *Requirement : Requirements) {
1761 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1762 const Metadata *ReqValue = Requirement->getOperand(1);
1763
1764 const MDNode *Op = SeenIDs.lookup(Flag);
1765 if (!Op) {
1766 CheckFailed("invalid requirement on flag, flag is not present in module",
1767 Flag);
1768 continue;
1769 }
1770
1771 if (Op->getOperand(2) != ReqValue) {
1772 CheckFailed(("invalid requirement on flag, "
1773 "flag does not have the required value"),
1774 Flag);
1775 continue;
1776 }
1777 }
1778}
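// The PAuth ABI pairing check above expects either both flags or neither, as
// in this sketch (behavior and flag values are placeholders; only the pairing
// matters here):
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 2}
//   !1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 5}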
1779
1780void
1781Verifier::visitModuleFlag(const MDNode *Op,
1782 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1783 SmallVectorImpl<const MDNode *> &Requirements) {
1784 // Each module flag should have three arguments, the merge behavior (a
1785 // constant int), the flag ID (an MDString), and the value.
1786 Check(Op->getNumOperands() == 3,
1787 "incorrect number of operands in module flag", Op);
1789 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1790 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1791 "invalid behavior operand in module flag (expected constant integer)",
1792 Op->getOperand(0));
1793 Check(false,
1794 "invalid behavior operand in module flag (unexpected constant)",
1795 Op->getOperand(0));
1796 }
1797 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1798 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1799 Op->getOperand(1));
1800
1801 // Check the values for behaviors with additional requirements.
1802 switch (MFB) {
1803 case Module::Error:
1804 case Module::Warning:
1805 case Module::Override:
1806 // These behavior types accept any value.
1807 break;
1808
1809 case Module::Min: {
1810 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1811 Check(V && V->getValue().isNonNegative(),
1812 "invalid value for 'min' module flag (expected constant non-negative "
1813 "integer)",
1814 Op->getOperand(2));
1815 break;
1816 }
1817
1818 case Module::Max: {
1819 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1820 "invalid value for 'max' module flag (expected constant integer)",
1821 Op->getOperand(2));
1822 break;
1823 }
1824
1825 case Module::Require: {
1826 // The value should itself be an MDNode with two operands, a flag ID (an
1827 // MDString), and a value.
1828 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1829 Check(Value && Value->getNumOperands() == 2,
1830 "invalid value for 'require' module flag (expected metadata pair)",
1831 Op->getOperand(2));
1832 Check(isa<MDString>(Value->getOperand(0)),
1833 ("invalid value for 'require' module flag "
1834 "(first value operand should be a string)"),
1835 Value->getOperand(0));
1836
1837 // Append it to the list of requirements, to check once all module flags are
1838 // scanned.
1839 Requirements.push_back(Value);
1840 break;
1841 }
1842
1843 case Module::Append:
1844 case Module::AppendUnique: {
1845 // These behavior types require the operand be an MDNode.
1846 Check(isa<MDNode>(Op->getOperand(2)),
1847 "invalid value for 'append'-type module flag "
1848 "(expected a metadata node)",
1849 Op->getOperand(2));
1850 break;
1851 }
1852 }
1853
1854 // Unless this is a 'require' flag, check that the ID is unique.
1855 if (MFB != Module::Require) {
1856 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1857 Check(Inserted,
1858 "module flag identifiers must be unique (or of 'require' type)", ID);
1859 }
1860
1861 if (ID->getString() == "wchar_size") {
1862 ConstantInt *Value
1863 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1864 Check(Value, "wchar_size metadata requires constant integer argument");
1865 }
1866
1867 if (ID->getString() == "Linker Options") {
1868 // If the llvm.linker.options named metadata exists, we assume that the
1869 // bitcode reader has upgraded the module flag. Otherwise the flag might
1870 // have been created by a client directly.
1871 Check(M.getNamedMetadata("llvm.linker.options"),
1872 "'Linker Options' named metadata no longer supported");
1873 }
1874
1875 if (ID->getString() == "SemanticInterposition") {
1876 ConstantInt *Value =
1877 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1878 Check(Value,
1879 "SemanticInterposition metadata requires constant integer argument");
1880 }
1881
1882 if (ID->getString() == "CG Profile") {
1883 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1884 visitModuleFlagCGProfileEntry(MDO);
1885 }
1886}
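// A 'require' flag (behavior 3) wraps a {flag-name, value} pair that must be
// matched by another flag in the module; a minimal sketch with placeholder
// names and values:
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"foo", i32 37}   ; Error-behavior flag "foo" = 37
//   !1 = !{i32 3, !"bar", !2}       ; Require: the pair below must be satisfied
//   !2 = !{!"foo", i32 37}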
1887
1888void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1889 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1890 if (!FuncMDO)
1891 return;
1892 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1893 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1894 "expected a Function or null", FuncMDO);
1895 };
1896 auto Node = dyn_cast_or_null<MDNode>(MDO);
1897 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1898 CheckFunction(Node->getOperand(0));
1899 CheckFunction(Node->getOperand(1));
1900 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1901 Check(Count && Count->getType()->isIntegerTy(),
1902 "expected an integer constant", Node->getOperand(2));
1903}
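// Each "CG Profile" entry is expected to be a (caller, callee, count) triple;
// a sketch with placeholder functions and count (either function operand may
// also be null):
//
//   !0 = !{i32 5, !"CG Profile", !1}
//   !1 = !{!2}
//   !2 = !{ptr @caller, ptr @callee, i64 1024}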
1904
1905void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1906 for (Attribute A : Attrs) {
1907
1908 if (A.isStringAttribute()) {
1909#define GET_ATTR_NAMES
1910#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1911#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1912 if (A.getKindAsString() == #DISPLAY_NAME) { \
1913 auto V = A.getValueAsString(); \
1914 if (!(V.empty() || V == "true" || V == "false")) \
1915 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1916 ""); \
1917 }
1918
1919#include "llvm/IR/Attributes.inc"
1920 continue;
1921 }
1922
1923 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1924 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1925 V);
1926 return;
1927 }
1928 }
1929}
1930
1931 // verifyParameterAttrs - Check the given attributes for an argument or return
1932// value of the specified type. The value V is printed in error messages.
1933void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1934 const Value *V) {
1935 if (!Attrs.hasAttributes())
1936 return;
1937
1938 verifyAttributeTypes(Attrs, V);
1939
1940 for (Attribute Attr : Attrs)
1941 Check(Attr.isStringAttribute() ||
1942 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1943 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1944 V);
1945
1946 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1947 Check(Attrs.getNumAttributes() == 1,
1948 "Attribute 'immarg' is incompatible with other attributes", V);
1949 }
1950
1951 // Check for mutually incompatible attributes. Only inreg is compatible with
1952 // sret.
1953 unsigned AttrCount = 0;
1954 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1955 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1956 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1957 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1958 Attrs.hasAttribute(Attribute::InReg);
1959 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1960 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1961 Check(AttrCount <= 1,
1962 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1963 "'byref', and 'sret' are incompatible!",
1964 V);
1965
1966 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1967 Attrs.hasAttribute(Attribute::ReadOnly)),
1968 "Attributes "
1969 "'inalloca and readonly' are incompatible!",
1970 V);
1971
1972 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1973 Attrs.hasAttribute(Attribute::Returned)),
1974 "Attributes "
1975 "'sret and returned' are incompatible!",
1976 V);
1977
1978 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1979 Attrs.hasAttribute(Attribute::SExt)),
1980 "Attributes "
1981 "'zeroext and signext' are incompatible!",
1982 V);
1983
1984 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1985 Attrs.hasAttribute(Attribute::ReadOnly)),
1986 "Attributes "
1987 "'readnone and readonly' are incompatible!",
1988 V);
1989
1990 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1991 Attrs.hasAttribute(Attribute::WriteOnly)),
1992 "Attributes "
1993 "'readnone and writeonly' are incompatible!",
1994 V);
1995
1996 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1997 Attrs.hasAttribute(Attribute::WriteOnly)),
1998 "Attributes "
1999 "'readonly and writeonly' are incompatible!",
2000 V);
2001
2002 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2003 Attrs.hasAttribute(Attribute::AlwaysInline)),
2004 "Attributes "
2005 "'noinline and alwaysinline' are incompatible!",
2006 V);
2007
2008 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2009 Attrs.hasAttribute(Attribute::ReadNone)),
2010 "Attributes writable and readnone are incompatible!", V);
2011
2012 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2013 Attrs.hasAttribute(Attribute::ReadOnly)),
2014 "Attributes writable and readonly are incompatible!", V);
2015
2016 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2017 for (Attribute Attr : Attrs) {
2018 if (!Attr.isStringAttribute() &&
2019 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2020 CheckFailed("Attribute '" + Attr.getAsString() +
2021 "' applied to incompatible type!", V);
2022 return;
2023 }
2024 }
2025
2026 if (isa<PointerType>(Ty)) {
2027 if (Attrs.hasAttribute(Attribute::Alignment)) {
2028 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2029 Check(AttrAlign.value() <= Value::MaximumAlignment,
2030 "huge alignment values are unsupported", V);
2031 }
2032 if (Attrs.hasAttribute(Attribute::ByVal)) {
2033 Type *ByValTy = Attrs.getByValType();
2034 SmallPtrSet<Type *, 4> Visited;
2035 Check(ByValTy->isSized(&Visited),
2036 "Attribute 'byval' does not support unsized types!", V);
2037 // Check if it is or contains a target extension type that disallows being
2038 // used on the stack.
2039 Check(!ByValTy->containsNonLocalTargetExtType(),
2040 "'byval' argument has illegal target extension type", V);
2041 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2042 "huge 'byval' arguments are unsupported", V);
2043 }
2044 if (Attrs.hasAttribute(Attribute::ByRef)) {
2045 SmallPtrSet<Type *, 4> Visited;
2046 Check(Attrs.getByRefType()->isSized(&Visited),
2047 "Attribute 'byref' does not support unsized types!", V);
2048 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2049 (1ULL << 32),
2050 "huge 'byref' arguments are unsupported", V);
2051 }
2052 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2053 SmallPtrSet<Type *, 4> Visited;
2054 Check(Attrs.getInAllocaType()->isSized(&Visited),
2055 "Attribute 'inalloca' does not support unsized types!", V);
2056 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2057 (1ULL << 32),
2058 "huge 'inalloca' arguments are unsupported", V);
2059 }
2060 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2061 SmallPtrSet<Type *, 4> Visited;
2062 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2063 "Attribute 'preallocated' does not support unsized types!", V);
2064 Check(
2065 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2066 (1ULL << 32),
2067 "huge 'preallocated' arguments are unsupported", V);
2068 }
2069 }
2070
2071 if (Attrs.hasAttribute(Attribute::Initializes)) {
2072 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2073 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2074 V);
2076 "Attribute 'initializes' does not support unordered ranges", V);
2077 }
2078
2079 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2080 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2081 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2082 V);
2083 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2084 "Invalid value for 'nofpclass' test mask", V);
2085 }
2086 if (Attrs.hasAttribute(Attribute::Range)) {
2087 const ConstantRange &CR =
2088 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2090 "Range bit width must match type bit width!", V);
2091 }
2092}
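// For example, the mutual-exclusion checks above would accept the first
// declaration below and reject the second (signatures are illustrative only):
//
//   declare void @ok(i32 zeroext %x, ptr align 8 %p)
//   declare void @bad(i32 zeroext signext %x)   ; 'zeroext' and 'signext' clash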
2093
2094void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2095 const Value *V) {
2096 if (Attrs.hasFnAttr(Attr)) {
2097 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2098 unsigned N;
2099 if (S.getAsInteger(10, N))
2100 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2101 }
2102}
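// This helper is used for string attributes that must parse as base-10
// unsigned integers, e.g. (values are placeholders):
//
//   define void @f() "warn-stack-size"="80" "patchable-function-entry"="2" {
//     ret void
//   }
//
// A non-numeric value such as "warn-stack-size"="lots" would be reported.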
2103
2104// Check parameter attributes against a function type.
2105// The value V is printed in error messages.
2106void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2107 const Value *V, bool IsIntrinsic,
2108 bool IsInlineAsm) {
2109 if (Attrs.isEmpty())
2110 return;
2111
2112 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2113 Check(Attrs.hasParentContext(Context),
2114 "Attribute list does not match Module context!", &Attrs, V);
2115 for (const auto &AttrSet : Attrs) {
2116 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2117 "Attribute set does not match Module context!", &AttrSet, V);
2118 for (const auto &A : AttrSet) {
2119 Check(A.hasParentContext(Context),
2120 "Attribute does not match Module context!", &A, V);
2121 }
2122 }
2123 }
2124
2125 bool SawNest = false;
2126 bool SawReturned = false;
2127 bool SawSRet = false;
2128 bool SawSwiftSelf = false;
2129 bool SawSwiftAsync = false;
2130 bool SawSwiftError = false;
2131
2132 // Verify return value attributes.
2133 AttributeSet RetAttrs = Attrs.getRetAttrs();
2134 for (Attribute RetAttr : RetAttrs)
2135 Check(RetAttr.isStringAttribute() ||
2136 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2137 "Attribute '" + RetAttr.getAsString() +
2138 "' does not apply to function return values",
2139 V);
2140
2141 unsigned MaxParameterWidth = 0;
2142 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2143 if (Ty->isVectorTy()) {
2144 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2145 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2146 if (Size > MaxParameterWidth)
2147 MaxParameterWidth = Size;
2148 }
2149 }
2150 };
2151 GetMaxParameterWidth(FT->getReturnType());
2152 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2153
2154 // Verify parameter attributes.
2155 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2156 Type *Ty = FT->getParamType(i);
2157 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2158
2159 if (!IsIntrinsic) {
2160 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2161 "immarg attribute only applies to intrinsics", V);
2162 if (!IsInlineAsm)
2163 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2164 "Attribute 'elementtype' can only be applied to intrinsics"
2165 " and inline asm.",
2166 V);
2167 }
2168
2169 verifyParameterAttrs(ArgAttrs, Ty, V);
2170 GetMaxParameterWidth(Ty);
2171
2172 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2173 Check(!SawNest, "More than one parameter has attribute nest!", V);
2174 SawNest = true;
2175 }
2176
2177 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2178 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2179 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2180 "Incompatible argument and return types for 'returned' attribute",
2181 V);
2182 SawReturned = true;
2183 }
2184
2185 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2186 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2187 Check(i == 0 || i == 1,
2188 "Attribute 'sret' is not on first or second parameter!", V);
2189 SawSRet = true;
2190 }
2191
2192 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2193 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2194 SawSwiftSelf = true;
2195 }
2196
2197 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2198 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2199 SawSwiftAsync = true;
2200 }
2201
2202 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2203 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2204 SawSwiftError = true;
2205 }
2206
2207 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2208 Check(i == FT->getNumParams() - 1,
2209 "inalloca isn't on the last parameter!", V);
2210 }
2211 }
2212
2213 if (!Attrs.hasFnAttrs())
2214 return;
2215
2216 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2217 for (Attribute FnAttr : Attrs.getFnAttrs())
2218 Check(FnAttr.isStringAttribute() ||
2219 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2220 "Attribute '" + FnAttr.getAsString() +
2221 "' does not apply to functions!",
2222 V);
2223
2224 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2225 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2226 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2227
2228 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2229 Check(Attrs.hasFnAttr(Attribute::NoInline),
2230 "Attribute 'optnone' requires 'noinline'!", V);
2231
2232 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2233 "Attributes 'optsize and optnone' are incompatible!", V);
2234
2235 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2236 "Attributes 'minsize and optnone' are incompatible!", V);
2237
2238 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2239 "Attributes 'optdebug and optnone' are incompatible!", V);
2240 }
2241
2242 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2243 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2244 "Attributes "
2245 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2246 V);
2247
2248 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2249 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2250 "Attributes 'optsize and optdebug' are incompatible!", V);
2251
2252 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2253 "Attributes 'minsize and optdebug' are incompatible!", V);
2254 }
2255
2256 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2257 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2258 "Attribute writable and memory without argmem: write are incompatible!",
2259 V);
2260
2261 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2262 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2263 "Attributes 'aarch64_pstate_sm_enabled and "
2264 "aarch64_pstate_sm_compatible' are incompatible!",
2265 V);
2266 }
2267
2268 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2269 Attrs.hasFnAttr("aarch64_inout_za") +
2270 Attrs.hasFnAttr("aarch64_out_za") +
2271 Attrs.hasFnAttr("aarch64_preserves_za") +
2272 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2273 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2274 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2275 "'aarch64_za_state_agnostic' are mutually exclusive",
2276 V);
2277
2278 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2279 Attrs.hasFnAttr("aarch64_in_zt0") +
2280 Attrs.hasFnAttr("aarch64_inout_zt0") +
2281 Attrs.hasFnAttr("aarch64_out_zt0") +
2282 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2283 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2284 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2285 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2286 "'aarch64_za_state_agnostic' are mutually exclusive",
2287 V);
2288
2289 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2290 const GlobalValue *GV = cast<GlobalValue>(V);
2292 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2293 }
2294
2295 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2296 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2297 if (ParamNo >= FT->getNumParams()) {
2298 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2299 return false;
2300 }
2301
2302 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2303 CheckFailed("'allocsize' " + Name +
2304 " argument must refer to an integer parameter",
2305 V);
2306 return false;
2307 }
2308
2309 return true;
2310 };
2311
2312 if (!CheckParam("element size", Args->first))
2313 return;
2314
2315 if (Args->second && !CheckParam("number of elements", *Args->second))
2316 return;
2317 }
2318
2319 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2320 AllocFnKind K = Attrs.getAllocKind();
2321 AllocFnKind Type =
2322 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2323 if (!is_contained(
2324 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2325 Type))
2326 CheckFailed(
2327 "'allockind()' requires exactly one of alloc, realloc, and free");
2328 if ((Type == AllocFnKind::Free) &&
2329 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2330 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2331 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2332 "or aligned modifiers.");
2333 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2334 if ((K & ZeroedUninit) == ZeroedUninit)
2335 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2336 }
2337
2338 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2339 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2340 if (VScaleMin == 0)
2341 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2342 else if (!isPowerOf2_32(VScaleMin))
2343 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2344 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2345 if (VScaleMax && VScaleMin > VScaleMax)
2346 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2347 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2348 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2349 }
2350
2351 if (Attrs.hasFnAttr("frame-pointer")) {
2352 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2353 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2354 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2355 }
2356
2357 // Check EVEX512 feature.
2358 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2359 TT.isX86()) {
2360 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2361 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2362 "512-bit vector arguments require 'evex512' for AVX512", V);
2363 }
2364
2365 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2366 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2367 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2368
2369 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2370 StringRef S = A.getValueAsString();
2371 if (S != "none" && S != "all" && S != "non-leaf")
2372 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2373 }
2374
2375 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2376 StringRef S = A.getValueAsString();
2377 if (S != "a_key" && S != "b_key")
2378 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2379 V);
2380 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2381 CheckFailed(
2382 "'sign-return-address-key' present without `sign-return-address`");
2383 }
2384 }
2385
2386 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2387 StringRef S = A.getValueAsString();
2388 if (S != "" && S != "true" && S != "false")
2389 CheckFailed(
2390 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2391 }
2392
2393 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2394 StringRef S = A.getValueAsString();
2395 if (S != "" && S != "true" && S != "false")
2396 CheckFailed(
2397 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2398 }
2399
2400 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2401 StringRef S = A.getValueAsString();
2402 if (S != "" && S != "true" && S != "false")
2403 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2404 V);
2405 }
2406
2407 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2408 StringRef S = A.getValueAsString();
2409 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2410 if (!Info)
2411 CheckFailed("invalid name for a VFABI variant: " + S, V);
2412 }
2413
2414 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2415 StringRef S = A.getValueAsString();
2417 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2418 }
2419
2420 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2421 StringRef S = A.getValueAsString();
2423 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2424 V);
2425 }
2426}
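// Two of the function-attribute forms validated above, written out as IR
// declarations (names and constants are placeholders):
//
//   declare ptr @my_calloc(i64, i64) allocsize(0, 1)   ; both indices name integer params
//   declare void @scalable_fn() vscale_range(2, 16)    ; min/max are powers of two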
2427
2428void Verifier::verifyFunctionMetadata(
2429 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2430 for (const auto &Pair : MDs) {
2431 if (Pair.first == LLVMContext::MD_prof) {
2432 MDNode *MD = Pair.second;
2433 Check(MD->getNumOperands() >= 2,
2434 "!prof annotations should have no less than 2 operands", MD);
2435
2436 // Check first operand.
2437 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2438 MD);
2439 Check(isa<MDString>(MD->getOperand(0)),
2440 "expected string with name of the !prof annotation", MD);
2441 MDString *MDS = cast<MDString>(MD->getOperand(0));
2442 StringRef ProfName = MDS->getString();
2443 Check(ProfName == "function_entry_count" ||
2444 ProfName == "synthetic_function_entry_count",
2445 "first operand should be 'function_entry_count'"
2446 " or 'synthetic_function_entry_count'",
2447 MD);
2448
2449 // Check second operand.
2450 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2451 MD);
2452 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2453 "expected integer argument to function_entry_count", MD);
2454 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2455 MDNode *MD = Pair.second;
2456 Check(MD->getNumOperands() == 1,
2457 "!kcfi_type must have exactly one operand", MD);
2458 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2459 MD);
2460 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2461 "expected a constant operand for !kcfi_type", MD);
2462 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2463 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2464 "expected a constant integer operand for !kcfi_type", MD);
2465 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2466 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2467 }
2468 }
2469}
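// Well-formed !prof and !kcfi_type attachments of the shapes checked above
// (the entry count and type hash are placeholders):
//
//   define void @f() !prof !0 !kcfi_type !1 {
//     ret void
//   }
//   !0 = !{!"function_entry_count", i64 100}
//   !1 = !{i32 12345678}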
2470
2471void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2472 if (!ConstantExprVisited.insert(EntryC).second)
2473 return;
2474
2475 SmallVector<const Constant *, 16> Stack;
2476 Stack.push_back(EntryC);
2477
2478 while (!Stack.empty()) {
2479 const Constant *C = Stack.pop_back_val();
2480
2481 // Check this constant expression.
2482 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2483 visitConstantExpr(CE);
2484
2485 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2486 visitConstantPtrAuth(CPA);
2487
2488 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2489 // Global Values get visited separately, but we do need to make sure
2490 // that the global value is in the correct module
2491 Check(GV->getParent() == &M, "Referencing global in another module!",
2492 EntryC, &M, GV, GV->getParent());
2493 continue;
2494 }
2495
2496 // Visit all sub-expressions.
2497 for (const Use &U : C->operands()) {
2498 const auto *OpC = dyn_cast<Constant>(U);
2499 if (!OpC)
2500 continue;
2501 if (!ConstantExprVisited.insert(OpC).second)
2502 continue;
2503 Stack.push_back(OpC);
2504 }
2505 }
2506}
2507
2508void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2509 if (CE->getOpcode() == Instruction::BitCast)
2510 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2511 CE->getType()),
2512 "Invalid bitcast", CE);
2513}
2514
2515void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2516 Check(CPA->getPointer()->getType()->isPointerTy(),
2517 "signed ptrauth constant base pointer must have pointer type");
2518
2519 Check(CPA->getType() == CPA->getPointer()->getType(),
2520 "signed ptrauth constant must have same type as its base pointer");
2521
2522 Check(CPA->getKey()->getBitWidth() == 32,
2523 "signed ptrauth constant key must be i32 constant integer");
2524
2526 "signed ptrauth constant address discriminator must be a pointer");
2527
2528 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2529 "signed ptrauth constant discriminator must be i64 constant integer");
2530}
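// A signed-pointer constant of the shape these checks expect: a pointer-typed
// base, an i32 key, and an i64 discriminator (the key and discriminator values
// below are placeholders):
//
//   declare void @f()
//   @fptr = global ptr ptrauth (ptr @f, i32 2, i64 1234)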
2531
2532bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2533 // There shouldn't be more attribute sets than there are parameters plus the
2534 // function and return value.
2535 return Attrs.getNumAttrSets() <= Params + 2;
2536}
2537
2538void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2539 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2540 unsigned ArgNo = 0;
2541 unsigned LabelNo = 0;
2542 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2543 if (CI.Type == InlineAsm::isLabel) {
2544 ++LabelNo;
2545 continue;
2546 }
2547
2548 // Only deal with constraints that correspond to call arguments.
2549 if (!CI.hasArg())
2550 continue;
2551
2552 if (CI.isIndirect) {
2553 const Value *Arg = Call.getArgOperand(ArgNo);
2554 Check(Arg->getType()->isPointerTy(),
2555 "Operand for indirect constraint must have pointer type", &Call);
2556
2557 Check(Call.getParamElementType(ArgNo),
2558 "Operand for indirect constraint must have elementtype attribute",
2559 &Call);
2560 } else {
2561 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2562 "Elementtype attribute can only be applied for indirect "
2563 "constraints",
2564 &Call);
2565 }
2566
2567 ArgNo++;
2568 }
2569
2570 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2571 Check(LabelNo == CallBr->getNumIndirectDests(),
2572 "Number of label constraints does not match number of callbr dests",
2573 &Call);
2574 } else {
2575 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2576 &Call);
2577 }
2578}
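// An indirect ("*m") inline-asm operand must be a pointer carrying an
// elementtype attribute, as in this x86-flavoured sketch:
//
//   define void @store_one(ptr %p) {
//     call void asm sideeffect "movl $$1, $0", "=*m"(ptr elementtype(i32) %p)
//     ret void
//   }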
2579
2580/// Verify that statepoint intrinsic is well formed.
2581void Verifier::verifyStatepoint(const CallBase &Call) {
2582 assert(Call.getCalledFunction() &&
2583 Call.getCalledFunction()->getIntrinsicID() ==
2584 Intrinsic::experimental_gc_statepoint);
2585
2586 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2587 !Call.onlyAccessesArgMemory(),
2588 "gc.statepoint must read and write all memory to preserve "
2589 "reordering restrictions required by safepoint semantics",
2590 Call);
2591
2592 const int64_t NumPatchBytes =
2593 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2594 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2595 Check(NumPatchBytes >= 0,
2596 "gc.statepoint number of patchable bytes must be "
2597 "positive",
2598 Call);
2599
2600 Type *TargetElemType = Call.getParamElementType(2);
2601 Check(TargetElemType,
2602 "gc.statepoint callee argument must have elementtype attribute", Call);
2603 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2604 Check(TargetFuncType,
2605 "gc.statepoint callee elementtype must be function type", Call);
2606
2607 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2608 Check(NumCallArgs >= 0,
2609 "gc.statepoint number of arguments to underlying call "
2610 "must be positive",
2611 Call);
2612 const int NumParams = (int)TargetFuncType->getNumParams();
2613 if (TargetFuncType->isVarArg()) {
2614 Check(NumCallArgs >= NumParams,
2615 "gc.statepoint mismatch in number of vararg call args", Call);
2616
2617 // TODO: Remove this limitation
2618 Check(TargetFuncType->getReturnType()->isVoidTy(),
2619 "gc.statepoint doesn't support wrapping non-void "
2620 "vararg functions yet",
2621 Call);
2622 } else
2623 Check(NumCallArgs == NumParams,
2624 "gc.statepoint mismatch in number of call args", Call);
2625
2626 const uint64_t Flags
2627 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2628 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2629 "unknown flag used in gc.statepoint flags argument", Call);
2630
2631 // Verify that the types of the call parameter arguments match
2632 // the type of the wrapped callee.
2633 AttributeList Attrs = Call.getAttributes();
2634 for (int i = 0; i < NumParams; i++) {
2635 Type *ParamType = TargetFuncType->getParamType(i);
2636 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2637 Check(ArgType == ParamType,
2638 "gc.statepoint call argument does not match wrapped "
2639 "function type",
2640 Call);
2641
2642 if (TargetFuncType->isVarArg()) {
2643 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2644 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2645 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2646 }
2647 }
2648
2649 const int EndCallArgsInx = 4 + NumCallArgs;
2650
2651 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2652 Check(isa<ConstantInt>(NumTransitionArgsV),
2653 "gc.statepoint number of transition arguments "
2654 "must be constant integer",
2655 Call);
2656 const int NumTransitionArgs =
2657 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2658 Check(NumTransitionArgs == 0,
2659 "gc.statepoint w/inline transition bundle is deprecated", Call);
2660 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2661
2662 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2663 Check(isa<ConstantInt>(NumDeoptArgsV),
2664 "gc.statepoint number of deoptimization arguments "
2665 "must be constant integer",
2666 Call);
2667 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2668 Check(NumDeoptArgs == 0,
2669 "gc.statepoint w/inline deopt operands is deprecated", Call);
2670
2671 const int ExpectedNumArgs = 7 + NumCallArgs;
2672 Check(ExpectedNumArgs == (int)Call.arg_size(),
2673 "gc.statepoint too many arguments", Call);
2674
2675 // Check that the only uses of this gc.statepoint are gc.result or
2676 // gc.relocate calls which are tied to this statepoint and thus part
2677 // of the same statepoint sequence
2678 for (const User *U : Call.users()) {
2679 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2680 Check(UserCall, "illegal use of statepoint token", Call, U);
2681 if (!UserCall)
2682 continue;
2683 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2684 "gc.result or gc.relocate are the only value uses "
2685 "of a gc.statepoint",
2686 Call, U);
2687 if (isa<GCResultInst>(UserCall)) {
2688 Check(UserCall->getArgOperand(0) == &Call,
2689 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2690 } else if (isa<GCRelocateInst>(UserCall)) {
2691 Check(UserCall->getArgOperand(0) == &Call,
2692 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2693 }
2694 }
2695
2696 // Note: It is legal for a single derived pointer to be listed multiple
2697 // times. It's non-optimal, but it is legal. It can also happen after
2698 // insertion if we strip a bitcast away.
2699 // Note: It is really tempting to check that each base is relocated and
2700 // that a derived pointer is never reused as a base pointer. This turns
2701 // out to be problematic since optimizations run after safepoint insertion
2702 // can recognize equality properties that the insertion logic doesn't know
2703 // about. See example statepoint.ll in the verifier subdirectory
2704}
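// A sketch of the operand layout being validated, for a statepoint wrapping a
// void() callee with no call, transition, or deopt arguments (the ID and
// callee are placeholders): id, #patch-bytes, callee, #call-args, flags, then
// the two trailing zero counts; the call itself sits inside a function body.
//
//   declare void @callee()
//   declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
//
//   %tok = call token (i64, i32, ptr, i32, i32, ...)
//       @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0,
//           ptr elementtype(void ()) @callee, i32 0, i32 0, i32 0, i32 0)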
2705
2706void Verifier::verifyFrameRecoverIndices() {
2707 for (auto &Counts : FrameEscapeInfo) {
2708 Function *F = Counts.first;
2709 unsigned EscapedObjectCount = Counts.second.first;
2710 unsigned MaxRecoveredIndex = Counts.second.second;
2711 Check(MaxRecoveredIndex <= EscapedObjectCount,
2712 "all indices passed to llvm.localrecover must be less than the "
2713 "number of arguments passed to llvm.localescape in the parent "
2714 "function",
2715 F);
2716 }
2717}
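// The index rule above, in IR form: with two escaped allocas, only indices 0
// and 1 are valid in the recovering function (function names are placeholders):
//
//   define void @parent() {
//     %a = alloca i32
//     %b = alloca i32
//     call void (...) @llvm.localescape(ptr %a, ptr %b)
//     ret void
//   }
//   define void @child(ptr %fp) {
//     %slot = call ptr @llvm.localrecover(ptr @parent, ptr %fp, i32 1)
//     ret void
//   }
//   declare void @llvm.localescape(...)
//   declare ptr @llvm.localrecover(ptr, ptr, i32)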
2718
2719static Instruction *getSuccPad(Instruction *Terminator) {
2720 BasicBlock *UnwindDest;
2721 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2722 UnwindDest = II->getUnwindDest();
2723 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2724 UnwindDest = CSI->getUnwindDest();
2725 else
2726 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2727 return UnwindDest->getFirstNonPHI();
2728}
2729
2730void Verifier::verifySiblingFuncletUnwinds() {
2731 SmallPtrSet<Instruction *, 8> Visited;
2732 SmallPtrSet<Instruction *, 8> Active;
2733 for (const auto &Pair : SiblingFuncletInfo) {
2734 Instruction *PredPad = Pair.first;
2735 if (Visited.count(PredPad))
2736 continue;
2737 Active.insert(PredPad);
2738 Instruction *Terminator = Pair.second;
2739 do {
2740 Instruction *SuccPad = getSuccPad(Terminator);
2741 if (Active.count(SuccPad)) {
2742 // Found a cycle; report error
2743 Instruction *CyclePad = SuccPad;
2744 SmallVector<Instruction *, 8> CycleNodes;
2745 do {
2746 CycleNodes.push_back(CyclePad);
2747 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2748 if (CycleTerminator != CyclePad)
2749 CycleNodes.push_back(CycleTerminator);
2750 CyclePad = getSuccPad(CycleTerminator);
2751 } while (CyclePad != SuccPad);
2752 Check(false, "EH pads can't handle each other's exceptions",
2753 ArrayRef<Instruction *>(CycleNodes));
2754 }
2755 // Don't re-walk a node we've already checked
2756 if (!Visited.insert(SuccPad).second)
2757 break;
2758 // Walk to this successor if it has a map entry.
2759 PredPad = SuccPad;
2760 auto TermI = SiblingFuncletInfo.find(PredPad);
2761 if (TermI == SiblingFuncletInfo.end())
2762 break;
2763 Terminator = TermI->second;
2764 Active.insert(PredPad);
2765 } while (true);
2766 // Each node only has one successor, so we've walked all the active
2767 // nodes' successors.
2768 Active.clear();
2769 }
2770}
2771
2772// visitFunction - Verify that a function is ok.
2773//
2774void Verifier::visitFunction(const Function &F) {
2775 visitGlobalValue(F);
2776
2777 // Check function arguments.
2778 FunctionType *FT = F.getFunctionType();
2779 unsigned NumArgs = F.arg_size();
2780
2781 Check(&Context == &F.getContext(),
2782 "Function context does not match Module context!", &F);
2783
2784 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2785 Check(FT->getNumParams() == NumArgs,
2786 "# formal arguments must match # of arguments for function type!", &F,
2787 FT);
2788 Check(F.getReturnType()->isFirstClassType() ||
2789 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2790 "Functions cannot return aggregate values!", &F);
2791
2792 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2793 "Invalid struct return type!", &F);
2794
2795 AttributeList Attrs = F.getAttributes();
2796
2797 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2798 "Attribute after last parameter!", &F);
2799
2800 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2801 "Function debug format should match parent module", &F,
2802 F.IsNewDbgInfoFormat, F.getParent(),
2803 F.getParent()->IsNewDbgInfoFormat);
2804
2805 bool IsIntrinsic = F.isIntrinsic();
2806
2807 // Check function attributes.
2808 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2809
2810 // On function declarations/definitions, we do not support the builtin
2811 // attribute. We do not check this in verifyFunctionAttrs since that is
2812 // checking for Attributes that can/cannot ever be on functions.
2813 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2814 "Attribute 'builtin' can only be applied to a callsite.", &F);
2815
2816 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2817 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2818
2819 if (Attrs.hasFnAttr(Attribute::Naked))
2820 for (const Argument &Arg : F.args())
2821 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2822
2823 // Check that this function meets the restrictions on this calling convention.
2824 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2825 // restrictions can be lifted.
2826 switch (F.getCallingConv()) {
2827 default:
2828 case CallingConv::C:
2829 break;
2830 case CallingConv::X86_INTR: {
2831 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2832 "Calling convention parameter requires byval", &F);
2833 break;
2834 }
2835 case CallingConv::AMDGPU_KERNEL:
2836 case CallingConv::SPIR_KERNEL:
2837 case CallingConv::AMDGPU_CS_Chain:
2838 case CallingConv::AMDGPU_CS_ChainPreserve:
2839 Check(F.getReturnType()->isVoidTy(),
2840 "Calling convention requires void return type", &F);
2841 [[fallthrough]];
2842 case CallingConv::AMDGPU_VS:
2843 case CallingConv::AMDGPU_HS:
2844 case CallingConv::AMDGPU_GS:
2845 case CallingConv::AMDGPU_PS:
2846 case CallingConv::AMDGPU_CS:
2847 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2848 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2849 const unsigned StackAS = DL.getAllocaAddrSpace();
2850 unsigned i = 0;
2851 for (const Argument &Arg : F.args()) {
2852 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2853 "Calling convention disallows byval", &F);
2854 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2855 "Calling convention disallows preallocated", &F);
2856 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2857 "Calling convention disallows inalloca", &F);
2858
2859 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2860 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2861 // value here.
2862 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2863 "Calling convention disallows stack byref", &F);
2864 }
2865
2866 ++i;
2867 }
2868 }
2869
2870 [[fallthrough]];
2871 case CallingConv::Fast:
2872 case CallingConv::Cold:
2873 case CallingConv::Intel_OCL_BI:
2874 case CallingConv::PTX_Kernel:
2875 case CallingConv::PTX_Device:
2876 Check(!F.isVarArg(),
2877 "Calling convention does not support varargs or "
2878 "perfect forwarding!",
2879 &F);
2880 break;
2881 }
2882
2883 // Check that the argument values match the function type for this function...
2884 unsigned i = 0;
2885 for (const Argument &Arg : F.args()) {
2886 Check(Arg.getType() == FT->getParamType(i),
2887 "Argument value does not match function argument type!", &Arg,
2888 FT->getParamType(i));
2889 Check(Arg.getType()->isFirstClassType(),
2890 "Function arguments must have first-class types!", &Arg);
2891 if (!IsIntrinsic) {
2892 Check(!Arg.getType()->isMetadataTy(),
2893 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2894 Check(!Arg.getType()->isTokenTy(),
2895 "Function takes token but isn't an intrinsic", &Arg, &F);
2896 Check(!Arg.getType()->isX86_AMXTy(),
2897 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2898 }
2899
2900 // Check that swifterror argument is only used by loads and stores.
2901 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2902 verifySwiftErrorValue(&Arg);
2903 }
2904 ++i;
2905 }
2906
2907 if (!IsIntrinsic) {
2908 Check(!F.getReturnType()->isTokenTy(),
2909 "Function returns a token but isn't an intrinsic", &F);
2910 Check(!F.getReturnType()->isX86_AMXTy(),
2911 "Function returns a x86_amx but isn't an intrinsic", &F);
2912 }
2913
2914 // Get the function metadata attachments.
2915 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2916 F.getAllMetadata(MDs);
2917 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2918 verifyFunctionMetadata(MDs);
2919
2920 // Check validity of the personality function
2921 if (F.hasPersonalityFn()) {
2922 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2923 if (Per)
2924 Check(Per->getParent() == F.getParent(),
2925 "Referencing personality function in another module!", &F,
2926 F.getParent(), Per, Per->getParent());
2927 }
2928
2929 // EH funclet coloring can be expensive, recompute on-demand
2930 BlockEHFuncletColors.clear();
2931
2932 if (F.isMaterializable()) {
2933 // Function has a body somewhere we can't see.
2934 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2935 MDs.empty() ? nullptr : MDs.front().second);
2936 } else if (F.isDeclaration()) {
2937 for (const auto &I : MDs) {
2938 // This is used for call site debug information.
2939 CheckDI(I.first != LLVMContext::MD_dbg ||
2940 !cast<DISubprogram>(I.second)->isDistinct(),
2941 "function declaration may only have a unique !dbg attachment",
2942 &F);
2943 Check(I.first != LLVMContext::MD_prof,
2944 "function declaration may not have a !prof attachment", &F);
2945
2946 // Verify the metadata itself.
2947 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2948 }
2949 Check(!F.hasPersonalityFn(),
2950 "Function declaration shouldn't have a personality routine", &F);
2951 } else {
2952 // Verify that this function (which has a body) is not named "llvm.*". It
2953 // is not legal to define intrinsics.
2954 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2955
2956 // Check the entry node
2957 const BasicBlock *Entry = &F.getEntryBlock();
2958 Check(pred_empty(Entry),
2959 "Entry block to function must not have predecessors!", Entry);
2960
2961 // The address of the entry block cannot be taken, unless it is dead.
2962 if (Entry->hasAddressTaken()) {
2963 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2964 "blockaddress may not be used with the entry block!", Entry);
2965 }
2966
2967 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2968 NumKCFIAttachments = 0;
2969 // Visit metadata attachments.
2970 for (const auto &I : MDs) {
2971 // Verify that the attachment is legal.
2972 auto AllowLocs = AreDebugLocsAllowed::No;
2973 switch (I.first) {
2974 default:
2975 break;
2976 case LLVMContext::MD_dbg: {
2977 ++NumDebugAttachments;
2978 CheckDI(NumDebugAttachments == 1,
2979 "function must have a single !dbg attachment", &F, I.second);
2980 CheckDI(isa<DISubprogram>(I.second),
2981 "function !dbg attachment must be a subprogram", &F, I.second);
2982 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2983 "function definition may only have a distinct !dbg attachment",
2984 &F);
2985
2986 auto *SP = cast<DISubprogram>(I.second);
2987 const Function *&AttachedTo = DISubprogramAttachments[SP];
2988 CheckDI(!AttachedTo || AttachedTo == &F,
2989 "DISubprogram attached to more than one function", SP, &F);
2990 AttachedTo = &F;
2991 AllowLocs = AreDebugLocsAllowed::Yes;
2992 break;
2993 }
2994 case LLVMContext::MD_prof:
2995 ++NumProfAttachments;
2996 Check(NumProfAttachments == 1,
2997 "function must have a single !prof attachment", &F, I.second);
2998 break;
2999 case LLVMContext::MD_kcfi_type:
3000 ++NumKCFIAttachments;
3001 Check(NumKCFIAttachments == 1,
3002 "function must have a single !kcfi_type attachment", &F,
3003 I.second);
3004 break;
3005 }
3006
3007 // Verify the metadata itself.
3008 visitMDNode(*I.second, AllowLocs);
3009 }
3010 }
3011
3012 // If this function is actually an intrinsic, verify that it is only used in
3013 // direct call/invokes, never having its "address taken".
3014 // Only do this if the module is materialized, otherwise we don't have all the
3015 // uses.
3016 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3017 const User *U;
3018 if (F.hasAddressTaken(&U, false, true, false,
3019 /*IgnoreARCAttachedCall=*/true))
3020 Check(false, "Invalid user of intrinsic instruction!", U);
3021 }
3022
3023 // Check intrinsics' signatures.
3024 switch (F.getIntrinsicID()) {
3025 case Intrinsic::experimental_gc_get_pointer_base: {
3026 FunctionType *FT = F.getFunctionType();
3027 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3028 Check(isa<PointerType>(F.getReturnType()),
3029 "gc.get.pointer.base must return a pointer", F);
3030 Check(FT->getParamType(0) == F.getReturnType(),
3031 "gc.get.pointer.base operand and result must be of the same type", F);
3032 break;
3033 }
3034 case Intrinsic::experimental_gc_get_pointer_offset: {
3035 FunctionType *FT = F.getFunctionType();
3036 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3037 Check(isa<PointerType>(FT->getParamType(0)),
3038 "gc.get.pointer.offset operand must be a pointer", F);
3039 Check(F.getReturnType()->isIntegerTy(),
3040 "gc.get.pointer.offset must return integer", F);
3041 break;
3042 }
3043 }
3044
3045 auto *N = F.getSubprogram();
3046 HasDebugInfo = (N != nullptr);
3047 if (!HasDebugInfo)
3048 return;
3049
3050 // Check that all !dbg attachments lead back to N.
3051 //
3052 // FIXME: Check this incrementally while visiting !dbg attachments.
3053 // FIXME: Only check when N is the canonical subprogram for F.
3054 SmallPtrSet<const MDNode *, 32> Seen;
3055 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3056 // Be careful about using DILocation here since we might be dealing with
3057 // broken code (this is the Verifier after all).
3058 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3059 if (!DL)
3060 return;
3061 if (!Seen.insert(DL).second)
3062 return;
3063
3064 Metadata *Parent = DL->getRawScope();
3065 CheckDI(Parent && isa<DILocalScope>(Parent),
3066 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3067
3068 DILocalScope *Scope = DL->getInlinedAtScope();
3069 Check(Scope, "Failed to find DILocalScope", DL);
3070
3071 if (!Seen.insert(Scope).second)
3072 return;
3073
3074 DISubprogram *SP = Scope->getSubprogram();
3075
3076 // Scope and SP could be the same MDNode and we don't want to skip
3077 // validation in that case
3078 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3079 return;
3080
3081 CheckDI(SP->describes(&F),
3082 "!dbg attachment points at wrong subprogram for function", N, &F,
3083 &I, DL, Scope, SP);
3084 };
3085 for (auto &BB : F)
3086 for (auto &I : BB) {
3087 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3088 // The llvm.loop annotations also contain two DILocations.
3089 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3090 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3091 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3092 if (BrokenDebugInfo)
3093 return;
3094 }
3095}
3096
3097 // visitBasicBlock - Verify that a basic block is well formed...
3098//
3099void Verifier::visitBasicBlock(BasicBlock &BB) {
3100 InstsInThisBlock.clear();
3101 ConvergenceVerifyHelper.visit(BB);
3102
3103 // Ensure that basic blocks have terminators!
3104 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3105
3106 // Check constraints that this basic block imposes on all of the PHI nodes in
3107 // it.
3108 if (isa<PHINode>(BB.front())) {
3109     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3110     SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3111     llvm::sort(Preds);
3112 for (const PHINode &PN : BB.phis()) {
3113 Check(PN.getNumIncomingValues() == Preds.size(),
3114 "PHINode should have one entry for each predecessor of its "
3115 "parent basic block!",
3116 &PN);
3117
3118 // Get and sort all incoming values in the PHI node...
3119 Values.clear();
3120 Values.reserve(PN.getNumIncomingValues());
3121 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3122 Values.push_back(
3123 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3124 llvm::sort(Values);
3125
3126 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3127 // Check to make sure that if there is more than one entry for a
3128 // particular basic block in this PHI node, that the incoming values are
3129 // all identical.
3130 //
3131 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3132 Values[i].second == Values[i - 1].second,
3133 "PHI node has multiple entries for the same basic block with "
3134 "different incoming values!",
3135 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3136
3137 // Check to make sure that the predecessors and PHI node entries are
3138 // matched up.
3139 Check(Values[i].first == Preds[i],
3140 "PHI node entries do not match predecessors!", &PN,
3141 Values[i].first, Preds[i]);
3142 }
3143 }
3144 }
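// A rough sketch of PHIs the two checks above reject (block names made up):
//   bb2:                               ; preds = %bb0, %bb1
//     %p = phi i32 [ 0, %bb0 ]         ; missing an entry for %bb1
//     %q = phi i32 [ 0, %bb0 ], [ 1, %bb0 ], [ 2, %bb1 ]
//                                      ; %bb0 listed twice with different values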
3145
3146 // Check that all instructions have their parent pointers set up correctly.
3147 for (auto &I : BB)
3148 {
3149 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3150 }
3151
3152 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3153 "BB debug format should match parent function", &BB,
3154 BB.IsNewDbgInfoFormat, BB.getParent(),
3155 BB.getParent()->IsNewDbgInfoFormat);
3156
3157 // Confirm that no issues arise from the debug program.
3158 if (BB.IsNewDbgInfoFormat)
3159 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3160 &BB);
3161}
3162
3163void Verifier::visitTerminator(Instruction &I) {
3164 // Ensure that terminators only exist at the end of the basic block.
3165 Check(&I == I.getParent()->getTerminator(),
3166 "Terminator found in the middle of a basic block!", I.getParent());
3168}
3169
3170void Verifier::visitBranchInst(BranchInst &BI) {
3171 if (BI.isConditional()) {
3172     Check(BI.getCondition()->getType()->isIntegerTy(1),
3173           "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3174 }
3175 visitTerminator(BI);
3176}
3177
3178void Verifier::visitReturnInst(ReturnInst &RI) {
3179 Function *F = RI.getParent()->getParent();
3180 unsigned N = RI.getNumOperands();
3181 if (F->getReturnType()->isVoidTy())
3182 Check(N == 0,
3183 "Found return instr that returns non-void in Function of void "
3184 "return type!",
3185 &RI, F->getReturnType());
3186 else
3187 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3188 "Function return type does not match operand "
3189 "type of return inst!",
3190 &RI, F->getReturnType());
3191
3192 // Check to make sure that the return value has necessary properties for
3193 // terminators...
3194 visitTerminator(RI);
3195}
3196
3197void Verifier::visitSwitchInst(SwitchInst &SI) {
3198 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3199 // Check to make sure that all of the constants in the switch instruction
3200 // have the same type as the switched-on value.
3201 Type *SwitchTy = SI.getCondition()->getType();
3202   SmallPtrSet<ConstantInt *, 32> Constants;
3203   for (auto &Case : SI.cases()) {
3204 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3205 "Case value is not a constant integer.", &SI);
3206 Check(Case.getCaseValue()->getType() == SwitchTy,
3207 "Switch constants must all be same type as switch value!", &SI);
3208 Check(Constants.insert(Case.getCaseValue()).second,
3209 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3210 }
3211
3212 visitTerminator(SI);
3213}
3214
3215void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3216   Check(BI.getAddress()->getType()->isPointerTy(),
3217         "Indirectbr operand must have pointer type!", &BI);
3218 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3219     Check(BI.getDestination(i)->getType()->isPointerTy(),
3220           "Indirectbr destinations must all have pointer type!", &BI);
3221
3222 visitTerminator(BI);
3223}
3224
3225void Verifier::visitCallBrInst(CallBrInst &CBI) {
3226 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3227 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3228 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3229
3230 verifyInlineAsmCall(CBI);
3231 visitTerminator(CBI);
3232}
3233
3234void Verifier::visitSelectInst(SelectInst &SI) {
3235 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3236 SI.getOperand(2)),
3237 "Invalid operands for select instruction!", &SI);
3238
3239 Check(SI.getTrueValue()->getType() == SI.getType(),
3240 "Select values must have same type as select instruction!", &SI);
3241 visitInstruction(SI);
3242}
3243
3244 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3245 /// a pass; if any exist, it's an error.
3246///
3247void Verifier::visitUserOp1(Instruction &I) {
3248 Check(false, "User-defined operators should not live outside of a pass!", &I);
3249}
3250
3251void Verifier::visitTruncInst(TruncInst &I) {
3252 // Get the source and destination types
3253 Type *SrcTy = I.getOperand(0)->getType();
3254 Type *DestTy = I.getType();
3255
3256 // Get the size of the types in bits, we'll need this later
3257 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3258 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3259
3260 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3261 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3262 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3263 "trunc source and destination must both be a vector or neither", &I);
3264 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3265
3267}
3268
3269void Verifier::visitZExtInst(ZExtInst &I) {
3270 // Get the source and destination types
3271 Type *SrcTy = I.getOperand(0)->getType();
3272 Type *DestTy = I.getType();
3273
3274 // Get the size of the types in bits, we'll need this later
3275 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3276 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3277 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3278 "zext source and destination must both be a vector or neither", &I);
3279 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3280 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3281
3282 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3283
3285}
3286
3287void Verifier::visitSExtInst(SExtInst &I) {
3288 // Get the source and destination types
3289 Type *SrcTy = I.getOperand(0)->getType();
3290 Type *DestTy = I.getType();
3291
3292 // Get the size of the types in bits, we'll need this later
3293 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3294 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3295
3296 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3297 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3298 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3299 "sext source and destination must both be a vector or neither", &I);
3300 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3301
3303}
3304
3305void Verifier::visitFPTruncInst(FPTruncInst &I) {
3306 // Get the source and destination types
3307 Type *SrcTy = I.getOperand(0)->getType();
3308 Type *DestTy = I.getType();
3309 // Get the size of the types in bits, we'll need this later
3310 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3311 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3312
3313 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3314 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3315 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3316 "fptrunc source and destination must both be a vector or neither", &I);
3317 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3318
3320}
3321
3322void Verifier::visitFPExtInst(FPExtInst &I) {
3323 // Get the source and destination types
3324 Type *SrcTy = I.getOperand(0)->getType();
3325 Type *DestTy = I.getType();
3326
3327 // Get the size of the types in bits, we'll need this later
3328 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3329 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3330
3331 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3332 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3333 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3334 "fpext source and destination must both be a vector or neither", &I);
3335 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3336
3338}
3339
3340void Verifier::visitUIToFPInst(UIToFPInst &I) {
3341 // Get the source and destination types
3342 Type *SrcTy = I.getOperand(0)->getType();
3343 Type *DestTy = I.getType();
3344
3345 bool SrcVec = SrcTy->isVectorTy();
3346 bool DstVec = DestTy->isVectorTy();
3347
3348 Check(SrcVec == DstVec,
3349 "UIToFP source and dest must both be vector or scalar", &I);
3350 Check(SrcTy->isIntOrIntVectorTy(),
3351 "UIToFP source must be integer or integer vector", &I);
3352 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3353 &I);
3354
3355 if (SrcVec && DstVec)
3356 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3357 cast<VectorType>(DestTy)->getElementCount(),
3358 "UIToFP source and dest vector length mismatch", &I);
3359
3361}
3362
3363void Verifier::visitSIToFPInst(SIToFPInst &I) {
3364 // Get the source and destination types
3365 Type *SrcTy = I.getOperand(0)->getType();
3366 Type *DestTy = I.getType();
3367
3368 bool SrcVec = SrcTy->isVectorTy();
3369 bool DstVec = DestTy->isVectorTy();
3370
3371 Check(SrcVec == DstVec,
3372 "SIToFP source and dest must both be vector or scalar", &I);
3373 Check(SrcTy->isIntOrIntVectorTy(),
3374 "SIToFP source must be integer or integer vector", &I);
3375 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3376 &I);
3377
3378 if (SrcVec && DstVec)
3379 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3380 cast<VectorType>(DestTy)->getElementCount(),
3381 "SIToFP source and dest vector length mismatch", &I);
3382
3384}
3385
3386void Verifier::visitFPToUIInst(FPToUIInst &I) {
3387 // Get the source and destination types
3388 Type *SrcTy = I.getOperand(0)->getType();
3389 Type *DestTy = I.getType();
3390
3391 bool SrcVec = SrcTy->isVectorTy();
3392 bool DstVec = DestTy->isVectorTy();
3393
3394 Check(SrcVec == DstVec,
3395 "FPToUI source and dest must both be vector or scalar", &I);
3396 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3397 Check(DestTy->isIntOrIntVectorTy(),
3398 "FPToUI result must be integer or integer vector", &I);
3399
3400 if (SrcVec && DstVec)
3401 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3402 cast<VectorType>(DestTy)->getElementCount(),
3403 "FPToUI source and dest vector length mismatch", &I);
3404
3406}
3407
3408void Verifier::visitFPToSIInst(FPToSIInst &I) {
3409 // Get the source and destination types
3410 Type *SrcTy = I.getOperand(0)->getType();
3411 Type *DestTy = I.getType();
3412
3413 bool SrcVec = SrcTy->isVectorTy();
3414 bool DstVec = DestTy->isVectorTy();
3415
3416 Check(SrcVec == DstVec,
3417 "FPToSI source and dest must both be vector or scalar", &I);
3418 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3419 Check(DestTy->isIntOrIntVectorTy(),
3420 "FPToSI result must be integer or integer vector", &I);
3421
3422 if (SrcVec && DstVec)
3423 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3424 cast<VectorType>(DestTy)->getElementCount(),
3425 "FPToSI source and dest vector length mismatch", &I);
3426
3428}
3429
3430void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3431 // Get the source and destination types
3432 Type *SrcTy = I.getOperand(0)->getType();
3433 Type *DestTy = I.getType();
3434
3435 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3436
3437 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3438 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3439 &I);
3440
3441 if (SrcTy->isVectorTy()) {
3442 auto *VSrc = cast<VectorType>(SrcTy);
3443 auto *VDest = cast<VectorType>(DestTy);
3444 Check(VSrc->getElementCount() == VDest->getElementCount(),
3445 "PtrToInt Vector width mismatch", &I);
3446 }
3447
3449}
3450
3451void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3452 // Get the source and destination types
3453 Type *SrcTy = I.getOperand(0)->getType();
3454 Type *DestTy = I.getType();
3455
3456 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3457 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3458
3459 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3460 &I);
3461 if (SrcTy->isVectorTy()) {
3462 auto *VSrc = cast<VectorType>(SrcTy);
3463 auto *VDest = cast<VectorType>(DestTy);
3464 Check(VSrc->getElementCount() == VDest->getElementCount(),
3465 "IntToPtr Vector width mismatch", &I);
3466 }
3468}
3469
3470void Verifier::visitBitCastInst(BitCastInst &I) {
3471 Check(
3472 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3473 "Invalid bitcast", &I);
3475}
3476
3477void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3478 Type *SrcTy = I.getOperand(0)->getType();
3479 Type *DestTy = I.getType();
3480
3481 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3482 &I);
3483 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3484 &I);
3485   Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3486         "AddrSpaceCast must be between different address spaces", &I);
3487 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3488 Check(SrcVTy->getElementCount() ==
3489 cast<VectorType>(DestTy)->getElementCount(),
3490 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3492}
3493
3494/// visitPHINode - Ensure that a PHI node is well formed.
3495///
3496void Verifier::visitPHINode(PHINode &PN) {
3497 // Ensure that the PHI nodes are all grouped together at the top of the block.
3498 // This can be tested by checking whether the instruction before this is
3499 // either nonexistent (because this is begin()) or is a PHI node. If not,
3500 // then there is some other instruction before a PHI.
3501 Check(&PN == &PN.getParent()->front() ||
3502 isa<PHINode>(--BasicBlock::iterator(&PN)),
3503 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3504
3505 // Check that a PHI doesn't yield a Token.
3506 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3507
3508 // Check that all of the values of the PHI node have the same type as the
3509 // result.
3510 for (Value *IncValue : PN.incoming_values()) {
3511 Check(PN.getType() == IncValue->getType(),
3512 "PHI node operands are not the same type as the result!", &PN);
3513 }
3514
3515 // All other PHI node constraints are checked in the visitBasicBlock method.
3516
3517 visitInstruction(PN);
3518}
3519
3520void Verifier::visitCallBase(CallBase &Call) {
3521 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3522 "Called function must be a pointer!", Call);
3523 FunctionType *FTy = Call.getFunctionType();
3524
3525 // Verify that the correct number of arguments are being passed
3526 if (FTy->isVarArg())
3527 Check(Call.arg_size() >= FTy->getNumParams(),
3528 "Called function requires more parameters than were provided!", Call);
3529 else
3530 Check(Call.arg_size() == FTy->getNumParams(),
3531 "Incorrect number of arguments passed to called function!", Call);
3532
3533 // Verify that all arguments to the call match the function type.
3534 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3535 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3536 "Call parameter type does not match function signature!",
3537 Call.getArgOperand(i), FTy->getParamType(i), Call);
3538
3539 AttributeList Attrs = Call.getAttributes();
3540
3541 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3542 "Attribute after last parameter!", Call);
3543
3544 Function *Callee =
3545 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3546 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3547 if (IsIntrinsic)
3548 Check(Callee->getValueType() == FTy,
3549 "Intrinsic called with incompatible signature", Call);
3550
3551 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3552 // convention.
3553 auto CC = Call.getCallingConv();
3554   Check(CC != CallingConv::AMDGPU_CS_Chain &&
3555             CC != CallingConv::AMDGPU_CS_ChainPreserve,
3556         "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3557 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3558 Call);
3559
3560 // Disallow passing/returning values with alignment higher than we can
3561 // represent.
3562 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3563 // necessary.
3564 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3565 if (!Ty->isSized())
3566 return;
3567 Align ABIAlign = DL.getABITypeAlign(Ty);
3568 Check(ABIAlign.value() <= Value::MaximumAlignment,
3569 "Incorrect alignment of " + Message + " to called function!", Call);
3570 };
3571
3572 if (!IsIntrinsic) {
3573 VerifyTypeAlign(FTy->getReturnType(), "return type");
3574 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3575 Type *Ty = FTy->getParamType(i);
3576 VerifyTypeAlign(Ty, "argument passed");
3577 }
3578 }
3579
3580 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3581 // Don't allow speculatable on call sites, unless the underlying function
3582 // declaration is also speculatable.
3583 Check(Callee && Callee->isSpeculatable(),
3584 "speculatable attribute may not apply to call sites", Call);
3585 }
3586
3587 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3588 Check(Call.getCalledFunction()->getIntrinsicID() ==
3589 Intrinsic::call_preallocated_arg,
3590 "preallocated as a call site attribute can only be on "
3591 "llvm.call.preallocated.arg");
3592 }
3593
3594 // Verify call attributes.
3595 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3596
3597 // Conservatively check the inalloca argument.
3598 // We have a bug if we can find that there is an underlying alloca without
3599 // inalloca.
3600 if (Call.hasInAllocaArgument()) {
3601 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3602 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3603 Check(AI->isUsedWithInAlloca(),
3604 "inalloca argument for call has mismatched alloca", AI, Call);
3605 }
3606
3607 // For each argument of the callsite, if it has the swifterror argument,
3608 // make sure the underlying alloca/parameter it comes from has a swifterror as
3609 // well.
3610 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3611 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3612 Value *SwiftErrorArg = Call.getArgOperand(i);
3613 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3614 Check(AI->isSwiftError(),
3615 "swifterror argument for call has mismatched alloca", AI, Call);
3616 continue;
3617 }
3618 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3619 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3620 SwiftErrorArg, Call);
3621 Check(ArgI->hasSwiftErrorAttr(),
3622 "swifterror argument for call has mismatched parameter", ArgI,
3623 Call);
3624 }
3625
3626 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3627 // Don't allow immarg on call sites, unless the underlying declaration
3628 // also has the matching immarg.
3629 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3630 "immarg may not apply only to call sites", Call.getArgOperand(i),
3631 Call);
3632 }
3633
3634 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3635 Value *ArgVal = Call.getArgOperand(i);
3636 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3637 "immarg operand has non-immediate parameter", ArgVal, Call);
3638 }
3639
3640 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3641 Value *ArgVal = Call.getArgOperand(i);
3642 bool hasOB =
3643 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3644 bool isMustTail = Call.isMustTailCall();
3645 Check(hasOB != isMustTail,
3646 "preallocated operand either requires a preallocated bundle or "
3647 "the call to be musttail (but not both)",
3648 ArgVal, Call);
3649 }
3650 }
3651
3652 if (FTy->isVarArg()) {
3653 // FIXME? is 'nest' even legal here?
3654 bool SawNest = false;
3655 bool SawReturned = false;
3656
3657 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3658 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3659 SawNest = true;
3660 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3661 SawReturned = true;
3662 }
3663
3664 // Check attributes on the varargs part.
3665 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3666 Type *Ty = Call.getArgOperand(Idx)->getType();
3667 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3668 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3669
3670 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3671 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3672 SawNest = true;
3673 }
3674
3675 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3676 Check(!SawReturned, "More than one parameter has attribute returned!",
3677 Call);
3678 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3679 "Incompatible argument and return types for 'returned' "
3680 "attribute",
3681 Call);
3682 SawReturned = true;
3683 }
3684
3686       // Statepoint intrinsic is vararg but the wrapped function may not be.
3686 // Allow sret here and check the wrapped function in verifyStatepoint.
3687 if (!Call.getCalledFunction() ||
3688 Call.getCalledFunction()->getIntrinsicID() !=
3689 Intrinsic::experimental_gc_statepoint)
3690 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3691 "Attribute 'sret' cannot be used for vararg call arguments!",
3692 Call);
3693
3694 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3695 Check(Idx == Call.arg_size() - 1,
3696 "inalloca isn't on the last argument!", Call);
3697 }
3698 }
3699
3700 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3701 if (!IsIntrinsic) {
3702 for (Type *ParamTy : FTy->params()) {
3703 Check(!ParamTy->isMetadataTy(),
3704 "Function has metadata parameter but isn't an intrinsic", Call);
3705 Check(!ParamTy->isTokenTy(),
3706 "Function has token parameter but isn't an intrinsic", Call);
3707 }
3708 }
3709
3710 // Verify that indirect calls don't return tokens.
3711 if (!Call.getCalledFunction()) {
3712 Check(!FTy->getReturnType()->isTokenTy(),
3713 "Return type cannot be token for indirect call!");
3714 Check(!FTy->getReturnType()->isX86_AMXTy(),
3715 "Return type cannot be x86_amx for indirect call!");
3716 }
3717
3718 if (Function *F = Call.getCalledFunction())
3719 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3720 visitIntrinsicCall(ID, Call);
3721
3722 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3723 // most one "gc-transition", at most one "cfguardtarget", at most one
3724 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3725 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3726 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3727 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3728 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3729 FoundAttachedCallBundle = false;
3730 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3731 OperandBundleUse BU = Call.getOperandBundleAt(i);
3732 uint32_t Tag = BU.getTagID();
3733 if (Tag == LLVMContext::OB_deopt) {
3734 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3735 FoundDeoptBundle = true;
3736 } else if (Tag == LLVMContext::OB_gc_transition) {
3737 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3738 Call);
3739 FoundGCTransitionBundle = true;
3740 } else if (Tag == LLVMContext::OB_funclet) {
3741 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3742 FoundFuncletBundle = true;
3743 Check(BU.Inputs.size() == 1,
3744 "Expected exactly one funclet bundle operand", Call);
3745 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3746 "Funclet bundle operands should correspond to a FuncletPadInst",
3747 Call);
3748 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3749 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3750 Call);
3751 FoundCFGuardTargetBundle = true;
3752 Check(BU.Inputs.size() == 1,
3753 "Expected exactly one cfguardtarget bundle operand", Call);
3754 } else if (Tag == LLVMContext::OB_ptrauth) {
3755 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3756 FoundPtrauthBundle = true;
3757 Check(BU.Inputs.size() == 2,
3758 "Expected exactly two ptrauth bundle operands", Call);
3759 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3760 BU.Inputs[0]->getType()->isIntegerTy(32),
3761 "Ptrauth bundle key operand must be an i32 constant", Call);
3762 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3763 "Ptrauth bundle discriminator operand must be an i64", Call);
3764 } else if (Tag == LLVMContext::OB_kcfi) {
3765 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3766 FoundKCFIBundle = true;
3767 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3768 Call);
3769 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3770 BU.Inputs[0]->getType()->isIntegerTy(32),
3771 "Kcfi bundle operand must be an i32 constant", Call);
3772 } else if (Tag == LLVMContext::OB_preallocated) {
3773 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3774 Call);
3775 FoundPreallocatedBundle = true;
3776 Check(BU.Inputs.size() == 1,
3777 "Expected exactly one preallocated bundle operand", Call);
3778 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3779 Check(Input &&
3780 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3781 "\"preallocated\" argument must be a token from "
3782 "llvm.call.preallocated.setup",
3783 Call);
3784 } else if (Tag == LLVMContext::OB_gc_live) {
3785 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3786 FoundGCLiveBundle = true;
3787     } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3788       Check(!FoundAttachedCallBundle,
3789 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3790 FoundAttachedCallBundle = true;
3791 verifyAttachedCallBundle(Call, BU);
3792 }
3793 }
3794
3795 // Verify that callee and callsite agree on whether to use pointer auth.
3796 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3797 "Direct call cannot have a ptrauth bundle", Call);
3798
3799 // Verify that each inlinable callsite of a debug-info-bearing function in a
3800 // debug-info-bearing function has a debug location attached to it. Failure to
3801 // do so causes assertion failures when the inliner sets up inline scope info
3802   // (Interposable functions are not inlinable, nor are functions without
3803 // definitions.)
3804 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3805 !Call.getCalledFunction()->isInterposable() &&
3806 !Call.getCalledFunction()->isDeclaration() &&
3807 Call.getCalledFunction()->getSubprogram())
3808 CheckDI(Call.getDebugLoc(),
3809 "inlinable function call in a function with "
3810 "debug info must have a !dbg location",
3811 Call);
3812
3813 if (Call.isInlineAsm())
3814 verifyInlineAsmCall(Call);
3815
3816 ConvergenceVerifyHelper.visit(Call);
3817
3818 visitInstruction(Call);
3819}
3820
3821void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3822 StringRef Context) {
3823 Check(!Attrs.contains(Attribute::InAlloca),
3824 Twine("inalloca attribute not allowed in ") + Context);
3825 Check(!Attrs.contains(Attribute::InReg),
3826 Twine("inreg attribute not allowed in ") + Context);
3827 Check(!Attrs.contains(Attribute::SwiftError),
3828 Twine("swifterror attribute not allowed in ") + Context);
3829 Check(!Attrs.contains(Attribute::Preallocated),
3830 Twine("preallocated attribute not allowed in ") + Context);
3831 Check(!Attrs.contains(Attribute::ByRef),
3832 Twine("byref attribute not allowed in ") + Context);
3833}
3834
3835/// Two types are "congruent" if they are identical, or if they are both pointer
3836/// types with different pointee types and the same address space.
3837static bool isTypeCongruent(Type *L, Type *R) {
3838 if (L == R)
3839 return true;
3840 PointerType *PL = dyn_cast<PointerType>(L);
3841 PointerType *PR = dyn_cast<PointerType>(R);
3842 if (!PL || !PR)
3843 return false;
3844 return PL->getAddressSpace() == PR->getAddressSpace();
3845}
3846
3846 
3847 static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3848   static const Attribute::AttrKind ABIAttrs[] = {
3849 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3850 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3851 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3852 Attribute::ByRef};
3853 AttrBuilder Copy(C);
3854 for (auto AK : ABIAttrs) {
3855 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3856 if (Attr.isValid())
3857 Copy.addAttribute(Attr);
3858 }
3859
3860 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3861 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3862 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3863 Attrs.hasParamAttr(I, Attribute::ByRef)))
3864 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3865 return Copy;
3866}
3867
3868void Verifier::verifyMustTailCall(CallInst &CI) {
3869 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3870
3871 Function *F = CI.getParent()->getParent();
3872 FunctionType *CallerTy = F->getFunctionType();
3873 FunctionType *CalleeTy = CI.getFunctionType();
3874 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3875 "cannot guarantee tail call due to mismatched varargs", &CI);
3876 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3877 "cannot guarantee tail call due to mismatched return types", &CI);
3878
3879 // - The calling conventions of the caller and callee must match.
3880 Check(F->getCallingConv() == CI.getCallingConv(),
3881 "cannot guarantee tail call due to mismatched calling conv", &CI);
3882
3883 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3884 // or a pointer bitcast followed by a ret instruction.
3885 // - The ret instruction must return the (possibly bitcasted) value
3886 // produced by the call or void.
3887 Value *RetVal = &CI;
3888 Instruction *Next = CI.getNextNode();
3889
3890 // Handle the optional bitcast.
3891 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3892 Check(BI->getOperand(0) == RetVal,
3893 "bitcast following musttail call must use the call", BI);
3894 RetVal = BI;
3895 Next = BI->getNextNode();
3896 }
3897
3898 // Check the return.
3899 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3900 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3901 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3902 isa<UndefValue>(Ret->getReturnValue()),
3903 "musttail call result must be returned", Ret);
3904
3905 AttributeList CallerAttrs = F->getAttributes();
3906 AttributeList CalleeAttrs = CI.getAttributes();
3907   if (CI.getCallingConv() == CallingConv::SwiftTail ||
3908       CI.getCallingConv() == CallingConv::Tail) {
3909     StringRef CCName =
3910 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3911
3912 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3913 // are allowed in swifttailcc call
3914 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3915 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3916 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3917 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3918 }
3919 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3920 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3921 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3922 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3923 }
3924 // - Varargs functions are not allowed
3925 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3926 " tail call for varargs function");
3927 return;
3928 }
3929
3930 // - The caller and callee prototypes must match. Pointer types of
3931 // parameters or return types may differ in pointee type, but not
3932 // address space.
3933 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3934 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3935 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3936 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3937 Check(
3938 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3939 "cannot guarantee tail call due to mismatched parameter types", &CI);
3940 }
3941 }
3942
3943 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3944 // returned, preallocated, and inalloca, must match.
3945 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3946 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3947 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3948 Check(CallerABIAttrs == CalleeABIAttrs,
3949 "cannot guarantee tail call due to mismatched ABI impacting "
3950 "function attributes",
3951 &CI, CI.getOperand(I));
3952 }
3953}
3954
3955void Verifier::visitCallInst(CallInst &CI) {
3956 visitCallBase(CI);
3957
3958 if (CI.isMustTailCall())
3959 verifyMustTailCall(CI);
3960}
3961
3962void Verifier::visitInvokeInst(InvokeInst &II) {
3963   visitCallBase(II);
3964 
3965 // Verify that the first non-PHI instruction of the unwind destination is an
3966 // exception handling instruction.
3967 Check(
3968 II.getUnwindDest()->isEHPad(),
3969 "The unwind destination does not have an exception handling instruction!",
3970 &II);
3971
3973}
3974
3975/// visitUnaryOperator - Check the argument to the unary operator.
3976///
3977void Verifier::visitUnaryOperator(UnaryOperator &U) {
3978 Check(U.getType() == U.getOperand(0)->getType(),
3979         "Unary operators must have same type for "
3980 "operands and result!",
3981 &U);
3982
3983 switch (U.getOpcode()) {
3984 // Check that floating-point arithmetic operators are only used with
3985 // floating-point operands.
3986 case Instruction::FNeg:
3987 Check(U.getType()->isFPOrFPVectorTy(),
3988 "FNeg operator only works with float types!", &U);
3989 break;
3990 default:
3991 llvm_unreachable("Unknown UnaryOperator opcode!");
3992 }
3993
3995}
3996
3997/// visitBinaryOperator - Check that both arguments to the binary operator are
3998/// of the same type!
3999///
4000void Verifier::visitBinaryOperator(BinaryOperator &B) {
4001 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4002 "Both operands to a binary operator are not of the same type!", &B);
4003
4004 switch (B.getOpcode()) {
4005 // Check that integer arithmetic operators are only used with
4006 // integral operands.
4007 case Instruction::Add:
4008 case Instruction::Sub:
4009 case Instruction::Mul:
4010 case Instruction::SDiv:
4011 case Instruction::UDiv:
4012 case Instruction::SRem:
4013 case Instruction::URem:
4014 Check(B.getType()->isIntOrIntVectorTy(),
4015 "Integer arithmetic operators only work with integral types!", &B);
4016 Check(B.getType() == B.getOperand(0)->getType(),
4017 "Integer arithmetic operators must have same type "
4018 "for operands and result!",
4019 &B);
4020 break;
4021 // Check that floating-point arithmetic operators are only used with
4022 // floating-point operands.
4023 case Instruction::FAdd:
4024 case Instruction::FSub:
4025 case Instruction::FMul:
4026 case Instruction::FDiv:
4027 case Instruction::FRem:
4028 Check(B.getType()->isFPOrFPVectorTy(),
4029 "Floating-point arithmetic operators only work with "
4030 "floating-point types!",
4031 &B);
4032 Check(B.getType() == B.getOperand(0)->getType(),
4033 "Floating-point arithmetic operators must have same type "
4034 "for operands and result!",
4035 &B);
4036 break;
4037 // Check that logical operators are only used with integral operands.
4038 case Instruction::And:
4039 case Instruction::Or:
4040 case Instruction::Xor:
4041 Check(B.getType()->isIntOrIntVectorTy(),
4042 "Logical operators only work with integral types!", &B);
4043 Check(B.getType() == B.getOperand(0)->getType(),
4044 "Logical operators must have same type for operands and result!", &B);
4045 break;
4046 case Instruction::Shl:
4047 case Instruction::LShr:
4048 case Instruction::AShr:
4049 Check(B.getType()->isIntOrIntVectorTy(),
4050 "Shifts only work with integral types!", &B);
4051 Check(B.getType() == B.getOperand(0)->getType(),
4052 "Shift return type must be same as operands!", &B);
4053 break;
4054 default:
4055 llvm_unreachable("Unknown BinaryOperator opcode!");
4056 }
4057
4059}
4060
4061void Verifier::visitICmpInst(ICmpInst &IC) {
4062 // Check that the operands are the same type
4063 Type *Op0Ty = IC.getOperand(0)->getType();
4064 Type *Op1Ty = IC.getOperand(1)->getType();
4065 Check(Op0Ty == Op1Ty,
4066 "Both operands to ICmp instruction are not of the same type!", &IC);
4067 // Check that the operands are the right type
4068 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4069 "Invalid operand types for ICmp instruction", &IC);
4070 // Check that the predicate is valid.
4071 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4072
4073 visitInstruction(IC);
4074}
4075
4076void Verifier::visitFCmpInst(FCmpInst &FC) {
4077 // Check that the operands are the same type
4078 Type *Op0Ty = FC.getOperand(0)->getType();
4079 Type *Op1Ty = FC.getOperand(1)->getType();
4080 Check(Op0Ty == Op1Ty,
4081 "Both operands to FCmp instruction are not of the same type!", &FC);
4082 // Check that the operands are the right type
4083 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4084 &FC);
4085 // Check that the predicate is valid.
4086 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4087
4088 visitInstruction(FC);
4089}
4090
4091void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4092   Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
4093         "Invalid extractelement operands!", &EI);
4094 visitInstruction(EI);
4095}
4096
4097void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4098 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4099 IE.getOperand(2)),
4100 "Invalid insertelement operands!", &IE);
4101 visitInstruction(IE);
4102}
4103
4104void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4105   Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4106                                            SV.getShuffleMask()),
4107 "Invalid shufflevector operands!", &SV);
4108 visitInstruction(SV);
4109}
4110
4111void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4112 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4113
4114 Check(isa<PointerType>(TargetTy),
4115 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4116 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4117
4118 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4119 Check(!STy->isScalableTy(),
4120           "getelementptr cannot target structure that contains scalable vector "
4121 "type",
4122 &GEP);
4123 }
4124
4125 SmallVector<Value *, 16> Idxs(GEP.indices());
4126 Check(
4127 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4128 "GEP indexes must be integers", &GEP);
4129 Type *ElTy =
4130 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4131 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4132
4133 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4134
4135 Check(PtrTy && GEP.getResultElementType() == ElTy,
4136 "GEP is not of right type for indices!", &GEP, ElTy);
4137
4138 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4139 // Additional checks for vector GEPs.
4140 ElementCount GEPWidth = GEPVTy->getElementCount();
4141 if (GEP.getPointerOperandType()->isVectorTy())
4142 Check(
4143 GEPWidth ==
4144 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4145 "Vector GEP result width doesn't match operand's", &GEP);
4146 for (Value *Idx : Idxs) {
4147 Type *IndexTy = Idx->getType();
4148 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4149 ElementCount IndexWidth = IndexVTy->getElementCount();
4150 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4151 }
4152 Check(IndexTy->isIntOrIntVectorTy(),
4153 "All GEP indices should be of integer type");
4154 }
4155 }
4156
4157 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4158 "GEP address space doesn't match type", &GEP);
4159
4161}
4162
4163static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4164 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4165}
4166
4167/// Verify !range and !absolute_symbol metadata. These have the same
4168/// restrictions, except !absolute_symbol allows the full set.
4169void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4170 Type *Ty, RangeLikeMetadataKind Kind) {
4171 unsigned NumOperands = Range->getNumOperands();
4172 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4173 unsigned NumRanges = NumOperands / 2;
4174 Check(NumRanges >= 1, "It should have at least one range!", Range);
4175
4176 ConstantRange LastRange(1, true); // Dummy initial value
4177 for (unsigned i = 0; i < NumRanges; ++i) {
4178 ConstantInt *Low =
4179 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4180 Check(Low, "The lower limit must be an integer!", Low);
4181 ConstantInt *High =
4182 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4183 Check(High, "The upper limit must be an integer!", High);
4184
4185 Check(High->getType() == Low->getType(), "Range pair types must match!",
4186 &I);
4187
4188 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4189 Check(High->getType()->isIntegerTy(32),
4190 "noalias.addrspace type must be i32!", &I);
4191 } else {
4192 Check(High->getType() == Ty->getScalarType(),
4193 "Range types must match instruction type!", &I);
4194 }
4195
4196 APInt HighV = High->getValue();
4197 APInt LowV = Low->getValue();
4198
4199 // ConstantRange asserts if the ranges are the same except for the min/max
4200 // value. Leave the cases it tolerates for the empty range error below.
4201 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4202 "The upper and lower limits cannot be the same value", &I);
4203
4204 ConstantRange CurRange(LowV, HighV);
4205 Check(!CurRange.isEmptySet() &&
4206 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4207 !CurRange.isFullSet()),
4208 "Range must not be empty!", Range);
4209 if (i != 0) {
4210 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4211 "Intervals are overlapping", Range);
4212 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4213 Range);
4214 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4215 Range);
4216 }
4217 LastRange = ConstantRange(LowV, HighV);
4218 }
4219 if (NumRanges > 2) {
4220 APInt FirstLow =
4221 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4222 APInt FirstHigh =
4223 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4224 ConstantRange FirstRange(FirstLow, FirstHigh);
4225 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4226 "Intervals are overlapping", Range);
4227 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4228 Range);
4229 }
4230}
4231
4232void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4233 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4234 "precondition violation");
4235 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4236}
4237
4238void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4239 Type *Ty) {
4240 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4241 "precondition violation");
4242 verifyRangeLikeMetadata(I, Range, Ty,
4243 RangeLikeMetadataKind::NoaliasAddrspace);
4244}
4245
4246void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4247 unsigned Size = DL.getTypeSizeInBits(Ty);
4248 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4249 Check(!(Size & (Size - 1)),
4250 "atomic memory access' operand must have a power-of-two size", Ty, I);
4251}
4252
4253void Verifier::visitLoadInst(LoadInst &LI) {
4254 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4255 Check(PTy, "Load operand must be a pointer.", &LI);
4256 Type *ElTy = LI.getType();
4257 if (MaybeAlign A = LI.getAlign()) {
4258 Check(A->value() <= Value::MaximumAlignment,
4259 "huge alignment values are unsupported", &LI);
4260 }
4261 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4262 if (LI.isAtomic()) {
4263     Check(LI.getOrdering() != AtomicOrdering::Release &&
4264               LI.getOrdering() != AtomicOrdering::AcquireRelease,
4265           "Load cannot have Release ordering", &LI);
4266 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4267 "atomic load operand must have integer, pointer, or floating point "
4268 "type!",
4269 ElTy, &LI);
4270 checkAtomicMemAccessSize(ElTy, &LI);
4271 } else {
4272     Check(LI.getSyncScopeID() == SyncScope::System,
4273           "Non-atomic load cannot have SynchronizationScope specified", &LI);
4274 }
4275
4276 visitInstruction(LI);
4277}
4278
4279void Verifier::visitStoreInst(StoreInst &SI) {
4280 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4281 Check(PTy, "Store operand must be a pointer.", &SI);
4282 Type *ElTy = SI.getOperand(0)->getType();
4283 if (MaybeAlign A = SI.getAlign()) {
4284 Check(A->value() <= Value::MaximumAlignment,
4285 "huge alignment values are unsupported", &SI);
4286 }
4287 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4288 if (SI.isAtomic()) {
4289 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4290 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4291 "Store cannot have Acquire ordering", &SI);
4292 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4293 "atomic store operand must have integer, pointer, or floating point "
4294 "type!",
4295 ElTy, &SI);
4296 checkAtomicMemAccessSize(ElTy, &SI);
4297 } else {
4298 Check(SI.getSyncScopeID() == SyncScope::System,
4299 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4300 }
4301 visitInstruction(SI);
4302}
4303
4304/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4305void Verifier::verifySwiftErrorCall(CallBase &Call,
4306 const Value *SwiftErrorVal) {
4307 for (const auto &I : llvm::enumerate(Call.args())) {
4308 if (I.value() == SwiftErrorVal) {
4309 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4310 "swifterror value when used in a callsite should be marked "
4311 "with swifterror attribute",
4312 SwiftErrorVal, Call);
4313 }
4314 }
4315}
4316
4317void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4318 // Check that swifterror value is only used by loads, stores, or as
4319 // a swifterror argument.
4320 for (const User *U : SwiftErrorVal->users()) {
4321 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4322 isa<InvokeInst>(U),
4323 "swifterror value can only be loaded and stored from, or "
4324 "as a swifterror argument!",
4325 SwiftErrorVal, U);
4326 // If it is used by a store, check it is the second operand.
4327 if (auto StoreI = dyn_cast<StoreInst>(U))
4328 Check(StoreI->getOperand(1) == SwiftErrorVal,
4329 "swifterror value should be the second operand when used "
4330 "by stores",
4331 SwiftErrorVal, U);
4332 if (auto *Call = dyn_cast<CallBase>(U))
4333 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4334 }
4335}
4336
4337void Verifier::visitAllocaInst(AllocaInst &AI) {
4338 Type *Ty = AI.getAllocatedType();
4339 SmallPtrSet<Type*, 4> Visited;
4340 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4341 // Check if it's a target extension type that disallows being used on the
4342 // stack.
4343   Check(!Ty->containsNonLocalTargetExtType(),
4344         "Alloca has illegal target extension type", &AI);
4345   Check(AI.getArraySize()->getType()->isIntegerTy(),
4346         "Alloca array size must have integer type", &AI);
4347 if (MaybeAlign A = AI.getAlign()) {
4348 Check(A->value() <= Value::MaximumAlignment,
4349 "huge alignment values are unsupported", &AI);
4350 }
4351
4352 if (AI.isSwiftError()) {
4353 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4354     Check(!AI.isArrayAllocation(),
4355           "swifterror alloca must not be array allocation", &AI);
4356 verifySwiftErrorValue(&AI);
4357 }
4358
4359 visitInstruction(AI);
4360}
4361
4362void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4363 Type *ElTy = CXI.getOperand(1)->getType();
4364 Check(ElTy->isIntOrPtrTy(),
4365 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4366 checkAtomicMemAccessSize(ElTy, &CXI);
4367 visitInstruction(CXI);
4368}
4369
4370void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4371   Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4372         "atomicrmw instructions cannot be unordered.", &RMWI);
4373 auto Op = RMWI.getOperation();
4374 Type *ElTy = RMWI.getOperand(1)->getType();
4375 if (Op == AtomicRMWInst::Xchg) {
4376 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4377 ElTy->isPointerTy(),
4378 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4379 " operand must have integer or floating point type!",
4380 &RMWI, ElTy);
4381 } else if (AtomicRMWInst::isFPOperation(Op)) {
4382 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4383 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4384 " operand must have floating-point or fixed vector of floating-point "
4385 "type!",
4386 &RMWI, ElTy);
4387 } else {
4388 Check(ElTy->isIntegerTy(),
4389 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4390 " operand must have integer type!",
4391 &RMWI, ElTy);
4392 }
4393 checkAtomicMemAccessSize(ElTy, &RMWI);
4394   Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
4395         "Invalid binary operation!", &RMWI);
4396 visitInstruction(RMWI);
4397}
4398
4399void Verifier::visitFenceInst(FenceInst &FI) {
4400 const AtomicOrdering Ordering = FI.getOrdering();
4401 Check(Ordering == AtomicOrdering::Acquire ||
4402 Ordering == AtomicOrdering::Release ||
4403 Ordering == AtomicOrdering::AcquireRelease ||
4404             Ordering == AtomicOrdering::SequentiallyConsistent,
4405         "fence instructions may only have acquire, release, acq_rel, or "
4406 "seq_cst ordering.",
4407 &FI);
4408 visitInstruction(FI);
4409}
4410
4411void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4412   Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4413                                          EVI.getIndices()) == EVI.getType(),
4414 "Invalid ExtractValueInst operands!", &EVI);
4415
4416 visitInstruction(EVI);
4417}
4418
4419void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4420   Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4421                                          IVI.getIndices()) ==
4422 IVI.getOperand(1)->getType(),
4423 "Invalid InsertValueInst operands!", &IVI);
4424
4425 visitInstruction(IVI);
4426}
4427
4428static Value *getParentPad(Value *EHPad) {
4429 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4430 return FPI->getParentPad();
4431
4432 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4433}
4434
4435void Verifier::visitEHPadPredecessors(Instruction &I) {
4436 assert(I.isEHPad());
4437
4438 BasicBlock *BB = I.getParent();
4439 Function *F = BB->getParent();
4440
4441 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4442
4443 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4444 // The landingpad instruction defines its parent as a landing pad block. The
4445 // landing pad block may be branched to only by the unwind edge of an
4446 // invoke.
4447 for (BasicBlock *PredBB : predecessors(BB)) {
4448 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4449 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4450 "Block containing LandingPadInst must be jumped to "
4451 "only by the unwind edge of an invoke.",
4452 LPI);
4453 }
4454 return;
4455 }
4456 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4457 if (!pred_empty(BB))
4458 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4459           "Block containing CatchPadInst must be jumped to "
4460 "only by its catchswitch.",
4461 CPI);
4462 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4463 "Catchswitch cannot unwind to one of its catchpads",
4464 CPI->getCatchSwitch(), CPI);
4465 return;
4466 }
4467
4468 // Verify that each pred has a legal terminator with a legal to/from EH
4469 // pad relationship.
4470 Instruction *ToPad = &I;
4471 Value *ToPadParent = getParentPad(ToPad);
4472 for (BasicBlock *PredBB : predecessors(BB)) {
4473 Instruction *TI = PredBB->getTerminator();
4474 Value *FromPad;
4475 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4476 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4477 "EH pad must be jumped to via an unwind edge", ToPad, II);
4478 auto *CalledFn =
4479 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4480 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4481 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4482 continue;
4483 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4484 FromPad = Bundle->Inputs[0];
4485 else
4486 FromPad = ConstantTokenNone::get(II->getContext());
4487 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4488 FromPad = CRI->getOperand(0);
4489 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4490 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4491 FromPad = CSI;
4492 } else {
4493 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4494 }
4495
4496 // The edge may exit from zero or more nested pads.
4497     SmallSet<Value *, 8> Seen;
4498     for (;; FromPad = getParentPad(FromPad)) {
4499 Check(FromPad != ToPad,
4500 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4501 if (FromPad == ToPadParent) {
4502 // This is a legal unwind edge.
4503 break;
4504 }
4505 Check(!isa<ConstantTokenNone>(FromPad),
4506 "A single unwind edge may only enter one EH pad", TI);
4507 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4508 FromPad);
4509
4510 // This will be diagnosed on the corresponding instruction already. We
4511 // need the extra check here to make sure getParentPad() works.
4512 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4513 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4514 }
4515 }
4516}
4517
4518void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4519 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4520 // isn't a cleanup.
4521 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4522 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4523
4524 visitEHPadPredecessors(LPI);
4525
4526 if (!LandingPadResultTy)
4527 LandingPadResultTy = LPI.getType();
4528 else
4529 Check(LandingPadResultTy == LPI.getType(),
4530 "The landingpad instruction should have a consistent result type "
4531 "inside a function.",
4532 &LPI);
4533
4534 Function *F = LPI.getParent()->getParent();
4535 Check(F->hasPersonalityFn(),
4536 "LandingPadInst needs to be in a function with a personality.", &LPI);
4537
4538 // The landingpad instruction must be the first non-PHI instruction in the
4539 // block.
4540 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4541 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4542
4543 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4544 Constant *Clause = LPI.getClause(i);
4545 if (LPI.isCatch(i)) {
4546 Check(isa<PointerType>(Clause->getType()),
4547 "Catch operand does not have pointer type!", &LPI);
4548 } else {
4549 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4550 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4551 "Filter operand is not an array of constants!", &LPI);
4552 }
4553 }
4554
4555 visitInstruction(LPI);
4556}
4557
4558void Verifier::visitResumeInst(ResumeInst &RI) {
4559   Check(RI.getFunction()->hasPersonalityFn(),
4560         "ResumeInst needs to be in a function with a personality.", &RI);
4561
4562 if (!LandingPadResultTy)
4563 LandingPadResultTy = RI.getValue()->getType();
4564 else
4565 Check(LandingPadResultTy == RI.getValue()->getType(),
4566 "The resume instruction should have a consistent result type "
4567 "inside a function.",
4568 &RI);
4569
4570 visitTerminator(RI);
4571}
4572
4573void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4574 BasicBlock *BB = CPI.getParent();
4575
4576 Function *F = BB->getParent();
4577 Check(F->hasPersonalityFn(),
4578 "CatchPadInst needs to be in a function with a personality.", &CPI);
4579
4580 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4581 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4582 CPI.getParentPad());
4583
4584 // The catchpad instruction must be the first non-PHI instruction in the
4585 // block.
4586 Check(BB->getFirstNonPHI() == &CPI,
4587 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4588
4589 visitEHPadPredecessors(CPI);
4590 visitFuncletPadInst(CPI);
4591}
4592
4593void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4594 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4595 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4596 CatchReturn.getOperand(0));
4597
4598 visitTerminator(CatchReturn);
4599}
4600
4601void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4602 BasicBlock *BB = CPI.getParent();
4603
4604 Function *F = BB->getParent();
4605 Check(F->hasPersonalityFn(),
4606 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4607
4608 // The cleanuppad instruction must be the first non-PHI instruction in the
4609 // block.
4610 Check(BB->getFirstNonPHI() == &CPI,
4611 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4612
4613 auto *ParentPad = CPI.getParentPad();
4614 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4615 "CleanupPadInst has an invalid parent.", &CPI);
4616
4617 visitEHPadPredecessors(CPI);
4618 visitFuncletPadInst(CPI);
4619}
4620
4621void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4622 User *FirstUser = nullptr;
4623 Value *FirstUnwindPad = nullptr;
4624 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4625 SmallSet<FuncletPadInst *, 8> Seen;
4626
4627 while (!Worklist.empty()) {
4628 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4629 Check(Seen.insert(CurrentPad).second,
4630 "FuncletPadInst must not be nested within itself", CurrentPad);
4631 Value *UnresolvedAncestorPad = nullptr;
4632 for (User *U : CurrentPad->users()) {
4633 BasicBlock *UnwindDest;
4634 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4635 UnwindDest = CRI->getUnwindDest();
4636 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4637 // We allow catchswitch unwind to caller to nest
4638 // within an outer pad that unwinds somewhere else,
4639 // because catchswitch doesn't have a nounwind variant.
4640 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4641 if (CSI->unwindsToCaller())
4642 continue;
4643 UnwindDest = CSI->getUnwindDest();
4644 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4645 UnwindDest = II->getUnwindDest();
4646 } else if (isa<CallInst>(U)) {
4647 // Calls which don't unwind may be found inside funclet
4648 // pads that unwind somewhere else. We don't *require*
4649 // such calls to be annotated nounwind.
4650 continue;
4651 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4652 // The unwind dest for a cleanup can only be found by
4653 // recursive search. Add it to the worklist, and we'll
4654 // search for its first use that determines where it unwinds.
4655 Worklist.push_back(CPI);
4656 continue;
4657 } else {
4658 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4659 continue;
4660 }
4661
4662 Value *UnwindPad;
4663 bool ExitsFPI;
4664 if (UnwindDest) {
4665 UnwindPad = UnwindDest->getFirstNonPHI();
4666 if (!cast<Instruction>(UnwindPad)->isEHPad())
4667 continue;
4668 Value *UnwindParent = getParentPad(UnwindPad);
4669 // Ignore unwind edges that don't exit CurrentPad.
4670 if (UnwindParent == CurrentPad)
4671 continue;
4672 // Determine whether the original funclet pad is exited,
4673 // and if we are scanning nested pads determine how many
4674 // of them are exited so we can stop searching their
4675 // children.
4676 Value *ExitedPad = CurrentPad;
4677 ExitsFPI = false;
4678 do {
4679 if (ExitedPad == &FPI) {
4680 ExitsFPI = true;
4681 // Now we can resolve any ancestors of CurrentPad up to
4682 // FPI, but not including FPI since we need to make sure
4683 // to check all direct users of FPI for consistency.
4684 UnresolvedAncestorPad = &FPI;
4685 break;
4686 }
4687 Value *ExitedParent = getParentPad(ExitedPad);
4688 if (ExitedParent == UnwindParent) {
4689 // ExitedPad is the ancestor-most pad which this unwind
4690 // edge exits, so we can resolve up to it, meaning that
4691 // ExitedParent is the first ancestor still unresolved.
4692 UnresolvedAncestorPad = ExitedParent;
4693 break;
4694 }
4695 ExitedPad = ExitedParent;
4696 } while (!isa<ConstantTokenNone>(ExitedPad));
4697 } else {
4698 // Unwinding to caller exits all pads.
4699 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4700 ExitsFPI = true;
4701 UnresolvedAncestorPad = &FPI;
4702 }
4703
4704 if (ExitsFPI) {
4705 // This unwind edge exits FPI. Make sure it agrees with other
4706 // such edges.
4707 if (FirstUser) {
4708 Check(UnwindPad == FirstUnwindPad,
4709 "Unwind edges out of a funclet "
4710 "pad must have the same unwind "
4711 "dest",
4712 &FPI, U, FirstUser);
4713 } else {
4714 FirstUser = U;
4715 FirstUnwindPad = UnwindPad;
4716 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4717 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4718 getParentPad(UnwindPad) == getParentPad(&FPI))
4719 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4720 }
4721 }
4722 // Make sure we visit all uses of FPI, but for nested pads stop as
4723 // soon as we know where they unwind to.
4724 if (CurrentPad != &FPI)
4725 break;
4726 }
4727 if (UnresolvedAncestorPad) {
4728 if (CurrentPad == UnresolvedAncestorPad) {
4729 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4730 // we've found an unwind edge that exits it, because we need to verify
4731 // all direct uses of FPI.
4732 assert(CurrentPad == &FPI);
4733 continue;
4734 }
4735 // Pop off the worklist any nested pads that we've found an unwind
4736 // destination for. The pads on the worklist are the uncles,
4737 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4738 // for all ancestors of CurrentPad up to but not including
4739 // UnresolvedAncestorPad.
4740 Value *ResolvedPad = CurrentPad;
4741 while (!Worklist.empty()) {
4742 Value *UnclePad = Worklist.back();
4743 Value *AncestorPad = getParentPad(UnclePad);
4744 // Walk ResolvedPad up the ancestor list until we either find the
4745 // uncle's parent or the last resolved ancestor.
4746 while (ResolvedPad != AncestorPad) {
4747 Value *ResolvedParent = getParentPad(ResolvedPad);
4748 if (ResolvedParent == UnresolvedAncestorPad) {
4749 break;
4750 }
4751 ResolvedPad = ResolvedParent;
4752 }
4753 // If the resolved ancestor search didn't find the uncle's parent,
4754 // then the uncle is not yet resolved.
4755 if (ResolvedPad != AncestorPad)
4756 break;
4757 // This uncle is resolved, so pop it from the worklist.
4758 Worklist.pop_back();
4759 }
4760 }
4761 }
4762
4763 if (FirstUnwindPad) {
4764 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4765 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4766 Value *SwitchUnwindPad;
4767 if (SwitchUnwindDest)
4768 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4769 else
4770 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4771 Check(SwitchUnwindPad == FirstUnwindPad,
4772 "Unwind edges out of a catch must have the same unwind dest as "
4773 "the parent catchswitch",
4774 &FPI, FirstUser, CatchSwitch);
4775 }
4776 }
4777
4778 visitInstruction(FPI);
4779}
4780
4781void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4782 BasicBlock *BB = CatchSwitch.getParent();
4783
4784 Function *F = BB->getParent();
4785 Check(F->hasPersonalityFn(),
4786 "CatchSwitchInst needs to be in a function with a personality.",
4787 &CatchSwitch);
4788
4789 // The catchswitch instruction must be the first non-PHI instruction in the
4790 // block.
4791 Check(BB->getFirstNonPHI() == &CatchSwitch,
4792 "CatchSwitchInst not the first non-PHI instruction in the block.",
4793 &CatchSwitch);
4794
4795 auto *ParentPad = CatchSwitch.getParentPad();
4796 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4797 "CatchSwitchInst has an invalid parent.", ParentPad);
4798
4799 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4800 Instruction *I = UnwindDest->getFirstNonPHI();
4801 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4802 "CatchSwitchInst must unwind to an EH block which is not a "
4803 "landingpad.",
4804 &CatchSwitch);
4805
4806 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4807 if (getParentPad(I) == ParentPad)
4808 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4809 }
4810
4811 Check(CatchSwitch.getNumHandlers() != 0,
4812 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4813
4814 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4815 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4816 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4817 }
4818
4819 visitEHPadPredecessors(CatchSwitch);
4820 visitTerminator(CatchSwitch);
4821}
4822
4823void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4824 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4825 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4826 CRI.getOperand(0));
4827
4828 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4829 Instruction *I = UnwindDest->getFirstNonPHI();
4830 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4831 "CleanupReturnInst must unwind to an EH block which is not a "
4832 "landingpad.",
4833 &CRI);
4834 }
4835
4836 visitTerminator(CRI);
4837}
4838
4839void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4840 Instruction *Op = cast<Instruction>(I.getOperand(i));
4841 // If we have an invalid invoke, don't try to compute the dominance.
4842 // We already reject it in the invoke specific checks and the dominance
4843 // computation doesn't handle multiple edges.
4844 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4845 if (II->getNormalDest() == II->getUnwindDest())
4846 return;
4847 }
4848
4849 // Quick check whether the def has already been encountered in the same block.
4850 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4851 // uses are defined to happen on the incoming edge, not at the instruction.
4852 //
4853 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4854 // wrapping an SSA value, assert that we've already encountered it. See
4855 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4856 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4857 return;
4858
4859 const Use &U = I.getOperandUse(i);
4860 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4861}
4862
4863void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4864 Check(I.getType()->isPointerTy(),
4865 "dereferenceable, dereferenceable_or_null "
4866 "apply only to pointer types",
4867 &I);
4868 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4869 "dereferenceable, dereferenceable_or_null apply only to load"
4870 " and inttoptr instructions, use attributes for calls or invokes",
4871 &I);
4872 Check(MD->getNumOperands() == 1,
4873 "dereferenceable, dereferenceable_or_null "
4874 "take one operand!",
4875 &I);
4876 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4877 Check(CI && CI->getType()->isIntegerTy(64),
4878 "dereferenceable, "
4879 "dereferenceable_or_null metadata value must be an i64!",
4880 &I);
4881}
4882
4883void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4884 Check(MD->getNumOperands() >= 2,
4885 "!prof annotations should have no less than 2 operands", MD);
4886
4887 // Check first operand.
4888 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4889 Check(isa<MDString>(MD->getOperand(0)),
4890 "expected string with name of the !prof annotation", MD);
4891 MDString *MDS = cast<MDString>(MD->getOperand(0));
4892 StringRef ProfName = MDS->getString();
4893
4894 // Check consistency of !prof branch_weights metadata.
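 // For example, a two-successor conditional branch typically carries
 // !prof !{!"branch_weights", i32 20, i32 10}, one weight per successor;
 // an invoke may carry either one weight or one per successor.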
4895 if (ProfName == "branch_weights") {
4896 unsigned NumBranchWeights = getNumBranchWeights(*MD);
4897 if (isa<InvokeInst>(&I)) {
4898 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
4899 "Wrong number of InvokeInst branch_weights operands", MD);
4900 } else {
4901 unsigned ExpectedNumOperands = 0;
4902 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4903 ExpectedNumOperands = BI->getNumSuccessors();
4904 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4905 ExpectedNumOperands = SI->getNumSuccessors();
4906 else if (isa<CallInst>(&I))
4907 ExpectedNumOperands = 1;
4908 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4909 ExpectedNumOperands = IBI->getNumDestinations();
4910 else if (isa<SelectInst>(&I))
4911 ExpectedNumOperands = 2;
4912 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4913 ExpectedNumOperands = CI->getNumSuccessors();
4914 else
4915 CheckFailed("!prof branch_weights are not allowed for this instruction",
4916 MD);
4917
4918 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
4919 MD);
4920 }
4921 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
4922 ++i) {
4923 auto &MDO = MD->getOperand(i);
4924 Check(MDO, "second operand should not be null", MD);
4925 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4926 "!prof brunch_weights operand is not a const int");
4927 }
4928 }
4929}
4930
4931void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4932 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4933 bool ExpectedInstTy =
4934 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4935 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4936 I, MD);
4937 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4938 // only be found as DbgAssignIntrinsic operands.
4939 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4940 for (auto *User : AsValue->users()) {
4941 CheckDI(isa<DbgAssignIntrinsic>(User),
4942 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4943 MD, User);
4944 // All of the dbg.assign intrinsics should be in the same function as I.
4945 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4946 CheckDI(DAI->getFunction() == I.getFunction(),
4947 "dbg.assign not in same function as inst", DAI, &I);
4948 }
4949 }
4950 for (DbgVariableRecord *DVR :
4951 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4952 CheckDI(DVR->isDbgAssign(),
4953 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4954 CheckDI(DVR->getFunction() == I.getFunction(),
4955 "DVRAssign not in same function as inst", DVR, &I);
4956 }
4957}
4958
4959void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4961 "!mmra metadata attached to unexpected instruction kind", I, MD);
4962
4963 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4964 // list of tags such as !2 in the following example:
4965 // !0 = !{!"a", !"b"}
4966 // !1 = !{!"c", !"d"}
4967 // !2 = !{!0, !1}
4968 if (MMRAMetadata::isTagMD(MD))
4969 return;
4970
4971 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4972 for (const MDOperand &MDOp : MD->operands())
4973 Check(MMRAMetadata::isTagMD(MDOp.get()),
4974 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4975}
4976
4977void Verifier::visitCallStackMetadata(MDNode *MD) {
4978 // Call stack metadata should consist of a list of at least 1 constant int
4979 // (representing a hash of the location).
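 // For example (illustrative values): !{i64 123, i64 456, i64 789}, a
 // sequence of call-site hashes.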
4980 Check(MD->getNumOperands() >= 1,
4981 "call stack metadata should have at least 1 operand", MD);
4982
4983 for (const auto &Op : MD->operands())
4984 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4985 "call stack metadata operand should be constant integer", Op);
4986}
4987
4988void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4989 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4990 Check(MD->getNumOperands() >= 1,
4991 "!memprof annotations should have at least 1 metadata operand "
4992 "(MemInfoBlock)",
4993 MD);
4994
4995 // Check each MIB
4996 for (auto &MIBOp : MD->operands()) {
4997 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4998 // The first operand of an MIB should be the call stack metadata.
4999 // The rest of the operands should be MDString tags, and there should be
5000 // at least one.
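 // Illustrative shape: !{!<call-stack node>, !"notcold"}, optionally followed
 // by MDNodes that each pair two constant integers.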
5001 Check(MIB->getNumOperands() >= 2,
5002 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5003
5004 // Check call stack metadata (first operand).
5005 Check(MIB->getOperand(0) != nullptr,
5006 "!memprof MemInfoBlock first operand should not be null", MIB);
5007 Check(isa<MDNode>(MIB->getOperand(0)),
5008 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5009 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5010 visitCallStackMetadata(StackMD);
5011
5012 // The next set of 1 or more operands should be MDString.
5013 unsigned I = 1;
5014 for (; I < MIB->getNumOperands(); ++I) {
5015 if (!isa<MDString>(MIB->getOperand(I))) {
5016 Check(I > 1,
5017 "!memprof MemInfoBlock second operand should be an MDString",
5018 MIB);
5019 break;
5020 }
5021 }
5022
5023 // Any remaining should be MDNode that are pairs of integers
5024 for (; I < MIB->getNumOperands(); ++I) {
5025 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5026 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5027 MIB);
5028 Check(OpNode->getNumOperands() == 2,
5029 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5030 "operands",
5031 MIB);
5032 // Check that all of Op's operands are ConstantInt.
5033 Check(llvm::all_of(OpNode->operands(),
5034 [](const MDOperand &Op) {
5035 return mdconst::hasa<ConstantInt>(Op);
5036 }),
5037 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5038 "ConstantInt operands",
5039 MIB);
5040 }
5041 }
5042}
5043
5044void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5045 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5046 // Verify the partial callstack annotated from memprof profiles. This callsite
5047 // is a part of a profiled allocation callstack.
5048 visitCallStackMetadata(MD);
5049}
5050
5051void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5052 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5053 Check(Annotation->getNumOperands() >= 1,
5054 "annotation must have at least one operand");
5055 for (const MDOperand &Op : Annotation->operands()) {
5056 bool TupleOfStrings =
5057 isa<MDTuple>(Op.get()) &&
5058 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5059 return isa<MDString>(Annotation.get());
5060 });
5061 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5062 "operands must be a string or a tuple of strings");
5063 }
5064}
5065
5066void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
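 // Expected shape, illustratively:
 //   !0 = distinct !{!0, !1, !"scope"}   ; self-ref (or string), domain, optional name
 //   !1 = distinct !{!1, !"domain"}      ; self-ref (or string), optional name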
5067 unsigned NumOps = MD->getNumOperands();
5068 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5069 MD);
5070 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5071 "first scope operand must be self-referential or string", MD);
5072 if (NumOps == 3)
5073 Check(isa<MDString>(MD->getOperand(2)),
5074 "third scope operand must be string (if used)", MD);
5075
5076 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5077 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5078
5079 unsigned NumDomainOps = Domain->getNumOperands();
5080 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5081 "domain must have one or two operands", Domain);
5082 Check(Domain->getOperand(0).get() == Domain ||
5083 isa<MDString>(Domain->getOperand(0)),
5084 "first domain operand must be self-referential or string", Domain);
5085 if (NumDomainOps == 2)
5086 Check(isa<MDString>(Domain->getOperand(1)),
5087 "second domain operand must be string (if used)", Domain);
5088}
5089
5090void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5091 for (const MDOperand &Op : MD->operands()) {
5092 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5093 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5094 visitAliasScopeMetadata(OpMD);
5095 }
5096}
5097
5098void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5099 auto IsValidAccessScope = [](const MDNode *MD) {
5100 return MD->getNumOperands() == 0 && MD->isDistinct();
5101 };
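 // For example, !0 = distinct !{} is a single access group, and
 // !1 = !{!0, !2} is a list whose members must each be such a node.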
5102
5103 // It must be either an access scope itself...
5104 if (IsValidAccessScope(MD))
5105 return;
5106
5107 // ...or a list of access scopes.
5108 for (const MDOperand &Op : MD->operands()) {
5109 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5110 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5111 Check(IsValidAccessScope(OpMD),
5112 "Access scope list contains invalid access scope", MD);
5113 }
5114}
5115
5116/// verifyInstruction - Verify that an instruction is well formed.
5117///
5118void Verifier::visitInstruction(Instruction &I) {
5119 BasicBlock *BB = I.getParent();
5120 Check(BB, "Instruction not embedded in basic block!", &I);
5121
5122 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5123 for (User *U : I.users()) {
5124 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5125 "Only PHI nodes may reference their own value!", &I);
5126 }
5127 }
5128
5129 // Check that void typed values don't have names
5130 Check(!I.getType()->isVoidTy() || !I.hasName(),
5131 "Instruction has a name, but provides a void value!", &I);
5132
5133 // Check that the return value of the instruction is either void or a legal
5134 // value type.
5135 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5136 "Instruction returns a non-scalar type!", &I);
5137
5138 // Check that the instruction doesn't produce metadata. Calls are already
5139 // checked against the callee type.
5140 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5141 "Invalid use of metadata!", &I);
5142
5143 // Check that all uses of the instruction, if they are instructions
5144 // themselves, actually have parent basic blocks. If the use is not an
5145 // instruction, it is an error!
5146 for (Use &U : I.uses()) {
5147 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5148 Check(Used->getParent() != nullptr,
5149 "Instruction referencing"
5150 " instruction not embedded in a basic block!",
5151 &I, Used);
5152 else {
5153 CheckFailed("Use of instruction is not an instruction!", U);
5154 return;
5155 }
5156 }
5157
5158 // Get a pointer to the call base of the instruction if it is some form of
5159 // call.
5160 const CallBase *CBI = dyn_cast<CallBase>(&I);
5161
5162 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5163 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5164
5165 // Check to make sure that only first-class-values are operands to
5166 // instructions.
5167 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5168 Check(false, "Instruction operands must be first-class values!", &I);
5169 }
5170
5171 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5172 // This code checks whether the function is used as the operand of a
5173 // clang_arc_attachedcall operand bundle.
5174 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5175 int Idx) {
5176 return CBI && CBI->isOperandBundleOfType(
5177 LLVMContext::OB_clang_arc_attachedcall, Idx);
5178 };
5179
5180 // Check to make sure that the "address of" an intrinsic function is never
5181 // taken. Ignore cases where the address of the intrinsic function is used
5182 // as the argument of operand bundle "clang.arc.attachedcall" as those
5183 // cases are handled in verifyAttachedCallBundle.
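 // For example, "store ptr @llvm.memcpy.p0.p0.i64, ptr %slot" is rejected
 // here; only use as the called operand (or as the clang.arc.attachedcall
 // bundle argument) may name the intrinsic.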
5184 Check((!F->isIntrinsic() ||
5185 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5186 IsAttachedCallOperand(F, CBI, i)),
5187 "Cannot take the address of an intrinsic!", &I);
5188 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5189 F->getIntrinsicID() == Intrinsic::donothing ||
5190 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5191 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5192 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5193 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5194 F->getIntrinsicID() == Intrinsic::coro_resume ||
5195 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5196 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5197 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5198 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5199 F->getIntrinsicID() ==
5200 Intrinsic::experimental_patchpoint_void ||
5201 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5202 F->getIntrinsicID() == Intrinsic::fake_use ||
5203 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5204 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5205 IsAttachedCallOperand(F, CBI, i),
5206 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5207 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5208 &I);
5209 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5210 &M, F, F->getParent());
5211 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5212 Check(OpBB->getParent() == BB->getParent(),
5213 "Referring to a basic block in another function!", &I);
5214 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5215 Check(OpArg->getParent() == BB->getParent(),
5216 "Referring to an argument in another function!", &I);
5217 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5218 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5219 &M, GV, GV->getParent());
5220 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5221 Check(OpInst->getFunction() == BB->getParent(),
5222 "Referring to an instruction in another function!", &I);
5223 verifyDominatesUse(I, i);
5224 } else if (isa<InlineAsm>(I.getOperand(i))) {
5225 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5226 "Cannot take the address of an inline asm!", &I);
5227 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5228 visitConstantExprsRecursively(CPA);
5229 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5230 if (CE->getType()->isPtrOrPtrVectorTy()) {
5231 // If we have a ConstantExpr pointer, we need to see if it came from an
5232 // illegal bitcast.
5233 visitConstantExprsRecursively(CE);
5234 }
5235 }
5236 }
5237
5238 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5239 Check(I.getType()->isFPOrFPVectorTy(),
5240 "fpmath requires a floating point result!", &I);
5241 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5242 if (ConstantFP *CFP0 =
5243 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5244 const APFloat &Accuracy = CFP0->getValueAPF();
5245 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5246 "fpmath accuracy must have float type", &I);
5247 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5248 "fpmath accuracy not a positive number!", &I);
5249 } else {
5250 Check(false, "invalid fpmath accuracy!", &I);
5251 }
5252 }
5253
5254 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5255 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5256 "Ranges are only for loads, calls and invokes!", &I);
5257 visitRangeMetadata(I, Range, I.getType());
5258 }
5259
5260 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5261 Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5262 isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5263 "noalias.addrspace are only for memory operations!", &I);
5264 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5265 }
5266
5267 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5268 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5269 "invariant.group metadata is only for loads and stores", &I);
5270 }
5271
5272 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5273 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5274 &I);
5275 Check(isa<LoadInst>(I),
5276 "nonnull applies only to load instructions, use attributes"
5277 " for calls or invokes",
5278 &I);
5279 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5280 }
5281
5282 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5283 visitDereferenceableMetadata(I, MD);
5284
5285 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5286 visitDereferenceableMetadata(I, MD);
5287
5288 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5289 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5290
5291 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5292 visitAliasScopeListMetadata(MD);
5293 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5294 visitAliasScopeListMetadata(MD);
5295
5296 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5297 visitAccessGroupMetadata(MD);
5298
5299 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5300 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5301 &I);
5302 Check(isa<LoadInst>(I),
5303 "align applies only to load instructions, "
5304 "use attributes for calls or invokes",
5305 &I);
5306 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5307 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5308 Check(CI && CI->getType()->isIntegerTy(64),
5309 "align metadata value must be an i64!", &I);
5310 uint64_t Align = CI->getZExtValue();
5311 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5312 &I);
5314 "alignment is larger that implementation defined limit", &I);
5315 }
5316
5317 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5318 visitProfMetadata(I, MD);
5319
5320 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5321 visitMemProfMetadata(I, MD);
5322
5323 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5324 visitCallsiteMetadata(I, MD);
5325
5326 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5327 visitDIAssignIDMetadata(I, MD);
5328
5329 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5330 visitMMRAMetadata(I, MMRA);
5331
5332 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5333 visitAnnotationMetadata(Annotation);
5334
5335 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5336 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5337 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5338 }
5339
5340 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5341 verifyFragmentExpression(*DII);
5342 verifyNotEntryValue(*DII);
5343 }
5344
5345 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5346 I.getAllMetadata(MDs);
5347 for (auto Attachment : MDs) {
5348 unsigned Kind = Attachment.first;
5349 auto AllowLocs =
5350 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5351 ? AreDebugLocsAllowed::Yes
5352 : AreDebugLocsAllowed::No;
5353 visitMDNode(*Attachment.second, AllowLocs);
5354 }
5355
5356 InstsInThisBlock.insert(&I);
5357}
5358
5359/// Allow intrinsics to be verified in different ways.
5360void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5361 Function *IF = Call.getCalledFunction();
5362 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5363 IF);
5364
5365 // Verify that the intrinsic prototype lines up with what the .td files
5366 // describe.
5367 FunctionType *IFTy = IF->getFunctionType();
5368 bool IsVarArg = IFTy->isVarArg();
5369
5370 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5371 getIntrinsicInfoTableEntries(ID, Table);
5372 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5373
5374 // Walk the descriptors to extract overloaded types.
5375 SmallVector<Type *, 4> ArgTys;
5376 Intrinsic::MatchIntrinsicTypesResult Res =
5377 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5378 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5379 "Intrinsic has incorrect return type!", IF);
5380 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5381 "Intrinsic has incorrect argument type!", IF);
5382
5383 // Verify if the intrinsic call matches the vararg property.
5384 if (IsVarArg)
5386 "Intrinsic was not defined with variable arguments!", IF);
5387 else
5389 "Callsite was not defined with variable arguments!", IF);
5390
5391 // All descriptors should be absorbed by now.
5392 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5393
5394 // Now that we have the intrinsic ID and the actual argument types (and we
5395 // know they are legal for the intrinsic!) get the intrinsic name through the
5396 // usual means. This allows us to verify the mangling of argument types into
5397 // the name.
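 // For example, an overload mangles its types into the name, roughly as in
 // llvm.masked.load.v4f32.p0; a declaration whose name disagrees with the
 // recomputed spelling fails the check below.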
5398 const std::string ExpectedName =
5399 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5400 Check(ExpectedName == IF->getName(),
5401 "Intrinsic name not mangled correctly for type arguments! "
5402 "Should be: " +
5403 ExpectedName,
5404 IF);
5405
5406 // If the intrinsic takes MDNode arguments, verify that they are either global
5407 // or are local to *this* function.
5408 for (Value *V : Call.args()) {
5409 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5410 visitMetadataAsValue(*MD, Call.getCaller());
5411 if (auto *Const = dyn_cast<Constant>(V))
5412 Check(!Const->getType()->isX86_AMXTy(),
5413 "const x86_amx is not allowed in argument!");
5414 }
5415
5416 switch (ID) {
5417 default:
5418 break;
5419 case Intrinsic::assume: {
5420 for (auto &Elem : Call.bundle_op_infos()) {
5421 unsigned ArgCount = Elem.End - Elem.Begin;
5422 // Separate storage assumptions are special insofar as they're the only
5423 // operand bundles allowed on assumes that aren't parameter attributes.
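 // Illustrative use:
 //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]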
5424 if (Elem.Tag->getKey() == "separate_storage") {
5425 Check(ArgCount == 2,
5426 "separate_storage assumptions should have 2 arguments", Call);
5427 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5428 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5429 "arguments to separate_storage assumptions should be pointers",
5430 Call);
5431 return;
5432 }
5433 Check(Elem.Tag->getKey() == "ignore" ||
5434 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5435 "tags must be valid attribute names", Call);
5436 Attribute::AttrKind Kind =
5437 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5438 if (Kind == Attribute::Alignment) {
5439 Check(ArgCount <= 3 && ArgCount >= 2,
5440 "alignment assumptions should have 2 or 3 arguments", Call);
5441 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5442 "first argument should be a pointer", Call);
5443 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5444 "second argument should be an integer", Call);
5445 if (ArgCount == 3)
5446 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5447 "third argument should be an integer if present", Call);
5448 return;
5449 }
5450 Check(ArgCount <= 2, "too many arguments", Call);
5451 if (Kind == Attribute::None)
5452 break;
5453 if (Attribute::isIntAttrKind(Kind)) {
5454 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5455 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5456 "the second argument should be a constant integral value", Call);
5457 } else if (Attribute::canUseAsParamAttr(Kind)) {
5458 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5459 } else if (Attribute::canUseAsFnAttr(Kind)) {
5460 Check((ArgCount) == 0, "this attribute has no argument", Call);
5461 }
5462 }
5463 break;
5464 }
5465 case Intrinsic::ucmp:
5466 case Intrinsic::scmp: {
5467 Type *SrcTy = Call.getOperand(0)->getType();
5468 Type *DestTy = Call.getType();
5469
5470 Check(DestTy->getScalarSizeInBits() >= 2,
5471 "result type must be at least 2 bits wide", Call);
5472
5473 bool IsDestTypeVector = DestTy->isVectorTy();
5474 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5475 "ucmp/scmp argument and result types must both be either vector or "
5476 "scalar types",
5477 Call);
5478 if (IsDestTypeVector) {
5479 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5480 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5481 Check(SrcVecLen == DestVecLen,
5482 "return type and arguments must have the same number of "
5483 "elements",
5484 Call);
5485 }
5486 break;
5487 }
5488 case Intrinsic::coro_id: {
5489 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5490 if (isa<ConstantPointerNull>(InfoArg))
5491 break;
5492 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5493 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5494 "info argument of llvm.coro.id must refer to an initialized "
5495 "constant");
5496 Constant *Init = GV->getInitializer();
5497 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5498 "info argument of llvm.coro.id must refer to either a struct or "
5499 "an array");
5500 break;
5501 }
5502 case Intrinsic::is_fpclass: {
5503 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5504 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5505 "unsupported bits for llvm.is.fpclass test mask");
5506 break;
5507 }
5508 case Intrinsic::fptrunc_round: {
5509 // Check the rounding mode
5510 Metadata *MD = nullptr;
5511 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5512 if (MAV)
5513 MD = MAV->getMetadata();
5514
5515 Check(MD != nullptr, "missing rounding mode argument", Call);
5516
5517 Check(isa<MDString>(MD),
5518 ("invalid value for llvm.fptrunc.round metadata operand"
5519 " (the operand should be a string)"),
5520 MD);
5521
5522 std::optional<RoundingMode> RoundMode =
5523 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5524 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5525 "unsupported rounding mode argument", Call);
5526 break;
5527 }
5528#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5529#include "llvm/IR/VPIntrinsics.def"
5530#undef BEGIN_REGISTER_VP_INTRINSIC
5531 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5532 break;
5533#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5534 case Intrinsic::INTRINSIC:
5535#include "llvm/IR/ConstrainedOps.def"
5536#undef INSTRUCTION
5537 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5538 break;
5539 case Intrinsic::dbg_declare: // llvm.dbg.declare
5540 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5541 "invalid llvm.dbg.declare intrinsic call 1", Call);
5542 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5543 break;
5544 case Intrinsic::dbg_value: // llvm.dbg.value
5545 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5546 break;
5547 case Intrinsic::dbg_assign: // llvm.dbg.assign
5548 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5549 break;
5550 case Intrinsic::dbg_label: // llvm.dbg.label
5551 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5552 break;
5553 case Intrinsic::memcpy:
5554 case Intrinsic::memcpy_inline:
5555 case Intrinsic::memmove:
5556 case Intrinsic::memset:
5557 case Intrinsic::memset_inline:
5558 case Intrinsic::experimental_memset_pattern: {
5559 break;
5560 }
5561 case Intrinsic::memcpy_element_unordered_atomic:
5562 case Intrinsic::memmove_element_unordered_atomic:
5563 case Intrinsic::memset_element_unordered_atomic: {
5564 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5565
5566 ConstantInt *ElementSizeCI =
5567 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5568 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5569 Check(ElementSizeVal.isPowerOf2(),
5570 "element size of the element-wise atomic memory intrinsic "
5571 "must be a power of 2",
5572 Call);
5573
5574 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5575 return Alignment && ElementSizeVal.ule(Alignment->value());
5576 };
5577 Check(IsValidAlignment(AMI->getDestAlign()),
5578 "incorrect alignment of the destination argument", Call);
5579 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5580 Check(IsValidAlignment(AMT->getSourceAlign()),
5581 "incorrect alignment of the source argument", Call);
5582 }
5583 break;
5584 }
5585 case Intrinsic::call_preallocated_setup: {
5586 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5587 Check(NumArgs != nullptr,
5588 "llvm.call.preallocated.setup argument must be a constant");
5589 bool FoundCall = false;
5590 for (User *U : Call.users()) {
5591 auto *UseCall = dyn_cast<CallBase>(U);
5592 Check(UseCall != nullptr,
5593 "Uses of llvm.call.preallocated.setup must be calls");
5594 const Function *Fn = UseCall->getCalledFunction();
5595 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5596 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5597 Check(AllocArgIndex != nullptr,
5598 "llvm.call.preallocated.alloc arg index must be a constant");
5599 auto AllocArgIndexInt = AllocArgIndex->getValue();
5600 Check(AllocArgIndexInt.sge(0) &&
5601 AllocArgIndexInt.slt(NumArgs->getValue()),
5602 "llvm.call.preallocated.alloc arg index must be between 0 and "
5603 "corresponding "
5604 "llvm.call.preallocated.setup's argument count");
5605 } else if (Fn && Fn->getIntrinsicID() ==
5606 Intrinsic::call_preallocated_teardown) {
5607 // nothing to do
5608 } else {
5609 Check(!FoundCall, "Can have at most one call corresponding to a "
5610 "llvm.call.preallocated.setup");
5611 FoundCall = true;
5612 size_t NumPreallocatedArgs = 0;
5613 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5614 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5615 ++NumPreallocatedArgs;
5616 }
5617 }
5618 Check(NumPreallocatedArgs != 0,
5619 "cannot use preallocated intrinsics on a call without "
5620 "preallocated arguments");
5621 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5622 "llvm.call.preallocated.setup arg size must be equal to number "
5623 "of preallocated arguments "
5624 "at call site",
5625 Call, *UseCall);
5626 // getOperandBundle() cannot be called if more than one operand bundle of
5627 // the same kind exists. There is already a check elsewhere for this, so
5628 // skip here if we see more than one.
5629 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5630 1) {
5631 return;
5632 }
5633 auto PreallocatedBundle =
5634 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5635 Check(PreallocatedBundle,
5636 "Use of llvm.call.preallocated.setup outside intrinsics "
5637 "must be in \"preallocated\" operand bundle");
5638 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5639 "preallocated bundle must have token from corresponding "
5640 "llvm.call.preallocated.setup");
5641 }
5642 }
5643 break;
5644 }
5645 case Intrinsic::call_preallocated_arg: {
5646 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5647 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5648 Intrinsic::call_preallocated_setup,
5649 "llvm.call.preallocated.arg token argument must be a "
5650 "llvm.call.preallocated.setup");
5651 Check(Call.hasFnAttr(Attribute::Preallocated),
5652 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5653 "call site attribute");
5654 break;
5655 }
5656 case Intrinsic::call_preallocated_teardown: {
5657 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5658 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5659 Intrinsic::call_preallocated_setup,
5660 "llvm.call.preallocated.teardown token argument must be a "
5661 "llvm.call.preallocated.setup");
5662 break;
5663 }
5664 case Intrinsic::gcroot:
5665 case Intrinsic::gcwrite:
5666 case Intrinsic::gcread:
5667 if (ID == Intrinsic::gcroot) {
5668 AllocaInst *AI =
5669 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5670 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5671 Check(isa<Constant>(Call.getArgOperand(1)),
5672 "llvm.gcroot parameter #2 must be a constant.", Call);
5673 if (!AI->getAllocatedType()->isPointerTy()) {
5674 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5675 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5676 "or argument #2 must be a non-null constant.",
5677 Call);
5678 }
5679 }
5680
5681 Check(Call.getParent()->getParent()->hasGC(),
5682 "Enclosing function does not use GC.", Call);
5683 break;
5684 case Intrinsic::init_trampoline:
5685 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5686 "llvm.init_trampoline parameter #2 must resolve to a function.",
5687 Call);
5688 break;
5689 case Intrinsic::prefetch:
5690 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5691 "rw argument to llvm.prefetch must be 0-1", Call);
5692 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5693 "locality argument to llvm.prefetch must be 0-3", Call);
5694 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5695 "cache type argument to llvm.prefetch must be 0-1", Call);
5696 break;
5697 case Intrinsic::stackprotector:
5698 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5699 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5700 break;
5701 case Intrinsic::localescape: {
5702 BasicBlock *BB = Call.getParent();
5703 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5704 Call);
5705 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5706 Call);
5707 for (Value *Arg : Call.args()) {
5708 if (isa<ConstantPointerNull>(Arg))
5709 continue; // Null values are allowed as placeholders.
5710 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5711 Check(AI && AI->isStaticAlloca(),
5712 "llvm.localescape only accepts static allocas", Call);
5713 }
5714 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5715 SawFrameEscape = true;
5716 break;
5717 }
5718 case Intrinsic::localrecover: {
5719 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5720 Function *Fn = dyn_cast<Function>(FnArg);
5721 Check(Fn && !Fn->isDeclaration(),
5722 "llvm.localrecover first "
5723 "argument must be function defined in this module",
5724 Call);
5725 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5726 auto &Entry = FrameEscapeInfo[Fn];
5727 Entry.second = unsigned(
5728 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5729 break;
5730 }
5731
5732 case Intrinsic::experimental_gc_statepoint:
5733 if (auto *CI = dyn_cast<CallInst>(&Call))
5734 Check(!CI->isInlineAsm(),
5735 "gc.statepoint support for inline assembly unimplemented", CI);
5736 Check(Call.getParent()->getParent()->hasGC(),
5737 "Enclosing function does not use GC.", Call);
5738
5739 verifyStatepoint(Call);
5740 break;
5741 case Intrinsic::experimental_gc_result: {
5742 Check(Call.getParent()->getParent()->hasGC(),
5743 "Enclosing function does not use GC.", Call);
5744
5745 auto *Statepoint = Call.getArgOperand(0);
5746 if (isa<UndefValue>(Statepoint))
5747 break;
5748
5749 // Are we tied to a statepoint properly?
5750 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5751 const Function *StatepointFn =
5752 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5753 Check(StatepointFn && StatepointFn->isDeclaration() &&
5754 StatepointFn->getIntrinsicID() ==
5755 Intrinsic::experimental_gc_statepoint,
5756 "gc.result operand #1 must be from a statepoint", Call,
5757 Call.getArgOperand(0));
5758
5759 // Check that result type matches wrapped callee.
5760 auto *TargetFuncType =
5761 cast<FunctionType>(StatepointCall->getParamElementType(2));
5762 Check(Call.getType() == TargetFuncType->getReturnType(),
5763 "gc.result result type does not match wrapped callee", Call);
5764 break;
5765 }
5766 case Intrinsic::experimental_gc_relocate: {
5767 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5768
5769 Check(isa<PointerType>(Call.getType()->getScalarType()),
5770 "gc.relocate must return a pointer or a vector of pointers", Call);
5771
5772 // Check that this relocate is correctly tied to the statepoint
5773
5774 // This is the case for a relocate on the unwinding path of an invoke statepoint
5775 if (LandingPadInst *LandingPad =
5776 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5777
5778 const BasicBlock *InvokeBB =
5779 LandingPad->getParent()->getUniquePredecessor();
5780
5781 // Landingpad relocates should have only one predecessor with invoke
5782 // statepoint terminator
5783 Check(InvokeBB, "safepoints should have unique landingpads",
5784 LandingPad->getParent());
5785 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5786 InvokeBB);
5787 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5788 "gc relocate should be linked to a statepoint", InvokeBB);
5789 } else {
5790 // In all other cases relocate should be tied to the statepoint directly.
5791 // This covers relocates on a normal return path of invoke statepoint and
5792 // relocates of a call statepoint.
5793 auto *Token = Call.getArgOperand(0);
5794 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5795 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5796 }
5797
5798 // Verify rest of the relocate arguments.
5799 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5800
5801 // Both the base and derived must be piped through the safepoint.
5802 Value *Base = Call.getArgOperand(1);
5803 Check(isa<ConstantInt>(Base),
5804 "gc.relocate operand #2 must be integer offset", Call);
5805
5806 Value *Derived = Call.getArgOperand(2);
5807 Check(isa<ConstantInt>(Derived),
5808 "gc.relocate operand #3 must be integer offset", Call);
5809
5810 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5811 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5812
5813 // Check the bounds
5814 if (isa<UndefValue>(StatepointCall))
5815 break;
5816 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5817 .getOperandBundle(LLVMContext::OB_gc_live)) {
5818 Check(BaseIndex < Opt->Inputs.size(),
5819 "gc.relocate: statepoint base index out of bounds", Call);
5820 Check(DerivedIndex < Opt->Inputs.size(),
5821 "gc.relocate: statepoint derived index out of bounds", Call);
5822 }
5823
5824 // Relocated value must be either a pointer type or vector-of-pointer type,
5825 // but gc_relocate does not need to return the same pointer type as the
5826 // relocated pointer. It can be cast to the correct type later if it's
5827 // desired. However, they must have the same address space and 'vectorness'
5828 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5829 auto *ResultType = Call.getType();
5830 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5831 auto *BaseType = Relocate.getBasePtr()->getType();
5832
5833 Check(BaseType->isPtrOrPtrVectorTy(),
5834 "gc.relocate: relocated value must be a pointer", Call);
5835 Check(DerivedType->isPtrOrPtrVectorTy(),
5836 "gc.relocate: relocated value must be a pointer", Call);
5837
5838 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5839 "gc.relocate: vector relocates to vector and pointer to pointer",
5840 Call);
5841 Check(
5842 ResultType->getPointerAddressSpace() ==
5843 DerivedType->getPointerAddressSpace(),
5844 "gc.relocate: relocating a pointer shouldn't change its address space",
5845 Call);
5846
5847 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5848 Check(GC, "gc.relocate: calling function must have GCStrategy",
5849 Call.getFunction());
5850 if (GC) {
5851 auto isGCPtr = [&GC](Type *PTy) {
5852 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5853 };
5854 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5855 Check(isGCPtr(BaseType),
5856 "gc.relocate: relocated value must be a gc pointer", Call);
5857 Check(isGCPtr(DerivedType),
5858 "gc.relocate: relocated value must be a gc pointer", Call);
5859 }
5860 break;
5861 }
5862 case Intrinsic::experimental_patchpoint: {
5863 if (Call.getCallingConv() == CallingConv::AnyReg) {
5864 Check(Call.getType()->isSingleValueType(),
5865 "patchpoint: invalid return type used with anyregcc", Call);
5866 }
5867 break;
5868 }
5869 case Intrinsic::eh_exceptioncode:
5870 case Intrinsic::eh_exceptionpointer: {
5871 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5872 "eh.exceptionpointer argument must be a catchpad", Call);
5873 break;
5874 }
5875 case Intrinsic::get_active_lane_mask: {
5876 Check(Call.getType()->isVectorTy(),
5877 "get_active_lane_mask: must return a "
5878 "vector",
5879 Call);
5880 auto *ElemTy = Call.getType()->getScalarType();
5881 Check(ElemTy->isIntegerTy(1),
5882 "get_active_lane_mask: element type is not "
5883 "i1",
5884 Call);
5885 break;
5886 }
5887 case Intrinsic::experimental_get_vector_length: {
5888 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5889 Check(!VF->isNegative() && !VF->isZero(),
5890 "get_vector_length: VF must be positive", Call);
5891 break;
5892 }
5893 case Intrinsic::masked_load: {
5894 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5895 Call);
5896
5897 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5898 Value *Mask = Call.getArgOperand(2);
5899 Value *PassThru = Call.getArgOperand(3);
5900 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5901 Call);
5902 Check(Alignment->getValue().isPowerOf2(),
5903 "masked_load: alignment must be a power of 2", Call);
5904 Check(PassThru->getType() == Call.getType(),
5905 "masked_load: pass through and return type must match", Call);
5906 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5907 cast<VectorType>(Call.getType())->getElementCount(),
5908 "masked_load: vector mask must be same length as return", Call);
5909 break;
5910 }
5911 case Intrinsic::masked_store: {
5912 Value *Val = Call.getArgOperand(0);
5913 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5914 Value *Mask = Call.getArgOperand(3);
5915 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5916 Call);
5917 Check(Alignment->getValue().isPowerOf2(),
5918 "masked_store: alignment must be a power of 2", Call);
5919 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5920 cast<VectorType>(Val->getType())->getElementCount(),
5921 "masked_store: vector mask must be same length as value", Call);
5922 break;
5923 }
5924
5925 case Intrinsic::masked_gather: {
5926 const APInt &Alignment =
5927 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5928 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5929 "masked_gather: alignment must be 0 or a power of 2", Call);
5930 break;
5931 }
5932 case Intrinsic::masked_scatter: {
5933 const APInt &Alignment =
5934 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5935 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5936 "masked_scatter: alignment must be 0 or a power of 2", Call);
5937 break;
5938 }
5939
5940 case Intrinsic::experimental_guard: {
5941 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5942 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5943 "experimental_guard must have exactly one "
5944 "\"deopt\" operand bundle");
5945 break;
5946 }
5947
5948 case Intrinsic::experimental_deoptimize: {
5949 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5950 Call);
5951 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5952 "experimental_deoptimize must have exactly one "
5953 "\"deopt\" operand bundle");
5954 Check(Call.getType() == Call.getFunction()->getReturnType(),
5955 "experimental_deoptimize return type must match caller return type");
5956
5957 if (isa<CallInst>(Call)) {
5958 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5959 Check(RI,
5960 "calls to experimental_deoptimize must be followed by a return");
5961
5962 if (!Call.getType()->isVoidTy() && RI)
5963 Check(RI->getReturnValue() == &Call,
5964 "calls to experimental_deoptimize must be followed by a return "
5965 "of the value computed by experimental_deoptimize");
5966 }
5967
5968 break;
5969 }
5970 case Intrinsic::vastart: {
5971 Check(Call.getFunction()->isVarArg(),
5972 "va_start called in a non-varargs function");
5973 break;
5974 }
5975 case Intrinsic::vector_reduce_and:
5976 case Intrinsic::vector_reduce_or:
5977 case Intrinsic::vector_reduce_xor:
5978 case Intrinsic::vector_reduce_add:
5979 case Intrinsic::vector_reduce_mul:
5980 case Intrinsic::vector_reduce_smax:
5981 case Intrinsic::vector_reduce_smin:
5982 case Intrinsic::vector_reduce_umax:
5983 case Intrinsic::vector_reduce_umin: {
5984 Type *ArgTy = Call.getArgOperand(0)->getType();
5985 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5986 "Intrinsic has incorrect argument type!");
5987 break;
5988 }
5989 case Intrinsic::vector_reduce_fmax:
5990 case Intrinsic::vector_reduce_fmin: {
5991 Type *ArgTy = Call.getArgOperand(0)->getType();
5992 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5993 "Intrinsic has incorrect argument type!");
5994 break;
5995 }
5996 case Intrinsic::vector_reduce_fadd:
5997 case Intrinsic::vector_reduce_fmul: {
5998 // Unlike the other reductions, the first argument is a start value. The
5999 // second argument is the vector to be reduced.
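 // Illustrative: call float @llvm.vector.reduce.fadd.v4f32(float 0.0,
 //                                                          <4 x float> %v)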
6000 Type *ArgTy = Call.getArgOperand(1)->getType();
6001 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6002 "Intrinsic has incorrect argument type!");
6003 break;
6004 }
6005 case Intrinsic::smul_fix:
6006 case Intrinsic::smul_fix_sat:
6007 case Intrinsic::umul_fix:
6008 case Intrinsic::umul_fix_sat:
6009 case Intrinsic::sdiv_fix:
6010 case Intrinsic::sdiv_fix_sat:
6011 case Intrinsic::udiv_fix:
6012 case Intrinsic::udiv_fix_sat: {
6013 Value *Op1 = Call.getArgOperand(0);
6014 Value *Op2 = Call.getArgOperand(1);
6016 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6017 "vector of ints");
6019 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6020 "vector of ints");
6021
6022 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6023 Check(Op3->getType()->isIntegerTy(),
6024 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6025 Check(Op3->getBitWidth() <= 32,
6026 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6027
6028 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6029 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6030 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6031 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6032 "the operands");
6033 } else {
6034 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6035 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6036 "to the width of the operands");
6037 }
6038 break;
6039 }
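// Worked example (illustrative): for llvm.smul.fix.i32 the scale operand must
// satisfy scale < 32, so a scale of 31 is accepted and 32 is rejected; the
// unsigned variants use the <= check above, so umul.fix.i32 accepts scale 32.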
6040 case Intrinsic::lrint:
6041 case Intrinsic::llrint:
6042 case Intrinsic::lround:
6043 case Intrinsic::llround: {
6044 Type *ValTy = Call.getArgOperand(0)->getType();
6045 Type *ResultTy = Call.getType();
6046 auto *VTy = dyn_cast<VectorType>(ValTy);
6047 auto *RTy = dyn_cast<VectorType>(ResultTy);
6048 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6049 ExpectedName + ": argument must be floating-point or vector "
6050 "of floating-points, and result must be integer or "
6051 "vector of integers",
6052 &Call);
6053 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6054 ExpectedName + ": argument and result disagree on vector use", &Call);
6055 if (VTy) {
6056 Check(VTy->getElementCount() == RTy->getElementCount(),
6057 ExpectedName + ": argument must be same length as result", &Call);
6058 }
6059 break;
6060 }
6061 case Intrinsic::bswap: {
6062 Type *Ty = Call.getType();
6063 unsigned Size = Ty->getScalarSizeInBits();
6064 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6065 break;
6066 }
6067 case Intrinsic::invariant_start: {
6068 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6069 Check(InvariantSize &&
6070 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6071 "invariant_start parameter must be -1, 0 or a positive number",
6072 &Call);
6073 break;
6074 }
6075 case Intrinsic::matrix_multiply:
6076 case Intrinsic::matrix_transpose:
6077 case Intrinsic::matrix_column_major_load:
6078 case Intrinsic::matrix_column_major_store: {
6079 Function *IF = Call.getCalledFunction();
6080 ConstantInt *Stride = nullptr;
6081 ConstantInt *NumRows;
6082 ConstantInt *NumColumns;
6083 VectorType *ResultTy;
6084 Type *Op0ElemTy = nullptr;
6085 Type *Op1ElemTy = nullptr;
6086 switch (ID) {
6087 case Intrinsic::matrix_multiply: {
6088 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6089 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6090 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6091 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6092 ->getNumElements() ==
6093 NumRows->getZExtValue() * N->getZExtValue(),
6094 "First argument of a matrix operation does not match specified "
6095 "shape!");
6096 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6097 ->getNumElements() ==
6098 N->getZExtValue() * NumColumns->getZExtValue(),
6099 "Second argument of a matrix operation does not match specified "
6100 "shape!");
6101
6102 ResultTy = cast<VectorType>(Call.getType());
6103 Op0ElemTy =
6104 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6105 Op1ElemTy =
6106 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6107 break;
6108 }
6109 case Intrinsic::matrix_transpose:
6110 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6111 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6112 ResultTy = cast<VectorType>(Call.getType());
6113 Op0ElemTy =
6114 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6115 break;
6116 case Intrinsic::matrix_column_major_load: {
6117 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6118 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6119 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6120 ResultTy = cast<VectorType>(Call.getType());
6121 break;
6122 }
6123 case Intrinsic::matrix_column_major_store: {
6124 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6125 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6126 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6127 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6128 Op0ElemTy =
6129 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6130 break;
6131 }
6132 default:
6133 llvm_unreachable("unexpected intrinsic");
6134 }
6135
6136 Check(ResultTy->getElementType()->isIntegerTy() ||
6137 ResultTy->getElementType()->isFloatingPointTy(),
6138 "Result type must be an integer or floating-point type!", IF);
6139
6140 if (Op0ElemTy)
6141 Check(ResultTy->getElementType() == Op0ElemTy,
6142 "Vector element type mismatch of the result and first operand "
6143 "vector!",
6144 IF);
6145
6146 if (Op1ElemTy)
6147 Check(ResultTy->getElementType() == Op1ElemTy,
6148 "Vector element type mismatch of the result and second operand "
6149 "vector!",
6150 IF);
6151
6152 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6153 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6154 "Result of a matrix operation does not fit in the returned vector!");
6155
6156 if (Stride)
6157 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6158 "Stride must be greater or equal than the number of rows!", IF);
6159
6160 break;
6161 }
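// Shape example (illustrative): for a call such as
//   @llvm.matrix.multiply.v6f32.v6f32.v4f32(<6 x float> %a, <4 x float> %b,
//                                           i32 3, i32 2, i32 2)
// the checks above require 6 == 3 * 2 elements in %a, 4 == 2 * 2 in %b, and a
// <6 x float> result (3 rows by 2 columns).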
6162 case Intrinsic::vector_splice: {
6163 VectorType *VecTy = cast<VectorType>(Call.getType());
6164 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6165 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6166 if (Call.getParent() && Call.getParent()->getParent()) {
6167 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6168 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6169 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6170 }
6171 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6172 (Idx >= 0 && Idx < KnownMinNumElements),
6173 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6174 "known minimum number of elements in the vector. For scalable "
6175 "vectors the minimum number of elements is determined from "
6176 "vscale_range.",
6177 &Call);
6178 break;
6179 }
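// Index-range example (illustrative): splicing <vscale x 4 x i32> vectors in a
// function carrying vscale_range(2,2) gives KnownMinNumElements = 4 * 2 = 8,
// so the index operand must lie in [-8, 7]; without the attribute only [-4, 3]
// is accepted.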
6180 case Intrinsic::stepvector: {
6181 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6182 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6183 VecTy->getScalarSizeInBits() >= 8,
6184 "stepvector only supported for vectors of integers "
6185 "with a bitwidth of at least 8.",
6186 &Call);
6187 break;
6188 }
6189 case Intrinsic::experimental_vector_match: {
6190 Value *Op1 = Call.getArgOperand(0);
6191 Value *Op2 = Call.getArgOperand(1);
6192 Value *Mask = Call.getArgOperand(2);
6193
6194 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6195 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6196 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6197
6198 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6199 Check(isa<FixedVectorType>(Op2Ty),
6200 "Second operand must be a fixed length vector.", &Call);
6201 Check(Op1Ty->getElementType()->isIntegerTy(),
6202 "First operand must be a vector of integers.", &Call);
6203 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6204 "First two operands must have the same element type.", &Call);
6205 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6206 "First operand and mask must have the same number of elements.",
6207 &Call);
6208 Check(MaskTy->getElementType()->isIntegerTy(1),
6209 "Mask must be a vector of i1's.", &Call);
6210 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6211 &Call);
6212 break;
6213 }
6214 case Intrinsic::vector_insert: {
6215 Value *Vec = Call.getArgOperand(0);
6216 Value *SubVec = Call.getArgOperand(1);
6217 Value *Idx = Call.getArgOperand(2);
6218 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6219
6220 VectorType *VecTy = cast<VectorType>(Vec->getType());
6221 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6222
6223 ElementCount VecEC = VecTy->getElementCount();
6224 ElementCount SubVecEC = SubVecTy->getElementCount();
6225 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6226 "vector_insert parameters must have the same element "
6227 "type.",
6228 &Call);
6229 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6230 "vector_insert index must be a constant multiple of "
6231 "the subvector's known minimum vector length.");
6232
6233 // If this insertion is not the 'mixed' case where a fixed vector is
6234 // inserted into a scalable vector, ensure that the insertion of the
6235 // subvector does not overrun the parent vector.
6236 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6237 Check(IdxN < VecEC.getKnownMinValue() &&
6238 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6239 "subvector operand of vector_insert would overrun the "
6240 "vector being inserted into.");
6241 }
6242 break;
6243 }
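// Overrun example (illustrative): inserting a <4 x i32> subvector into an
// <8 x i32> vector is only valid at indices 0 and 4; index 8, or any index
// that is not a multiple of 4, fails the checks above. Inserting a fixed
// <4 x i32> into a scalable <vscale x 4 x i32> skips the overrun check.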
6244 case Intrinsic::vector_extract: {
6245 Value *Vec = Call.getArgOperand(0);
6246 Value *Idx = Call.getArgOperand(1);
6247 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6248
6249 VectorType *ResultTy = cast<VectorType>(Call.getType());
6250 VectorType *VecTy = cast<VectorType>(Vec->getType());
6251
6252 ElementCount VecEC = VecTy->getElementCount();
6253 ElementCount ResultEC = ResultTy->getElementCount();
6254
6255 Check(ResultTy->getElementType() == VecTy->getElementType(),
6256 "vector_extract result must have the same element "
6257 "type as the input vector.",
6258 &Call);
6259 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6260 "vector_extract index must be a constant multiple of "
6261 "the result type's known minimum vector length.");
6262
6263 // If this extraction is not the 'mixed' case where a fixed vector is
6264 // extracted from a scalable vector, ensure that the extraction does not
6265 // overrun the parent vector.
6266 if (VecEC.isScalable() == ResultEC.isScalable()) {
6267 Check(IdxN < VecEC.getKnownMinValue() &&
6268 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6269 "vector_extract would overrun.");
6270 }
6271 break;
6272 }
6273 case Intrinsic::experimental_vector_partial_reduce_add: {
6274 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6275 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6276
6277 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6278 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6279
6280 Check((VecWidth % AccWidth) == 0,
6281 "Invalid vector widths for partial "
6282 "reduction. The width of the input vector "
6283 "must be a positive integer multiple of "
6284 "the width of the accumulator vector.");
6285 break;
6286 }
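// Width example (illustrative): accumulating a <16 x i32> input into a
// <4 x i32> accumulator passes (16 % 4 == 0), while a <4 x i32> input with an
// <8 x i32> accumulator is rejected by the check above.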
6287 case Intrinsic::experimental_noalias_scope_decl: {
6288 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6289 break;
6290 }
6291 case Intrinsic::preserve_array_access_index:
6292 case Intrinsic::preserve_struct_access_index:
6293 case Intrinsic::aarch64_ldaxr:
6294 case Intrinsic::aarch64_ldxr:
6295 case Intrinsic::arm_ldaex:
6296 case Intrinsic::arm_ldrex: {
6297 Type *ElemTy = Call.getParamElementType(0);
6298 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6299 &Call);
6300 break;
6301 }
6302 case Intrinsic::aarch64_stlxr:
6303 case Intrinsic::aarch64_stxr:
6304 case Intrinsic::arm_stlex:
6305 case Intrinsic::arm_strex: {
6306 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6307 Check(ElemTy,
6308 "Intrinsic requires elementtype attribute on second argument.",
6309 &Call);
6310 break;
6311 }
6312 case Intrinsic::aarch64_prefetch: {
6313 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6314 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6315 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6316 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6317 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6318 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6319 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6320 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6321 break;
6322 }
6323 case Intrinsic::callbr_landingpad: {
6324 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6325 Check(CBR, "intrinsic requires callbr operand", &Call);
6326 if (!CBR)
6327 break;
6328
6329 const BasicBlock *LandingPadBB = Call.getParent();
6330 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6331 if (!PredBB) {
6332 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6333 break;
6334 }
6335 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6336 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6337 &Call);
6338 break;
6339 }
6340 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6341 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6342 "block in indirect destination list",
6343 &Call);
6344 const Instruction &First = *LandingPadBB->begin();
6345 Check(&First == &Call, "No other instructions may precede intrinsic",
6346 &Call);
6347 break;
6348 }
6349 case Intrinsic::amdgcn_cs_chain: {
6350 auto CallerCC = Call.getCaller()->getCallingConv();
6351 switch (CallerCC) {
6352 case CallingConv::AMDGPU_CS:
6353 case CallingConv::AMDGPU_CS_Chain:
6354 case CallingConv::AMDGPU_CS_ChainPreserve:
6355 break;
6356 default:
6357 CheckFailed("Intrinsic can only be used from functions with the "
6358 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6359 "calling conventions",
6360 &Call);
6361 break;
6362 }
6363
6364 Check(Call.paramHasAttr(2, Attribute::InReg),
6365 "SGPR arguments must have the `inreg` attribute", &Call);
6366 Check(!Call.paramHasAttr(3, Attribute::InReg),
6367 "VGPR arguments must not have the `inreg` attribute", &Call);
6368 break;
6369 }
6370 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6371 auto CallerCC = Call.getCaller()->getCallingConv();
6372 switch (CallerCC) {
6373 case CallingConv::AMDGPU_CS_Chain:
6374 case CallingConv::AMDGPU_CS_ChainPreserve:
6375 break;
6376 default:
6377 CheckFailed("Intrinsic can only be used from functions with the "
6378 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6379 "calling conventions",
6380 &Call);
6381 break;
6382 }
6383
6384 unsigned InactiveIdx = 1;
6385 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6386 "Value for inactive lanes must not have the `inreg` attribute",
6387 &Call);
6388 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6389 "Value for inactive lanes must be a function argument", &Call);
6390 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6391 "Value for inactive lanes must be a VGPR function argument", &Call);
6392 break;
6393 }
6394 case Intrinsic::amdgcn_s_prefetch_data: {
6395 Check(
6396 AMDGPU::isFlatGlobalAddrSpace(
6397 Call.getArgOperand(0)->getType()->getPointerAddressSpace()),
6398 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6399 break;
6400 }
6401 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6402 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6403 Value *Src0 = Call.getArgOperand(0);
6404 Value *Src1 = Call.getArgOperand(1);
6405
6406 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6407 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6408 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6409 Call.getArgOperand(3));
6410 Check(BLGP <= 4, "invalid value for blgp format", Call,
6411 Call.getArgOperand(4));
6412
6413 // AMDGPU::MFMAScaleFormats values
6414 auto getFormatNumRegs = [](unsigned FormatVal) {
6415 switch (FormatVal) {
6416 case 0:
6417 case 1:
6418 return 8u;
6419 case 2:
6420 case 3:
6421 return 6u;
6422 case 4:
6423 return 4u;
6424 default:
6425 llvm_unreachable("invalid format value");
6426 }
6427 };
6428
6429 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6430 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6431 return false;
6432 unsigned NumElts = Ty->getNumElements();
6433 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6434 };
6435
6436 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6437 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6438 Check(isValidSrcASrcBVector(Src0Ty),
6439 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6440 Check(isValidSrcASrcBVector(Src1Ty),
6441 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6442
6443 // Permit excess registers for the format.
6444 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6445 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6446 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6447 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6448 break;
6449 }
6450 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6451 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6452 Value *V = Call.getArgOperand(0);
6453 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6454 Check(RegCount % 8 == 0,
6455 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6456 Check((RegCount >= 24 && RegCount <= 256),
6457 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6458 break;
6459 }
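// Range example (illustrative): reg_count = 80 is accepted (a multiple of 8
// inside [24, 256]); 20 fails the multiple-of-8 check and 16 fails the range
// check.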
6460 case Intrinsic::experimental_convergence_entry:
6461 case Intrinsic::experimental_convergence_anchor:
6462 break;
6463 case Intrinsic::experimental_convergence_loop:
6464 break;
6465 case Intrinsic::ptrmask: {
6466 Type *Ty0 = Call.getArgOperand(0)->getType();
6467 Type *Ty1 = Call.getArgOperand(1)->getType();
6469 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6470 "of pointers",
6471 &Call);
6472 Check(
6473 Ty0->isVectorTy() == Ty1->isVectorTy(),
6474 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6475 &Call);
6476 if (Ty0->isVectorTy())
6477 Check(cast<VectorType>(Ty0)->getElementCount() ==
6478 cast<VectorType>(Ty1)->getElementCount(),
6479 "llvm.ptrmask intrinsic arguments must have the same number of "
6480 "elements",
6481 &Call);
6482 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6483 "llvm.ptrmask intrinsic second argument bitwidth must match "
6484 "pointer index type size of first argument",
6485 &Call);
6486 break;
6487 }
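// Width example (illustrative): with a 64-bit index size for address space 0,
//   call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)
// is well-formed, while an i32 mask would be rejected because the mask width
// must equal the pointer index type size reported by the DataLayout.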
6488 case Intrinsic::threadlocal_address: {
6489 const Value &Arg0 = *Call.getArgOperand(0);
6490 Check(isa<GlobalValue>(Arg0),
6491 "llvm.threadlocal.address first argument must be a GlobalValue");
6492 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6493 "llvm.threadlocal.address operand isThreadLocal() must be true");
6494 break;
6495 }
6496 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cta:
6497 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cluster:
6498 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_gpu:
6499 case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_sys: {
6500 unsigned size = cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue();
6501 Check(size == 128, "The only supported value for the size operand is 128");
6502 break;
6503 }
6504 };
6505
6506 // Verify that there aren't any unmediated control transfers between funclets.
6507 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6508 Function *F = Call.getParent()->getParent();
6509 if (F->hasPersonalityFn() &&
6510 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6511 // Run EH funclet coloring on-demand and cache results for other intrinsic
6512 // calls in this function
6513 if (BlockEHFuncletColors.empty())
6514 BlockEHFuncletColors = colorEHFunclets(*F);
6515
6516 // Check for catch-/cleanup-pad in first funclet block
6517 bool InEHFunclet = false;
6518 BasicBlock *CallBB = Call.getParent();
6519 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6520 assert(CV.size() > 0 && "Uncolored block");
6521 for (BasicBlock *ColorFirstBB : CV)
6522 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6523 InEHFunclet = true;
6524
6525 // Check for funclet operand bundle
6526 bool HasToken = false;
6527 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6528 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6529 HasToken = true;
6530
6531 // This would cause silent code truncation in WinEHPrepare
6532 if (InEHFunclet)
6533 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6534 }
6535 }
6536}
6537
6538/// Carefully grab the subprogram from a local scope.
6539///
6540/// This carefully grabs the subprogram from a local scope, avoiding the
6541/// built-in assertions that would typically fire.
6542 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6543 if (!LocalScope)
6544 return nullptr;
6545
6546 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6547 return SP;
6548
6549 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6550 return getSubprogram(LB->getRawScope());
6551
6552 // Just return null; broken scope chains are checked elsewhere.
6553 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6554 return nullptr;
6555}
6556
6557void Verifier::visit(DbgLabelRecord &DLR) {
6558 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6559 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6560
6561 // Ignore broken !dbg attachments; they're checked elsewhere.
6562 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6563 if (!isa<DILocation>(N))
6564 return;
6565
6566 BasicBlock *BB = DLR.getParent();
6567 Function *F = BB ? BB->getParent() : nullptr;
6568
6569 // The scopes for variables and !dbg attachments must agree.
6570 DILabel *Label = DLR.getLabel();
6571 DILocation *Loc = DLR.getDebugLoc();
6572 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6573
6574 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6575 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6576 if (!LabelSP || !LocSP)
6577 return;
6578
6579 CheckDI(LabelSP == LocSP,
6580 "mismatched subprogram between #dbg_label label and !dbg attachment",
6581 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6582 Loc->getScope()->getSubprogram());
6583}
6584
6585void Verifier::visit(DbgVariableRecord &DVR) {
6586 BasicBlock *BB = DVR.getParent();
6587 Function *F = BB->getParent();
6588
6592 "invalid #dbg record type", &DVR, DVR.getType());
6593
6594 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6595 // DIArgList, or an empty MDNode (which is a legacy representation for an
6596 // "undef" location).
6597 auto *MD = DVR.getRawLocation();
6598 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6599 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6600 "invalid #dbg record address/value", &DVR, MD);
6601 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6602 visitValueAsMetadata(*VAM, F);
6603 else if (auto *AL = dyn_cast<DIArgList>(MD))
6604 visitDIArgList(*AL, F);
6605
6606 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6607 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6608 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6609
6610 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6611 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6612 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6613
6614 if (DVR.isDbgAssign()) {
6615 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6616 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6617 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6618 AreDebugLocsAllowed::No);
6619
6620 const auto *RawAddr = DVR.getRawAddress();
6621 // Similarly to the location above, the address for an assign
6622 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6623 // represents an undef address.
6624 CheckDI(
6625 isa<ValueAsMetadata>(RawAddr) ||
6626 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6627 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6628 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6629 visitValueAsMetadata(*VAM, F);
6630
6631 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6632 "invalid #dbg_assign address expression", &DVR,
6633 DVR.getRawAddressExpression());
6634 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6635
6636 // All of the linked instructions should be in the same function as DVR.
6637 for (Instruction *I : at::getAssignmentInsts(&DVR))
6638 CheckDI(DVR.getFunction() == I->getFunction(),
6639 "inst not in same function as #dbg_assign", I, &DVR);
6640 }
6641
6642 // This check is redundant with one in visitLocalVariable().
6643 DILocalVariable *Var = DVR.getVariable();
6644 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6645 Var->getRawType());
6646
6647 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6648 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6649 &DVR, DLNode);
6650 DILocation *Loc = DVR.getDebugLoc();
6651
6652 // The scopes for variables and !dbg attachments must agree.
6653 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6654 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6655 if (!VarSP || !LocSP)
6656 return; // Broken scope chains are checked elsewhere.
6657
6658 CheckDI(VarSP == LocSP,
6659 "mismatched subprogram between #dbg record variable and DILocation",
6660 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6661 Loc->getScope()->getSubprogram());
6662
6663 verifyFnArgs(DVR);
6664}
6665
6666void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6667 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6668 auto *RetTy = cast<VectorType>(VPCast->getType());
6669 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6670 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6671 "VP cast intrinsic first argument and result vector lengths must be "
6672 "equal",
6673 *VPCast);
6674
6675 switch (VPCast->getIntrinsicID()) {
6676 default:
6677 llvm_unreachable("Unknown VP cast intrinsic");
6678 case Intrinsic::vp_trunc:
6679 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6680 "llvm.vp.trunc intrinsic first argument and result element type "
6681 "must be integer",
6682 *VPCast);
6683 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6684 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6685 "larger than the bit size of the return type",
6686 *VPCast);
6687 break;
6688 case Intrinsic::vp_zext:
6689 case Intrinsic::vp_sext:
6690 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6691 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6692 "element type must be integer",
6693 *VPCast);
6694 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6695 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6696 "argument must be smaller than the bit size of the return type",
6697 *VPCast);
6698 break;
6699 case Intrinsic::vp_fptoui:
6700 case Intrinsic::vp_fptosi:
6701 case Intrinsic::vp_lrint:
6702 case Intrinsic::vp_llrint:
6703 Check(
6704 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6705 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6706 "type must be floating-point and result element type must be integer",
6707 *VPCast);
6708 break;
6709 case Intrinsic::vp_uitofp:
6710 case Intrinsic::vp_sitofp:
6711 Check(
6712 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6713 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6714 "type must be integer and result element type must be floating-point",
6715 *VPCast);
6716 break;
6717 case Intrinsic::vp_fptrunc:
6718 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6719 "llvm.vp.fptrunc intrinsic first argument and result element type "
6720 "must be floating-point",
6721 *VPCast);
6722 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6723 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6724 "larger than the bit size of the return type",
6725 *VPCast);
6726 break;
6727 case Intrinsic::vp_fpext:
6728 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6729 "llvm.vp.fpext intrinsic first argument and result element type "
6730 "must be floating-point",
6731 *VPCast);
6732 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6733 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6734 "smaller than the bit size of the return type",
6735 *VPCast);
6736 break;
6737 case Intrinsic::vp_ptrtoint:
6738 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6739 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6740 "pointer and result element type must be integer",
6741 *VPCast);
6742 break;
6743 case Intrinsic::vp_inttoptr:
6744 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6745 "llvm.vp.inttoptr intrinsic first argument element type must be "
6746 "integer and result element type must be pointer",
6747 *VPCast);
6748 break;
6749 }
6750 }
6751 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6752 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6754 "invalid predicate for VP FP comparison intrinsic", &VPI);
6755 }
6756 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6757 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6759 "invalid predicate for VP integer comparison intrinsic", &VPI);
6760 }
6761 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6762 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6763 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6764 "unsupported bits for llvm.vp.is.fpclass test mask");
6765 }
6766}
6767
6768void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6769 unsigned NumOperands = FPI.getNonMetadataArgCount();
6770 bool HasRoundingMD =
6771 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
6772
6773 // Add the expected number of metadata operands.
6774 NumOperands += (1 + HasRoundingMD);
6775
6776 // Compare intrinsics carry an extra predicate metadata operand.
6777 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6778 NumOperands += 1;
6779 Check((FPI.arg_size() == NumOperands),
6780 "invalid arguments for constrained FP intrinsic", &FPI);
6781
6782 switch (FPI.getIntrinsicID()) {
6783 case Intrinsic::experimental_constrained_lrint:
6784 case Intrinsic::experimental_constrained_llrint: {
6785 Type *ValTy = FPI.getArgOperand(0)->getType();
6786 Type *ResultTy = FPI.getType();
6787 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6788 "Intrinsic does not support vectors", &FPI);
6789 break;
6790 }
6791
6792 case Intrinsic::experimental_constrained_lround:
6793 case Intrinsic::experimental_constrained_llround: {
6794 Type *ValTy = FPI.getArgOperand(0)->getType();
6795 Type *ResultTy = FPI.getType();
6796 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6797 "Intrinsic does not support vectors", &FPI);
6798 break;
6799 }
6800
6801 case Intrinsic::experimental_constrained_fcmp:
6802 case Intrinsic::experimental_constrained_fcmps: {
6803 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6805 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6806 break;
6807 }
6808
6809 case Intrinsic::experimental_constrained_fptosi:
6810 case Intrinsic::experimental_constrained_fptoui: {
6811 Value *Operand = FPI.getArgOperand(0);
6812 ElementCount SrcEC;
6813 Check(Operand->getType()->isFPOrFPVectorTy(),
6814 "Intrinsic first argument must be floating point", &FPI);
6815 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6816 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6817 }
6818
6819 Operand = &FPI;
6820 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6821 "Intrinsic first argument and result disagree on vector use", &FPI);
6822 Check(Operand->getType()->isIntOrIntVectorTy(),
6823 "Intrinsic result must be an integer", &FPI);
6824 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6825 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6826 "Intrinsic first argument and result vector lengths must be equal",
6827 &FPI);
6828 }
6829 break;
6830 }
6831
6832 case Intrinsic::experimental_constrained_sitofp:
6833 case Intrinsic::experimental_constrained_uitofp: {
6834 Value *Operand = FPI.getArgOperand(0);
6835 ElementCount SrcEC;
6836 Check(Operand->getType()->isIntOrIntVectorTy(),
6837 "Intrinsic first argument must be integer", &FPI);
6838 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6839 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6840 }
6841
6842 Operand = &FPI;
6843 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6844 "Intrinsic first argument and result disagree on vector use", &FPI);
6845 Check(Operand->getType()->isFPOrFPVectorTy(),
6846 "Intrinsic result must be a floating point", &FPI);
6847 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6848 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6849 "Intrinsic first argument and result vector lengths must be equal",
6850 &FPI);
6851 }
6852 break;
6853 }
6854
6855 case Intrinsic::experimental_constrained_fptrunc:
6856 case Intrinsic::experimental_constrained_fpext: {
6857 Value *Operand = FPI.getArgOperand(0);
6858 Type *OperandTy = Operand->getType();
6859 Value *Result = &FPI;
6860 Type *ResultTy = Result->getType();
6861 Check(OperandTy->isFPOrFPVectorTy(),
6862 "Intrinsic first argument must be FP or FP vector", &FPI);
6863 Check(ResultTy->isFPOrFPVectorTy(),
6864 "Intrinsic result must be FP or FP vector", &FPI);
6865 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6866 "Intrinsic first argument and result disagree on vector use", &FPI);
6867 if (OperandTy->isVectorTy()) {
6868 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6869 cast<VectorType>(ResultTy)->getElementCount(),
6870 "Intrinsic first argument and result vector lengths must be equal",
6871 &FPI);
6872 }
6873 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6874 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6875 "Intrinsic first argument's type must be larger than result type",
6876 &FPI);
6877 } else {
6878 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6879 "Intrinsic first argument's type must be smaller than result type",
6880 &FPI);
6881 }
6882 break;
6883 }
6884
6885 default:
6886 break;
6887 }
6888
6889 // If a non-metadata argument is passed in a metadata slot then the
6890 // error will be caught earlier when the incorrect argument doesn't
6891 // match the specification in the intrinsic call table. Thus, no
6892 // argument type check is needed here.
6893
6894 Check(FPI.getExceptionBehavior().has_value(),
6895 "invalid exception behavior argument", &FPI);
6896 if (HasRoundingMD) {
6897 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6898 &FPI);
6899 }
6900}
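// Operand-count example (illustrative): llvm.experimental.constrained.fadd has
// two non-metadata operands plus rounding-mode and exception-behavior metadata
// (2 + 1 + 1 = 4 arguments), while the constrained fcmp variants replace the
// rounding mode with a predicate metadata operand (also 4 arguments).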
6901
6902void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6903 auto *MD = DII.getRawLocation();
6904 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6905 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6906 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6907 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6908 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6909 DII.getRawVariable());
6910 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6911 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6912 DII.getRawExpression());
6913
6914 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6915 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6916 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6917 DAI->getRawAssignID());
6918 const auto *RawAddr = DAI->getRawAddress();
6919 CheckDI(
6920 isa<ValueAsMetadata>(RawAddr) ||
6921 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6922 "invalid llvm.dbg.assign intrinsic address", &DII,
6923 DAI->getRawAddress());
6924 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6925 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6926 DAI->getRawAddressExpression());
6927 // All of the linked instructions should be in the same function as DII.
6928 for (Instruction *I : at::getAssignmentInsts(DAI))
6929 CheckDI(DAI->getFunction() == I->getFunction(),
6930 "inst not in same function as dbg.assign", I, DAI);
6931 }
6932
6933 // Ignore broken !dbg attachments; they're checked elsewhere.
6934 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6935 if (!isa<DILocation>(N))
6936 return;
6937
6938 BasicBlock *BB = DII.getParent();
6939 Function *F = BB ? BB->getParent() : nullptr;
6940
6941 // The scopes for variables and !dbg attachments must agree.
6942 DILocalVariable *Var = DII.getVariable();
6943 DILocation *Loc = DII.getDebugLoc();
6944 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6945 &DII, BB, F);
6946
6947 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6948 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6949 if (!VarSP || !LocSP)
6950 return; // Broken scope chains are checked elsewhere.
6951
6952 CheckDI(VarSP == LocSP,
6953 "mismatched subprogram between llvm.dbg." + Kind +
6954 " variable and !dbg attachment",
6955 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6956 Loc->getScope()->getSubprogram());
6957
6958 // This check is redundant with one in visitLocalVariable().
6959 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6960 Var->getRawType());
6961 verifyFnArgs(DII);
6962}
6963
6964void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6965 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6966 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6967 DLI.getRawLabel());
6968
6969 // Ignore broken !dbg attachments; they're checked elsewhere.
6970 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6971 if (!isa<DILocation>(N))
6972 return;
6973
6974 BasicBlock *BB = DLI.getParent();
6975 Function *F = BB ? BB->getParent() : nullptr;
6976
6977 // The scopes for variables and !dbg attachments must agree.
6978 DILabel *Label = DLI.getLabel();
6979 DILocation *Loc = DLI.getDebugLoc();
6980 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6981 BB, F);
6982
6983 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6984 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6985 if (!LabelSP || !LocSP)
6986 return;
6987
6988 CheckDI(LabelSP == LocSP,
6989 "mismatched subprogram between llvm.dbg." + Kind +
6990 " label and !dbg attachment",
6991 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6992 Loc->getScope()->getSubprogram());
6993}
6994
6995void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6996 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6997 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6998
6999 // We don't know whether this intrinsic verified correctly.
7000 if (!V || !E || !E->isValid())
7001 return;
7002
7003 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7004 auto Fragment = E->getFragmentInfo();
7005 if (!Fragment)
7006 return;
7007
7008 // The frontend helps out GDB by emitting the members of local anonymous
7009 // unions as artificial local variables with shared storage. When SROA splits
7010 // the storage for artificial local variables that are smaller than the entire
7011 // union, the overhang piece will be outside of the allotted space for the
7012 // variable and this check fails.
7013 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7014 if (V->isArtificial())
7015 return;
7016
7017 verifyFragmentExpression(*V, *Fragment, &I);
7018}
7019void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7020 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7021 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7022
7023 // We don't know whether this intrinsic verified correctly.
7024 if (!V || !E || !E->isValid())
7025 return;
7026
7027 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7028 auto Fragment = E->getFragmentInfo();
7029 if (!Fragment)
7030 return;
7031
7032 // The frontend helps out GDB by emitting the members of local anonymous
7033 // unions as artificial local variables with shared storage. When SROA splits
7034 // the storage for artificial local variables that are smaller than the entire
7035 // union, the overhang piece will be outside of the allotted space for the
7036 // variable and this check fails.
7037 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7038 if (V->isArtificial())
7039 return;
7040
7041 verifyFragmentExpression(*V, *Fragment, &DVR);
7042}
7043
7044template <typename ValueOrMetadata>
7045void Verifier::verifyFragmentExpression(const DIVariable &V,
7046 DIExpression::FragmentInfo Fragment,
7047 ValueOrMetadata *Desc) {
7048 // If there's no size, the type is broken, but that should be checked
7049 // elsewhere.
7050 auto VarSize = V.getSizeInBits();
7051 if (!VarSize)
7052 return;
7053
7054 unsigned FragSize = Fragment.SizeInBits;
7055 unsigned FragOffset = Fragment.OffsetInBits;
7056 CheckDI(FragSize + FragOffset <= *VarSize,
7057 "fragment is larger than or outside of variable", Desc, &V);
7058 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7059}
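// Bounds example (illustrative): for a 64-bit variable, a DW_OP_LLVM_fragment
// of offset 32 and size 32 passes both checks; offset 48 with size 32 extends
// past the variable, and offset 0 with size 64 covers the entire variable, so
// both are flagged.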
7060
7061void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
7062 // This function does not take the scope of noninlined function arguments into
7063 // account. Don't run it if the current function is nodebug, because it may
7064 // contain inlined debug intrinsics.
7065 if (!HasDebugInfo)
7066 return;
7067
7068 // For performance reasons only check non-inlined ones.
7069 if (I.getDebugLoc()->getInlinedAt())
7070 return;
7071
7072 DILocalVariable *Var = I.getVariable();
7073 CheckDI(Var, "dbg intrinsic without variable");
7074
7075 unsigned ArgNo = Var->getArg();
7076 if (!ArgNo)
7077 return;
7078
7079 // Verify there are no duplicate function argument debug info entries.
7080 // These will cause hard-to-debug assertions in the DWARF backend.
7081 if (DebugFnArgs.size() < ArgNo)
7082 DebugFnArgs.resize(ArgNo, nullptr);
7083
7084 auto *Prev = DebugFnArgs[ArgNo - 1];
7085 DebugFnArgs[ArgNo - 1] = Var;
7086 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
7087 Prev, Var);
7088}
7089void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7090 // This function does not take the scope of noninlined function arguments into
7091 // account. Don't run it if the current function is nodebug, because it may
7092 // contain inlined debug intrinsics.
7093 if (!HasDebugInfo)
7094 return;
7095
7096 // For performance reasons only check non-inlined ones.
7097 if (DVR.getDebugLoc()->getInlinedAt())
7098 return;
7099
7100 DILocalVariable *Var = DVR.getVariable();
7101 CheckDI(Var, "#dbg record without variable");
7102
7103 unsigned ArgNo = Var->getArg();
7104 if (!ArgNo)
7105 return;
7106
7107 // Verify there are no duplicate function argument debug info entries.
7108 // These will cause hard-to-debug assertions in the DWARF backend.
7109 if (DebugFnArgs.size() < ArgNo)
7110 DebugFnArgs.resize(ArgNo, nullptr);
7111
7112 auto *Prev = DebugFnArgs[ArgNo - 1];
7113 DebugFnArgs[ArgNo - 1] = Var;
7114 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7115 Prev, Var);
7116}
7117
7118void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
7119 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
7120
7121 // We don't know whether this intrinsic verified correctly.
7122 if (!E || !E->isValid())
7123 return;
7124
7125 if (isa<ValueAsMetadata>(I.getRawLocation())) {
7126 Value *VarValue = I.getVariableLocationOp(0);
7127 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7128 return;
7129 // We allow EntryValues for swift async arguments, as they have an
7130 // ABI-guarantee to be turned into a specific register.
7131 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7132 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7133 return;
7134 }
7135
7136 CheckDI(!E->isEntryValue(),
7137 "Entry values are only allowed in MIR unless they target a "
7138 "swiftasync Argument",
7139 &I);
7140}
7141void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7142 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7143
7144 // We don't know whether this intrinsic verified correctly.
7145 if (!E || !E->isValid())
7146 return;
7147
7148 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7149 Value *VarValue = DVR.getVariableLocationOp(0);
7150 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7151 return;
7152 // We allow EntryValues for swift async arguments, as they have an
7153 // ABI-guarantee to be turned into a specific register.
7154 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7155 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7156 return;
7157 }
7158
7159 CheckDI(!E->isEntryValue(),
7160 "Entry values are only allowed in MIR unless they target a "
7161 "swiftasync Argument",
7162 &DVR);
7163}
7164
7165void Verifier::verifyCompileUnits() {
7166 // When more than one Module is imported into the same context, such as during
7167 // an LTO build before linking the modules, ODR type uniquing may cause types
7168 // to point to a different CU. This check does not make sense in this case.
7169 if (M.getContext().isODRUniquingDebugTypes())
7170 return;
7171 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7172 SmallPtrSet<const Metadata *, 2> Listed;
7173 if (CUs)
7174 Listed.insert(CUs->op_begin(), CUs->op_end());
7175 for (const auto *CU : CUVisited)
7176 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7177 CUVisited.clear();
7178}
7179
7180void Verifier::verifyDeoptimizeCallingConvs() {
7181 if (DeoptimizeDeclarations.empty())
7182 return;
7183
7184 const Function *First = DeoptimizeDeclarations[0];
7185 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7186 Check(First->getCallingConv() == F->getCallingConv(),
7187 "All llvm.experimental.deoptimize declarations must have the same "
7188 "calling convention",
7189 First, F);
7190 }
7191}
7192
7193void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7194 const OperandBundleUse &BU) {
7195 FunctionType *FTy = Call.getFunctionType();
7196
7197 Check((FTy->getReturnType()->isPointerTy() ||
7198 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7199 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7200 "function returning a pointer or a non-returning function that has a "
7201 "void return type",
7202 Call);
7203
7204 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7205 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7206 "an argument",
7207 Call);
7208
7209 auto *Fn = cast<Function>(BU.Inputs.front());
7210 Intrinsic::ID IID = Fn->getIntrinsicID();
7211
7212 if (IID) {
7213 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7214 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7215 "invalid function argument", Call);
7216 } else {
7217 StringRef FnName = Fn->getName();
7218 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7219 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7220 "invalid function argument", Call);
7221 }
7222}
7223
7224void Verifier::verifyNoAliasScopeDecl() {
7225 if (NoAliasScopeDecls.empty())
7226 return;
7227
7228 // Each intrinsic must declare only a single scope at a time.
7229 for (auto *II : NoAliasScopeDecls) {
7230 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7231 "Not a llvm.experimental.noalias.scope.decl ?");
7232 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7233 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7234 Check(ScopeListMV != nullptr,
7235 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7236 "argument",
7237 II);
7238
7239 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7240 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7241 Check(ScopeListMD->getNumOperands() == 1,
7242 "!id.scope.list must point to a list with a single scope", II);
7243 visitAliasScopeListMetadata(ScopeListMD);
7244 }
7245
7246 // Only check the domination rule when requested. Once all passes have been
7247 // adapted this option can go away.
7248 if (!VerifyNoAliasScopeDomination)
7249 return;
7250
7251 // Now sort the intrinsics based on the scope MDNode so that declarations of
7252 // the same scopes are next to each other.
7253 auto GetScope = [](IntrinsicInst *II) {
7254 const auto *ScopeListMV = cast<MetadataAsValue>(
7255 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7256 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7257 };
7258
7259 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7260 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7261 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7262 return GetScope(Lhs) < GetScope(Rhs);
7263 };
7264
7265 llvm::sort(NoAliasScopeDecls, Compare);
7266
7267 // Go over the intrinsics and check that for the same scope, they are not
7268 // dominating each other.
7269 auto ItCurrent = NoAliasScopeDecls.begin();
7270 while (ItCurrent != NoAliasScopeDecls.end()) {
7271 auto CurScope = GetScope(*ItCurrent);
7272 auto ItNext = ItCurrent;
7273 do {
7274 ++ItNext;
7275 } while (ItNext != NoAliasScopeDecls.end() &&
7276 GetScope(*ItNext) == CurScope);
7277
7278 // [ItCurrent, ItNext) represents the declarations for the same scope.
7279 // Ensure they are not dominating each other, but only if it is not too
7280 // expensive.
7281 if (ItNext - ItCurrent < 32)
7282 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7283 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7284 if (I != J)
7285 Check(!DT.dominates(I, J),
7286 "llvm.experimental.noalias.scope.decl dominates another one "
7287 "with the same scope",
7288 I);
7289 ItCurrent = ItNext;
7290 }
7291}
7292
7293//===----------------------------------------------------------------------===//
7294// Implement the public interfaces to this file...
7295//===----------------------------------------------------------------------===//
7296
7297 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7298 Function &F = const_cast<Function &>(f);
7299
7300 // Don't use a raw_null_ostream. Printing IR is expensive.
7301 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7302
7303 // Note that this function's return value is inverted from what you would
7304 // expect of a function called "verify".
7305 return !V.verify(F);
7306}
7307
7308 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7309 bool *BrokenDebugInfo) {
7310 // Don't use a raw_null_ostream. Printing IR is expensive.
7311 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7312
7313 bool Broken = false;
7314 for (const Function &F : M)
7315 Broken |= !V.verify(F);
7316
7317 Broken |= !V.verify();
7318 if (BrokenDebugInfo)
7319 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7320 // Note that this function's return value is inverted from what you would
7321 // expect of a function called "verify".
7322 return Broken;
7323}
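// Usage sketch (illustrative, not part of the original file):
//   bool BrokenDI = false;
//   if (verifyModule(M, &errs(), &BrokenDI))
//     report_fatal_error("invalid module");
//   // BrokenDI is set when only the debug info is malformed.
// Note the inverted sense documented above: a true return value means the
// module is broken.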
7324
7325namespace {
7326
7327struct VerifierLegacyPass : public FunctionPass {
7328 static char ID;
7329
7330 std::unique_ptr<Verifier> V;
7331 bool FatalErrors = true;
7332
7333 VerifierLegacyPass() : FunctionPass(ID) {
7334 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7335 }
7336 explicit VerifierLegacyPass(bool FatalErrors)
7337 : FunctionPass(ID),
7338 FatalErrors(FatalErrors) {
7339 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7340 }
7341
7342 bool doInitialization(Module &M) override {
7343 V = std::make_unique<Verifier>(
7344 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7345 return false;
7346 }
7347
7348 bool runOnFunction(Function &F) override {
7349 if (!V->verify(F) && FatalErrors) {
7350 errs() << "in function " << F.getName() << '\n';
7351 report_fatal_error("Broken function found, compilation aborted!");
7352 }
7353 return false;
7354 }
7355
7356 bool doFinalization(Module &M) override {
7357 bool HasErrors = false;
7358 for (Function &F : M)
7359 if (F.isDeclaration())
7360 HasErrors |= !V->verify(F);
7361
7362 HasErrors |= !V->verify();
7363 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7364 report_fatal_error("Broken module found, compilation aborted!");
7365 return false;
7366 }
7367
7368 void getAnalysisUsage(AnalysisUsage &AU) const override {
7369 AU.setPreservesAll();
7370 }
7371};
7372
7373} // end anonymous namespace
7374
7375/// Helper to issue failure from the TBAA verification
7376template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7377 if (Diagnostic)
7378 return Diagnostic->CheckFailed(Args...);
7379}
7380
7381#define CheckTBAA(C, ...) \
7382 do { \
7383 if (!(C)) { \
7384 CheckFailed(__VA_ARGS__); \
7385 return false; \
7386 } \
7387 } while (false)
7388
7389/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7390/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7391/// struct-type node describing an aggregate data structure (like a struct).
7392TBAAVerifier::TBAABaseNodeSummary
7393TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7394 bool IsNewFormat) {
7395 if (BaseNode->getNumOperands() < 2) {
7396 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7397 return {true, ~0u};
7398 }
7399
7400 auto Itr = TBAABaseNodes.find(BaseNode);
7401 if (Itr != TBAABaseNodes.end())
7402 return Itr->second;
7403
7404 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7405 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7406 (void)InsertResult;
7407 assert(InsertResult.second && "We just checked!");
7408 return Result;
7409}
7410
7411TBAAVerifier::TBAABaseNodeSummary
7412TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7413 bool IsNewFormat) {
7414 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7415
7416 if (BaseNode->getNumOperands() == 2) {
7417 // Scalar nodes can only be accessed at offset 0.
7418 return isValidScalarTBAANode(BaseNode)
7419 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7420 : InvalidNode;
7421 }
7422
7423 if (IsNewFormat) {
7424 if (BaseNode->getNumOperands() % 3 != 0) {
7425 CheckFailed("Access tag nodes must have the number of operands that is a "
7426 "multiple of 3!", BaseNode);
7427 return InvalidNode;
7428 }
7429 } else {
7430 if (BaseNode->getNumOperands() % 2 != 1) {
7431 CheckFailed("Struct tag nodes must have an odd number of operands!",
7432 BaseNode);
7433 return InvalidNode;
7434 }
7435 }
7436
7437 // Check the type size field.
7438 if (IsNewFormat) {
7439 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7440 BaseNode->getOperand(1));
7441 if (!TypeSizeNode) {
7442 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7443 return InvalidNode;
7444 }
7445 }
7446
7447 // Check the type name field. In the new format it can be anything.
7448 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7449 CheckFailed("Struct tag nodes have a string as their first operand",
7450 BaseNode);
7451 return InvalidNode;
7452 }
7453
7454 bool Failed = false;
7455
7456 std::optional<APInt> PrevOffset;
7457 unsigned BitWidth = ~0u;
7458
7459 // We've already checked that BaseNode is not a degenerate root node with one
7460 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7461 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7462 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7463 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7464 Idx += NumOpsPerField) {
7465 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7466 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7467 if (!isa<MDNode>(FieldTy)) {
7468 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7469 Failed = true;
7470 continue;
7471 }
7472
7473 auto *OffsetEntryCI =
7474 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7475 if (!OffsetEntryCI) {
7476 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7477 Failed = true;
7478 continue;
7479 }
7480
7481 if (BitWidth == ~0u)
7482 BitWidth = OffsetEntryCI->getBitWidth();
7483
7484 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7485 CheckFailed(
7486 "Bitwidth between the offsets and struct type entries must match", &I,
7487 BaseNode);
7488 Failed = true;
7489 continue;
7490 }
7491
7492 // NB! As far as I can tell, we generate a non-strictly increasing offset
7493 // sequence only from structs that have zero size bit fields. When
7494 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7495 // pick the field lexically the latest in struct type metadata node. This
7496 // mirrors the actual behavior of the alias analysis implementation.
7497 bool IsAscending =
7498 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7499
7500 if (!IsAscending) {
7501 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7502 Failed = true;
7503 }
7504
7505 PrevOffset = OffsetEntryCI->getValue();
7506
7507 if (IsNewFormat) {
7508 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7509 BaseNode->getOperand(Idx + 2));
7510 if (!MemberSizeNode) {
7511 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7512 Failed = true;
7513 continue;
7514 }
7515 }
7516 }
7517
7518 return Failed ? InvalidNode
7519 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7520}
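//
// (Editor's note -- illustrative sketch, not part of Verifier.cpp; the type
// names are made up, the shapes are what the checks above accept.)
//
// Old format, struct-type base node: a name string followed by
// (field type, field offset) pairs with non-decreasing offsets.  For
// struct S { int a; int b; }:
//
//   !0 = !{!"Simple C/C++ TBAA"}              ; root
//   !1 = !{!"omnipotent char", !0, i64 0}     ; scalar type node
//   !2 = !{!"int", !1, i64 0}                 ; scalar type node
//   !3 = !{!"S", !2, i64 0, !2, i64 4}        ; struct-type node, odd operand count
//
// New format, struct-type base node: (parent, size, id) followed by
// (field type, field offset, field size) triples, so the operand count is a
// multiple of 3:
//
//   !4 = !{!0, i64 1, !"omnipotent char"}
//   !5 = !{!4, i64 4, !"int"}
//   !6 = !{!4, i64 8, !"S", !5, i64 0, i64 4, !5, i64 4, i64 4}
//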
7521
7522static bool IsRootTBAANode(const MDNode *MD) {
7523 return MD->getNumOperands() < 2;
7524}
7525
7526static bool IsScalarTBAANodeImpl(const MDNode *MD,
7527 SmallPtrSetImpl<const MDNode *> &Visited) {
7528 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7529 return false;
7530
7531 if (!isa<MDString>(MD->getOperand(0)))
7532 return false;
7533
7534 if (MD->getNumOperands() == 3) {
7535 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7536 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7537 return false;
7538 }
7539
7540 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7541 return Parent && Visited.insert(Parent).second &&
7542 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7543}
7544
7545bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7546 auto ResultIt = TBAAScalarNodes.find(MD);
7547 if (ResultIt != TBAAScalarNodes.end())
7548 return ResultIt->second;
7549
7550 SmallPtrSet<const MDNode *, 4> Visited;
7551 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7552 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7553 (void)InsertResult;
7554 assert(InsertResult.second && "Just checked!");
7555
7556 return Result;
7557}
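//
// (Editor's note -- illustrative sketch, not part of Verifier.cpp.)  A valid
// scalar node has two or three operands: a name string, a parent node, and
// optionally a zero offset; its parent chain must terminate in a root node
// (fewer than two operands) without revisiting a node:
//
//   !0 = !{!"Simple C/C++ TBAA"}              ; root
//   !1 = !{!"omnipotent char", !0, i64 0}     ; scalar, parent !0
//   !2 = !{!"int", !1, i64 0}                 ; scalar, parent !1
//
// The Visited set rejects cyclic parent chains, and isValidScalarTBAANode
// caches the verdict per node in TBAAScalarNodes.
//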
7558
7559/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7560/// Offset in place to be the offset within the field node returned.
7561///
7562/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7563MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7564 const MDNode *BaseNode,
7565 APInt &Offset,
7566 bool IsNewFormat) {
7567 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7568
7569 // Scalar nodes have only one possible "field" -- their parent in the access
7570 // hierarchy. Offset must be zero at this point, but our caller is supposed
7571 // to check that.
7572 if (BaseNode->getNumOperands() == 2)
7573 return cast<MDNode>(BaseNode->getOperand(1));
7574
7575 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7576 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7577 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7578 Idx += NumOpsPerField) {
7579 auto *OffsetEntryCI =
7580 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7581 if (OffsetEntryCI->getValue().ugt(Offset)) {
7582 if (Idx == FirstFieldOpNo) {
7583 CheckFailed("Could not find TBAA parent in struct type node", &I,
7584 BaseNode, &Offset);
7585 return nullptr;
7586 }
7587
7588 unsigned PrevIdx = Idx - NumOpsPerField;
7589 auto *PrevOffsetEntryCI =
7590 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7591 Offset -= PrevOffsetEntryCI->getValue();
7592 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7593 }
7594 }
7595
7596 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7597 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7598 BaseNode->getOperand(LastIdx + 1));
7599 Offset -= LastOffsetEntryCI->getValue();
7600 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7601}
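//
// (Editor's note -- worked example, not part of Verifier.cpp.)  For the
// old-format struct-type node !3 = !{!"S", !2, i64 0, !2, i64 4} from above:
//
//   * Offset = 3: the field at offset 4 is the first one past 3, so the
//     preceding field (offset 0, type !2) is returned and Offset stays 3.
//   * Offset = 6: no field starts above 6, so the last field (offset 4,
//     type !2) is returned and Offset is rewritten to 6 - 4 = 2, i.e. the
//     offset within that field.
//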
7602
7603static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7604 if (!Type || Type->getNumOperands() < 3)
7605 return false;
7606
7607 // In the new format type nodes shall have a reference to the parent type as
7608 // its first operand.
7609 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7610}
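// (Editor's note, not part of Verifier.cpp.)  The format is inferred from the
// access type itself: an old-format type node starts with a name string, e.g.
// !{!"int", !parent, i64 0}, while a new-format node starts with its parent,
// e.g. !{!parent, i64 4, !"int"}, so an MDNode in operand 0 of a node with at
// least three operands signals the new format.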
7611
7612bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7613 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7614 &I, MD);
7615
7616 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7617 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7618 isa<AtomicCmpXchgInst>(I),
7619 "This instruction shall not have a TBAA access tag!", &I);
7620
7621 bool IsStructPathTBAA =
7622 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7623
7624 CheckTBAA(IsStructPathTBAA,
7625 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7626 &I);
7627
7628 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7629 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7630
7631 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7632
7633 if (IsNewFormat) {
7634 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7635 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7636 } else {
7637 CheckTBAA(MD->getNumOperands() < 5,
7638 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7639 }
7640
7641 // Check the access size field.
7642 if (IsNewFormat) {
7643 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7644 MD->getOperand(3));
7645 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7646 }
7647
7648 // Check the immutability flag.
7649 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7650 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7651 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7652 MD->getOperand(ImmutabilityFlagOpNo));
7653 CheckTBAA(IsImmutableCI,
7654 "Immutability tag on struct tag metadata must be a constant", &I,
7655 MD);
7656 CheckTBAA(
7657 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7658 "Immutability part of the struct tag metadata must be either 0 or 1",
7659 &I, MD);
7660 }
7661
7662 CheckTBAA(BaseNode && AccessType,
7663 "Malformed struct tag metadata: base and access-type "
7664 "should be non-null and point to Metadata nodes",
7665 &I, MD, BaseNode, AccessType);
7666
7667 if (!IsNewFormat) {
7668 CheckTBAA(isValidScalarTBAANode(AccessType),
7669 "Access type node must be a valid scalar type", &I, MD,
7670 AccessType);
7671 }
7672
7673 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7674 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7675
7676 APInt Offset = OffsetCI->getValue();
7677 bool SeenAccessTypeInPath = false;
7678
7679 SmallPtrSet<MDNode *, 4> StructPath;
7680
7681 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7682 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7683 IsNewFormat)) {
7684 if (!StructPath.insert(BaseNode).second) {
7685 CheckFailed("Cycle detected in struct path", &I, MD);
7686 return false;
7687 }
7688
7689 bool Invalid;
7690 unsigned BaseNodeBitWidth;
7691 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7692 IsNewFormat);
7693
7694 // If the base node is invalid in itself, then we've already printed all the
7695 // errors we wanted to print.
7696 if (Invalid)
7697 return false;
7698
7699 SeenAccessTypeInPath |= BaseNode == AccessType;
7700
7701 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7702 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7703 &I, MD, &Offset);
7704
7705 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7706 (BaseNodeBitWidth == 0 && Offset == 0) ||
7707 (IsNewFormat && BaseNodeBitWidth == ~0u),
7708 "Access bit-width not the same as description bit-width", &I, MD,
7709 BaseNodeBitWidth, Offset.getBitWidth());
7710
7711 if (IsNewFormat && SeenAccessTypeInPath)
7712 break;
7713 }
7714
7715 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7716 MD);
7717 return true;
7718}
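//
// (Editor's note -- a minimal, self-contained sketch, not part of
// Verifier.cpp.  It builds the classic struct-path scalar access tag
// accepted by the routine above using only public IR APIs and then runs the
// module verifier; the module, function, and type-name strings are made up.)
//
#if 0
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("tbaa-demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I64 = Type::getInt64Ty(Ctx);
  auto CI64 = [&](uint64_t V) -> Metadata * {
    return ConstantAsMetadata::get(ConstantInt::get(I64, V));
  };

  // Scalar type DAG: root <- "omnipotent char" <- "int".
  MDNode *Root = MDNode::get(Ctx, {MDString::get(Ctx, "Simple C/C++ TBAA")});
  MDNode *Char =
      MDNode::get(Ctx, {MDString::get(Ctx, "omnipotent char"), Root, CI64(0)});
  MDNode *Int = MDNode::get(Ctx, {MDString::get(Ctx, "int"), Char, CI64(0)});
  // Old-format access tag: base type, access type, offset.
  MDNode *Tag = MDNode::get(Ctx, {Int, Int, CI64(0)});

  // A function that loads an i32 through its pointer argument, with the
  // access tag attached to the load.
  FunctionType *FT =
      FunctionType::get(I32, {PointerType::getUnqual(Ctx)}, false);
  Function *F =
      Function::Create(FT, Function::ExternalLinkage, "read_int", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  LoadInst *LI = B.CreateLoad(I32, F->getArg(0));
  LI->setMetadata(LLVMContext::MD_tbaa, Tag);
  B.CreateRet(LI);

  // The TBAAVerifier runs as part of the module verifier; a well-formed tag
  // leaves the module unbroken.
  return verifyModule(M, &errs()) ? 1 : 0;
}
#endif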
7719
7720char VerifierLegacyPass::ID = 0;
7721INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7722
7723FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7724 return new VerifierLegacyPass(FatalErrors);
7725}
7726
7727AnalysisKey VerifierAnalysis::Key;
7728VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7729 ModuleAnalysisManager &) {
7730 Result Res;
7731 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7732 return Res;
7733}
7734
7735VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7736 FunctionAnalysisManager &) {
7737 return { llvm::verifyFunction(F, &dbgs()), false };
7738}
7739
7740PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7741 auto Res = AM.getResult<VerifierAnalysis>(M);
7742 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7743 report_fatal_error("Broken module found, compilation aborted!");
7744
7745 return PreservedAnalyses::all();
7746}
7747
7748PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7749 auto res = AM.getResult<VerifierAnalysis>(F);
7750 if (res.IRBroken && FatalErrors)
7751 report_fatal_error("Broken function found, compilation aborted!");
7752
7753 return PreservedAnalyses::all();
7754}
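//
// (Editor's note -- usage sketch, not part of Verifier.cpp.  It exercises the
// public entry points declared in Verifier.h and defined above; the wrapper
// function checkModule is hypothetical.)
//
#if 0
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Returns true if M is well formed (treating broken debug info as recoverable).
static bool checkModule(Module &M) {
  // a) Free functions, handy while debugging a transform.
  bool BrokenDebugInfo = false;
  if (verifyModule(M, &errs(), &BrokenDebugInfo))
    return false;
  if (BrokenDebugInfo)
    errs() << "debug info is broken but can be stripped\n";
  for (Function &F : M)
    if (!F.isDeclaration() && verifyFunction(F, &errs()))
      return false;

  // b) Legacy pass manager: with FatalErrors=true the pass calls
  //    report_fatal_error on a broken module.
  legacy::PassManager PM;
  PM.add(createVerifierPass(/*FatalErrors=*/false));
  PM.run(M);

  // c) New pass manager: add VerifierPass() to a ModulePassManager; it
  //    queries VerifierAnalysis (see VerifierPass::run above).
  return true;
}
#endif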