LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
127#include <algorithm>
128#include <cassert>
129#include <cstdint>
130#include <memory>
131#include <optional>
132#include <string>
133#include <utility>
134
135using namespace llvm;
136
// NOTE(review): the head of this declaration (presumably
// "static cl::opt<bool> VerifyNoAliasScopeDomination(") is missing from this
// view -- restore from the upstream file before compiling.
    "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
    cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
             "scopes are not dominating"));
141
  // NOTE(review): the enclosing struct/class header and several member
  // declarations (the raw_ostream *OS, ModuleSlotTracker MST, DataLayout and
  // LLVMContext members that the constructor initializers below bind) are
  // missing from this view -- confirm against the upstream file.
  const Module &M;
  const Triple &TT;

  /// Track the brokenness of the module while recursively visiting.
  bool Broken = false;
  /// Broken debug info can be "recovered" from by stripping the debug info.
  bool BrokenDebugInfo = false;
  /// Whether to treat broken debug info as an error.
  // NOTE(review): the flag declaration this comment documents (presumably a
  // bool TreatBrokenDebugInfoAsError member) is missing from this view.

  // NOTE(review): the constructor signature line is missing from this view;
  // only its initializer list and empty body survive.
      : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
        Context(M.getContext()) {}
160
161private:
162 void Write(const Module *M) {
163 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
164 }
165
166 void Write(const Value *V) {
167 if (V)
168 Write(*V);
169 }
170
171 void Write(const Value &V) {
172 if (isa<Instruction>(V)) {
173 V.print(*OS, MST);
174 *OS << '\n';
175 } else {
176 V.printAsOperand(*OS, true, MST);
177 *OS << '\n';
178 }
179 }
180
181 void Write(const DbgRecord *DR) {
182 if (DR) {
183 DR->print(*OS, MST, false);
184 *OS << '\n';
185 }
186 }
187
  // NOTE(review): the enclosing function signature (a Write overload taking a
  // debug-record location-type enum, judging by the strings below) and every
  // `case` label of this switch are missing from this view -- restore them
  // from the upstream file before compiling.
  switch (Type) {
    *OS << "value";
    break;
    *OS << "declare";
    break;
    *OS << "declare_value";
    break;
    *OS << "assign";
    break;
    *OS << "end";
    break;
    *OS << "any";
    break;
  };
  }
210
211 void Write(const Metadata *MD) {
212 if (!MD)
213 return;
214 MD->print(*OS, MST, &M);
215 *OS << '\n';
216 }
217
  /// Unwrap a typed metadata-tuple wrapper and print the underlying tuple.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }

  /// Print a named metadata node (no-op on null), followed by a newline.
  void Write(const NamedMDNode *NMD) {
    if (!NMD)
      return;
    NMD->print(*OS, MST);
    *OS << '\n';
  }
228
  /// Print a type, preceded by a space (no-op on null).
  void Write(Type *T) {
    if (!T)
      return;
    *OS << ' ' << *T;
  }

  /// Print a comdat (no-op on null).
  void Write(const Comdat *C) {
    if (!C)
      return;
    *OS << *C;
  }

  /// Print an arbitrary-precision integer (no-op on null).
  void Write(const APInt *AI) {
    if (!AI)
      return;
    *OS << *AI << '\n';
  }

  /// Print an unsigned integer, followed by a newline.
  void Write(const unsigned i) { *OS << i << '\n'; }
248
  /// Print an attribute as a string (no-op on null).
  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const Attribute *A) {
    if (!A)
      return;
    *OS << A->getAsString() << '\n';
  }

  /// Print an attribute set as a string (no-op on null).
  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const AttributeSet *AS) {
    if (!AS)
      return;
    *OS << AS->getAsString() << '\n';
  }

  /// Print a full attribute list (no-op on null).
  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const AttributeList *AL) {
    if (!AL)
      return;
    AL->print(*OS);
  }

  /// Print a lazily-formatted Printable, followed by a newline.
  void Write(Printable P) { *OS << P << '\n'; }
271
  /// Print each element of an array, dispatching to the matching overload.
  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }

  /// Print a heterogeneous pack of values one at a time, recursing through
  /// the Write overloads above.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  /// Recursion base case: nothing left to print.
  template <typename... Ts> void WriteTs() {}
284
285public:
286 /// A check failed, so printout out the condition and the message.
287 ///
288 /// This provides a nice place to put a breakpoint if you want to see why
289 /// something is not correct.
290 void CheckFailed(const Twine &Message) {
291 if (OS)
292 *OS << Message << '\n';
293 Broken = true;
294 }
295
296 /// A check failed (with values to print).
297 ///
298 /// This calls the Message-only version so that the above is easier to set a
299 /// breakpoint on.
300 template <typename T1, typename... Ts>
301 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
302 CheckFailed(Message);
303 if (OS)
304 WriteTs(V1, Vs...);
305 }
306
  /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // NOTE(review): one line is missing here in this view; given the
    // "treat broken debug info as an error" flag documented above, it
    // presumably escalates Broken -- confirm against the upstream file.
    BrokenDebugInfo = true;
  }
314
315 /// A debug info check failed (with values to print).
316 template <typename T1, typename... Ts>
317 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
318 const Ts &... Vs) {
319 DebugInfoCheckFailed(Message);
320 if (OS)
321 WriteTs(V1, Vs...);
322 }
323};
324
325namespace {
326
class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;
  // Dominator tree recomputed per function in verify(const Function &).
  DominatorTree DT;

  // NOTE(review): several member declarations in this class are missing from
  // this view (each orphaned doc comment below marks one); bodies later in
  // the file reference e.g. DebugFnArgs, SiblingFuncletInfo and
  // DISubprogramAttachments. Restore from the upstream file before editing.

  /// When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// Keep track of the metadata nodes that have been checked already.

  /// Keep track which DISubprogram is attached to which function.

  /// Track all DICompileUnits visited.

  /// The result type for a landingpad.
  Type *LandingPadResultTy;

  /// Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Whether the current function has a DISubprogram attached to it.
  bool HasDebugInfo = false;

  /// Stores the count of how many objects were passed to llvm.localescape for a
  /// given function and the largest index passed to llvm.localrecover.

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.

  /// Cache which blocks are in which funclet, if an EH funclet personality is
  /// in use. Otherwise empty.
  DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  /// Cache of attribute lists verified.
  SmallPtrSet<const void *, 32> AttributeListsVisited;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice, if they have multiple operands. In particular for very large
  // constant expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  // Keeps track of duplicate function argument debug info.

  TBAAVerifier TBAAVerifyHelper;
  ConvergenceVerifier ConvergenceVerifyHelper;

  // llvm.experimental.noalias.scope.decl calls collected during the visit and
  // checked by verifyNoAliasScopeDecl().
  SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;

  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
public:
  /// Construct a verifier bound to module \p M. Diagnostics go to \p OS
  /// (may be null to suppress output).
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }

  /// True if any debug-info check failed since construction.
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
403
404 bool verify(const Function &F) {
405 llvm::TimeTraceScope timeScope("Verifier");
406 assert(F.getParent() == &M &&
407 "An instance of this class only works with a specific module!");
408
409 // First ensure the function is well-enough formed to compute dominance
410 // information, and directly compute a dominance tree. We don't rely on the
411 // pass manager to provide this as it isolates us from a potentially
412 // out-of-date dominator tree and makes it significantly more complex to run
413 // this code outside of a pass manager.
414
415 // First check that every basic block has a terminator, otherwise we can't
416 // even inspect the CFG.
417 for (const BasicBlock &BB : F) {
418 if (!BB.empty() && BB.back().isTerminator())
419 continue;
420
421 if (OS) {
422 *OS << "Basic Block in function '" << F.getName()
423 << "' does not have terminator!\n";
424 BB.printAsOperand(*OS, true, MST);
425 *OS << "\n";
426 }
427 return false;
428 }
429
430 // FIXME: It's really gross that we have to cast away constness here.
431 if (!F.empty())
432 DT.recalculate(const_cast<Function &>(F));
433
434 auto FailureCB = [this](const Twine &Message) {
435 this->CheckFailed(Message);
436 };
437 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
438
439 Broken = false;
440 // FIXME: We strip const here because the inst visitor strips const.
441 visit(const_cast<Function &>(F));
442 verifySiblingFuncletUnwinds();
443
444 if (ConvergenceVerifyHelper.sawTokens())
445 ConvergenceVerifyHelper.verify(DT);
446
447 InstsInThisBlock.clear();
448 DebugFnArgs.clear();
449 LandingPadResultTy = nullptr;
450 SawFrameEscape = false;
451 SiblingFuncletInfo.clear();
452 verifyNoAliasScopeDecl();
453 NoAliasScopeDecls.clear();
454
455 return !Broken;
456 }
457
458 /// Verify the module that this instance of \c Verifier was initialized with.
459 bool verify() {
460 Broken = false;
461
462 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
463 for (const Function &F : M)
464 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
465 DeoptimizeDeclarations.push_back(&F);
466
467 // Now that we've visited every function, verify that we never asked to
468 // recover a frame index that wasn't escaped.
469 verifyFrameRecoverIndices();
470 for (const GlobalVariable &GV : M.globals())
471 visitGlobalVariable(GV);
472
473 for (const GlobalAlias &GA : M.aliases())
474 visitGlobalAlias(GA);
475
476 for (const GlobalIFunc &GI : M.ifuncs())
477 visitGlobalIFunc(GI);
478
479 for (const NamedMDNode &NMD : M.named_metadata())
480 visitNamedMDNode(NMD);
481
482 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
483 visitComdat(SMEC.getValue());
484
485 visitModuleFlags();
486 visitModuleIdents();
487 visitModuleCommandLines();
488 visitModuleErrnoTBAA();
489
490 verifyCompileUnits();
491
492 verifyDeoptimizeCallingConvs();
493 DISubprogramAttachments.clear();
494 return !Broken;
495 }
496
private:
  /// Whether a metadata node is allowed to be, or contain, a DILocation.
  enum class AreDebugLocsAllowed { No, Yes };

  /// Metadata that should be treated as a range, with slightly different
  /// requirements.
  enum class RangeLikeMetadataKind {
    Range,           // MD_range
    AbsoluteSymbol,  // MD_absolute_symbol
    NoaliasAddrspace // MD_noalias_addrspace
  };

  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitGlobalIFunc(const GlobalIFunc &GI);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitDIArgList(const DIArgList &AL, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents();
  void visitModuleCommandLines();
  void visitModuleErrnoTBAA();
  void visitModuleFlags();
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);

  // Per-kind metadata checks.
  void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
                               RangeLikeMetadataKind Kind);
  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
  void visitNofreeMetadata(Instruction &I, MDNode *MD);
  void visitProfMetadata(Instruction &I, MDNode *MD);
  void visitCallStackMetadata(MDNode *MD);
  void visitMemProfMetadata(Instruction &I, MDNode *MD);
  void visitCallsiteMetadata(Instruction &I, MDNode *MD);
  void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
  void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
  void visitMMRAMetadata(Instruction &I, MDNode *MD);
  void visitAnnotationMetadata(MDNode *Annotation);
  void visitAliasScopeMetadata(const MDNode *MD);
  void visitAliasScopeListMetadata(const MDNode *MD);
  void visitAccessGroupMetadata(const MDNode *MD);
  void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
  void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
  void visitInlineHistoryMetadata(Instruction &I, MDNode *MD);

  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIType(const DIType &N);
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  void visit(DbgLabelRecord &DLR);
  void visit(DbgVariableRecord &DVR);
  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visitDbgRecords(Instruction &I);
  void visit(Instruction &I);

  // Per-instruction visitors (dispatched via InstVisitor).
  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
  void visitPtrToAddrInst(PtrToAddrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitCallBase(CallBase &Call);
  void visitUnaryOperator(UnaryOperator &U);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminator(Instruction &I);
  void visitCondBrInst(CondBrInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitCallBrInst(CallBrInst &CBI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
  void visitVPIntrinsic(VPIntrinsic &VPI);
  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);

  // Exception-handling constructs.
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitResumeInst(ResumeInst &RI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
  void verifyMustTailCall(CallInst &CI);
  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
  void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
                                    const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                           const Value *V, bool IsIntrinsic, bool IsInlineAsm);
  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
  void verifyUnknownProfileMetadata(MDNode *MD);
  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
  void verifyInlineAsmCall(const CallBase &Call);
  void verifyStatepoint(const CallBase &Call);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyFragmentExpression(const DbgVariableRecord &I);
  template <typename ValueOrMetadata>
  void verifyFragmentExpression(const DIVariable &V,
                                // NOTE(review): a parameter line is missing
                                // here in this view -- confirm against the
                                // upstream file.
                                ValueOrMetadata *Desc);
  void verifyFnArgs(const DbgVariableRecord &DVR);
  void verifyNotEntryValue(const DbgVariableRecord &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();

  void verifyAttachedCallBundle(const CallBase &Call,
                                const OperandBundleUse &BU);

  /// Verify the llvm.experimental.noalias.scope.decl declarations
  void verifyNoAliasScopeDecl();
};
676
677} // end anonymous namespace
678
/// We know that cond should be true, if not print an error message.
// On failure this reports through CheckFailed(...) (setting Broken) and then
// returns from the enclosing (void-returning) function, so later checks that
// depend on the condition are skipped.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
687
/// We know that a debug info condition should be true, if not print
/// an error message.
// Same shape as Check, but routes through DebugInfoCheckFailed so the
// failure is recorded as (potentially recoverable) broken debug info rather
// than an unconditional module error.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
697
// Validate the debug records attached to an instruction: marker back-pointer
// integrity, no records on PHIs, and per-record checks.
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the remainder of this if-condition's initializer (the
    // line between "auto *Loc =" and the visitMDNode call, presumably a cast
    // of the record's debug location) is missing from this view -- restore
    // from the upstream file before compiling.
    if (auto *Loc =
      visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
722
// Entry point for each instruction: check debug records and null operands
// before the per-opcode visitor runs.
void Verifier::visit(Instruction &I) {
  visitDbgRecords(I);
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Check(I.getOperand(i) != nullptr, "Operand is null", &I);
  // NOTE(review): a trailing statement appears to be missing from this view;
  // presumably the dispatch into the inherited InstVisitor visit (see the
  // using-declaration in the class body) -- confirm against upstream.
}
729
// Helper to iterate over indirect users. By returning false, the callback
// can ask to stop traversing further.
static void forEachUser(const Value *User,
                        // NOTE(review): a parameter line is missing here in
                        // this view -- the Visited set used below (callers
                        // pass one, e.g. GlobalValueVisited). Restore from
                        // the upstream file.
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;

  // NOTE(review): the declaration/seeding of WorkList is missing from this
  // view -- restore from the upstream file before compiling.
  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    // Skip nodes already visited; users can be reached via multiple operands.
    if (!Visited.insert(Cur).second)
      continue;
    // A true return from the callback means "keep descending into this
    // user's users".
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
746
// Checks common to every GlobalValue: linkage/visibility/storage-class
// consistency, well-formed !associated / !implicit_ref /
// !elf_section_properties metadata, and that all users live in this module.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
  // NOTE(review): the opening of this Check (its condition line) is missing
  // from this view -- restore from the upstream file before compiling.
        "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    // !associated must be a single ValueAsMetadata operand pointing at a
    // pointer-typed global other than GO itself.
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }

    // !implicit_ref entries mirror the !associated shape, one per node.
    if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
      Check(!GO->isDeclaration(),
            "ref metadata must not be placed on a declaration", GO);

      // NOTE(review): the declaration of MDs (a small vector the next call
      // fills) is missing from this view -- restore from upstream.
      GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
      for (const MDNode *MD : MDs) {
        Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
              &GV, MD);
        const Metadata *Op = MD->getOperand(0).get();
        const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
        Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
        if (VM) {
          Check(isa<PointerType>(VM->getValue()->getType()),
                "ref value must be pointer typed", GV, MD);

          const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
          Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
                "ref metadata must point to a GlobalObject", GO, Stripped);
          Check(Stripped != GO, "values should not reference themselves", GO,
                MD);
        }
      }
    }

    // !elf_section_properties must carry exactly two ConstantInt fields.
    if (auto *Props = GO->getMetadata(LLVMContext::MD_elf_section_properties)) {
      Check(Props->getNumOperands() == 2,
            "elf_section_properties metadata must have two operands", GO,
            Props);
      if (Props->getNumOperands() == 2) {
        auto *Type = dyn_cast<ConstantAsMetadata>(Props->getOperand(0));
        Check(Type, "type field must be ConstantAsMetadata", GO, Props);
        auto *TypeInt = dyn_cast<ConstantInt>(Type->getValue());
        Check(TypeInt, "type field must be ConstantInt", GO, Props);

        auto *Entsize = dyn_cast<ConstantAsMetadata>(Props->getOperand(1));
        Check(Entsize, "entsize field must be ConstantAsMetadata", GO, Props);
        auto *EntsizeInt = dyn_cast<ConstantInt>(Entsize->getValue());
        Check(EntsizeInt, "entsize field must be ConstantInt", GO, Props);
      }
    }
  }

  // NOTE(review): the opening of this Check (its condition line) is missing
  // from this view -- restore from the upstream file before compiling.
        "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
    // NOTE(review): the opening of this Check (its condition line) is
    // missing from this view -- restore from upstream.
          "dllexport GlobalValue must have default or protected visibility",
          &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
    // NOTE(review): the opening of this Check (its condition line) is
    // missing from this view -- restore from upstream.
          "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    // NOTE(review): the continuation of this condition is missing from this
    // view -- restore from upstream.
    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Every (transitive) user must live in this module; stop descending once a
  // concrete Instruction or Function user has been checked.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
879
// Verify structural invariants of a single global variable: alignment bound,
// initializer type/constraints, the special intrinsic globals
// (llvm.global_ctors/dtors and llvm.used/llvm.compiler.used), !dbg
// attachments, and type-level restrictions (no scalable types, no disallowed
// target extension types, fits in its address space).
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    // Alignments beyond Value::MaximumAlignment cannot be represented.
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
    // NOTE(review): the opening Check(...) condition for the diagnostic
    // below is missing from this excerpt.
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      // NOTE(review): the opening Check(...) condition for the diagnostic
      // below is missing from this excerpt.
            "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    // NOTE(review): the Check(...) openers for the two diagnostics below are
    // missing from this excerpt.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      // Each entry is { i32 priority, ptr function [, ptr associated data] }.
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    // NOTE(review): the Check(...) openers for the two diagnostics below are
    // missing from this excerpt.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initializer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
          // NOTE(review): the Check(...) opener for the diagnostic below is
          // missing from this excerpt.
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  // NOTE(review): the declaration of `MDs` is missing from this excerpt.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
  // NOTE(review): the Check(...) opener for the diagnostic below is missing
  // from this excerpt.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  // Check that the address space can hold all bits of the type, recognized
  // by an access in the address space being able to reach all bytes of the
  // type.
  Check(!GVType->isSized() ||
            isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
                    GV.getGlobalSize(DL)),
        "Global variable is too large to fit into the address space", &GV,
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
1000
1001void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
1002 SmallPtrSet<const GlobalAlias*, 4> Visited;
1003 Visited.insert(&GA);
1004 visitAliaseeSubExpr(Visited, GA, C);
1005}
1006
// Recursively verify the aliasee expression of GA. `Visited` records aliases
// already seen along this chain so alias cycles are diagnosed instead of
// causing unbounded recursion.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  // NOTE(review): the enclosing condition and the Check(...) opener for the
  // diagnostic below are missing from this excerpt.
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    // NOTE(review): an enclosing `if (...) {` line is missing from this
    // excerpt.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // A failed insert means this alias was already on the chain: a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through constant operands, following aliases to their aliasees.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1045
// Verify a global alias: allowed linkage, a non-null aliasee whose type
// matches the alias, and the aliasee expression itself (cycles, interposable
// targets).
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  // NOTE(review): the Check(...) opener (linkage-validity test) for the
  // diagnostic below is missing from this excerpt.
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression recursively.
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1063
// Verify a global ifunc: metadata restrictions (no !dbg or !prof
// attachments), linkage, and that the resolver is a defined Function whose
// type is a pointer in the ifunc's address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // NOTE(review): the declaration of `MDs` is missing from this excerpt.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

  // NOTE(review): the Check(...) opener (linkage-validity test) for the
  // diagnostic below is missing from this excerpt.
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  // NOTE(review): the Check(...) opener for the diagnostic below is missing
  // from this excerpt.
        "IFunc resolver must return a pointer", &GI);

  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1098
1099void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1100 // There used to be various other llvm.dbg.* nodes, but we don't support
1101 // upgrading them and we want to reserve the namespace for future uses.
1102 if (NMD.getName().starts_with("llvm.dbg."))
1103 CheckDI(NMD.getName() == "llvm.dbg.cu",
1104 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1105 for (const MDNode *MD : NMD.operands()) {
1106 if (NMD.getName() == "llvm.dbg.cu")
1107 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1108
1109 if (!MD)
1110 continue;
1111
1112 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1113 }
1114}
1115
// Verify one MDNode: context ownership, subclass-specific invariants
// (via the generated dispatch), operand legality, and (last) resolution
// state. Recurses into MDNode and ValueAsMetadata operands.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visit method for each concrete subclass;
  // plain tuples need no extra checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata is never a legal operand of global metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  // NOTE(review): the remainder of the `if` condition and the definition of
  // `Count` are missing from this excerpt.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1170
1171void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1172 Check(MD.getValue(), "Expected valid value", &MD);
1173 Check(!MD.getValue()->getType()->isMetadataTy(),
1174 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1175
1176 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1177 if (!L)
1178 return;
1179
1180 Check(F, "function-local metadata used outside a function", L);
1181
1182 // If this was an instruction, bb, or argument, verify that it is in the
1183 // function that we expect.
1184 Function *ActualF = nullptr;
1185 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1186 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1187 ActualF = I->getParent()->getParent();
1188 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1189 ActualF = BB->getParent();
1190 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1191 ActualF = A->getParent();
1192 assert(ActualF && "Unimplemented function local metadata case!");
1193
1194 Check(ActualF == F, "function-local metadata used in wrong function", L);
1195}
1196
1197void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1198 for (const ValueAsMetadata *VAM : AL.getArgs())
1199 visitValueAsMetadata(*VAM, F);
1200}
1201
1202void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1203 Metadata *MD = MDV.getMetadata();
1204 if (auto *N = dyn_cast<MDNode>(MD)) {
1205 visitMDNode(*N, AreDebugLocsAllowed::No);
1206 return;
1207 }
1208
1209 // Only visit each node once. Metadata can be mutually recursive, so this
1210 // avoids infinite recursion here, as well as being an optimization.
1211 if (!MDNodes.insert(MD).second)
1212 return;
1213
1214 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1215 visitValueAsMetadata(*V, F);
1216
1217 if (auto *AL = dyn_cast<DIArgList>(MD))
1218 visitDIArgList(*AL, F);
1219}
1220
1221static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1222static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1223static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1224static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1225
1226void Verifier::visitDILocation(const DILocation &N) {
1227 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1228 "location requires a valid scope", &N, N.getRawScope());
1229 if (auto *IA = N.getRawInlinedAt())
1230 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1231 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1232 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1233}
1234
1235void Verifier::visitGenericDINode(const GenericDINode &N) {
1236 CheckDI(N.getTag(), "invalid tag", &N);
1237}
1238
1239void Verifier::visitDIScope(const DIScope &N) {
1240 if (auto *F = N.getRawFile())
1241 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1242}
1243
1244void Verifier::visitDIType(const DIType &N) {
1245 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1246 visitDIScope(N);
1247 CheckDI(N.getRawFile() || N.getLine() == 0, "line specified with no file", &N,
1248 N.getLine());
1249}
1250
// Verify a DISubrangeType: tag, optional base type, and that each of
// lowerBound/upperBound/stride/bias has one of the permitted metadata kinds.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI(...) opener for the diagnostic below is missing
  // from this excerpt.
          "SizeInBits must be a constant");
}
1284
// Verify a DISubrange: count and upperBound are mutually exclusive ways to
// describe the extent, and each bound/stride operand must have one of the
// permitted metadata kinds.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // NOTE(review): the CheckDI(...) opener for the diagnostic below is missing
  // from this excerpt; the visible condition accepts constant counts >= -1.
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1312
1313void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1314 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1315 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1316 "GenericSubrange can have any one of count or upperBound", &N);
1317 auto *CBound = N.getRawCountNode();
1318 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1319 "Count must be signed constant or DIVariable or DIExpression", &N);
1320 auto *LBound = N.getRawLowerBound();
1321 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1322 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1323 "LowerBound must be signed constant or DIVariable or DIExpression",
1324 &N);
1325 auto *UBound = N.getRawUpperBound();
1326 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1327 "UpperBound must be signed constant or DIVariable or DIExpression",
1328 &N);
1329 auto *Stride = N.getRawStride();
1330 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1331 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1332 "Stride must be signed constant or DIVariable or DIExpression", &N);
1333}
1334
1335void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1336 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1337}
1338
// Verify a DIBasicType: one of the base/unspecified/string tags, plus the
// common DIType checks.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI(...) opener for the diagnostic below is missing
  // from this excerpt.
          "SizeInBits must be a constant");
}
1351
// Verify a DIFixedPointType: base-type tag, a (un)signed fixed encoding, and
// consistency between the kind and its factor/numerator/denominator fields.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
  // NOTE(review): the CheckDI(...) opener (kind-validity test) for the
  // diagnostic below is missing from this excerpt.
          "invalid kind", &N);
  // NOTE(review): the CheckDI(...) opener for the rational/factor check is
  // missing from this excerpt.
              N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
  // NOTE(review): the CheckDI(...) opener for the non-rational check is
  // missing from this excerpt.
              (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1370
1371void Verifier::visitDIStringType(const DIStringType &N) {
1372 visitDIType(N);
1373
1374 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1375 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1376 &N);
1377}
1378
// Verify a DIDerivedType: one of the permitted derived-type tags, the
// tag-specific extraData shape, set-type base restrictions, base type kind,
// and that a DWARF address space only appears on pointer/reference tags.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The meaning of extraData depends on the tag; validate accordingly.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      // A tuple is only accepted as a single-element wrapper around a
      // constant.
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // NOTE(review): the dyn_cast definitions of `Enum`, `Subrange` and
      // `Basic` are missing from this excerpt.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI(...) opener for the diagnostic below is missing
  // from this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1463
1464/// Detect mutually exclusive flags.
1465static bool hasConflictingReferenceFlags(unsigned Flags) {
1466 return ((Flags & DINode::FlagLValueReference) &&
1467 (Flags & DINode::FlagRValueReference)) ||
1468 ((Flags & DINode::FlagTypePassByValue) &&
1469 (Flags & DINode::FlagTypePassByReference));
1470}
1471
1472void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1473 auto *Params = dyn_cast<MDTuple>(&RawParams);
1474 CheckDI(Params, "invalid template params", &N, &RawParams);
1475 for (Metadata *Op : Params->operands()) {
1476 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1477 &N, Params, Op);
1478 }
1479}
1480
// Verify a DICompositeType: permitted tag, element/base/vtable-holder kinds,
// vector shape, template params, and the array-type-only operands
// (dataLocation, associated, allocated, rank).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  // NOTE(review): the CheckDI(...) opener (reference-flag conflict test) for
  // the diagnostic below is missing from this excerpt.
          "invalid reference flags", &N);
  // Legacy Blocks-ABI flag bit; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    // A vector is described by exactly one subrange element.
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI(...) opener for the diagnostic below is missing
  // from this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1554
// Verify a DISubroutineType: tag, and that the type array (when present) is
// an MDTuple whose entries are all absent-or-DIType.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  visitDIType(N);
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  // NOTE(review): the CheckDI(...) opener (reference-flag conflict test) for
  // the diagnostic below is missing from this excerpt.
          "invalid reference flags", &N);
}
1567
1568void Verifier::visitDIFile(const DIFile &N) {
1569 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1570 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1571 if (Checksum) {
1572 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1573 "invalid checksum kind", &N);
1574 size_t Size;
1575 switch (Checksum->Kind) {
1576 case DIFile::CSK_MD5:
1577 Size = 32;
1578 break;
1579 case DIFile::CSK_SHA1:
1580 Size = 40;
1581 break;
1582 case DIFile::CSK_SHA256:
1583 Size = 64;
1584 break;
1585 }
1586 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1587 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1588 "invalid checksum", &N);
1589 }
1590}
1591
// Verify a DICompileUnit: distinctness, tag, file, emission kind, and the
// optional enum/retained-type/global-variable/imported-entity/macro lists.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      // NOTE(review): the definition of `Enum` is missing from this excerpt.
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained types are DITypes or non-definition subprogram declarations.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      // NOTE(review): the CheckDI(...) opener for the diagnostic below is
      // missing from this excerpt.
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      // NOTE(review): the definition of `IE` is missing from this excerpt.
      CheckDI(IE, "invalid imported entity ref", &N, Op);
      // NOTE(review): the CheckDI(...) opener for the diagnostic below is
      // missing from this excerpt.
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record the CU; presumably cross-checked elsewhere (e.g. against
  // !llvm.dbg.cu) — the consumer of CUVisited is not visible in this excerpt.
  CUVisited.insert(&N);
}
1652
// Verify a DISubprogram: tag, scope/file/type kinds, retained-node list
// (kinds, locality, ownership, unique argument numbers), and the
// definition-vs-declaration invariants (unit, distinctness, ODR nesting,
// thrown types, DIFlagAllCallsDescribed).
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Maps argument number -> variable, to reject duplicate argument indices.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept the first four retained-node kinds and reject the fifth
      // alternative of visitRetainedNode.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      // NOTE(review): the argument of the dyn_cast_or_null on the line above
      // is missing from this excerpt.
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      // The retained node's scope must lead back to this very subprogram.
      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
  // NOTE(review): the CheckDI(...) opener (reference-flag conflict test) for
  // the diagnostic below is missing from this excerpt.
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1751
1752void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1753 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1754 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1755 "invalid local scope", &N, N.getRawScope());
1756 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1757 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1758}
1759
1760void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1761 visitDILexicalBlockBase(N);
1762
1763 CheckDI(N.getLine() || !N.getColumn(),
1764 "cannot have column info without line info", &N);
1765}
1766
1767void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1768 visitDILexicalBlockBase(N);
1769}
1770
1771void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1772 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1773 if (auto *S = N.getRawScope())
1774 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1775 if (auto *S = N.getRawDecl())
1776 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1777}
1778
1779void Verifier::visitDINamespace(const DINamespace &N) {
1780 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1781 if (auto *S = N.getRawScope())
1782 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1783}
1784
1785void Verifier::visitDIMacro(const DIMacro &N) {
1786 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1787 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1788 "invalid macinfo type", &N);
1789 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1790 if (!N.getValue().empty()) {
1791 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1792 }
1793}
1794
1795void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1796 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1797 "invalid macinfo type", &N);
1798 if (auto *F = N.getRawFile())
1799 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1800
1801 if (auto *Array = N.getRawElements()) {
1802 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1803 for (Metadata *Op : N.getElements()->operands()) {
1804 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1805 }
1806 }
1807}
1808
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  // Modules are referenced by name, so an anonymous module is meaningless.
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1813
// Shared check for all template parameter nodes: the referenced type, if
// any, must be a valid type reference.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1817
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  // Common template parameter checks (type reference validity).
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
1824
1825void Verifier::visitDITemplateValueParameter(
1826 const DITemplateValueParameter &N) {
1827 visitDITemplateParameter(N);
1828
1829 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1830 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1831 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1832 "invalid tag", &N);
1833}
1834
1835void Verifier::visitDIVariable(const DIVariable &N) {
1836 if (auto *S = N.getRawScope())
1837 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1838 if (auto *F = N.getRawFile())
1839 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1840}
1841
1842void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1843 // Checks common to all variables.
1844 visitDIVariable(N);
1845
1846 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1847 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1848 // Check only if the global variable is not an extern
1849 if (N.isDefinition())
1850 CheckDI(N.getType(), "missing global variable type", &N);
1851 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1853 "invalid static data member declaration", &N, Member);
1854 }
1855}
1856
1857void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1858 // Checks common to all variables.
1859 visitDIVariable(N);
1860
1861 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1862 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1863 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1864 "local variable requires a valid scope", &N, N.getRawScope());
1865 if (auto Ty = N.getType())
1866 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1867}
1868
// DIAssignID nodes tie stores to their debug assignment markers; they carry
// no operands and must be distinct so every ID is unique.
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1873
1874void Verifier::visitDILabel(const DILabel &N) {
1875 if (auto *S = N.getRawScope())
1876 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1877 if (auto *F = N.getRawFile())
1878 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1879
1880 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1881 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1882 "label requires a valid scope", &N, N.getRawScope());
1883}
1884
// Validity of the DWARF expression opcode stream is delegated entirely to
// DIExpression::isValid().
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1888
1889void Verifier::visitDIGlobalVariableExpression(
1890 const DIGlobalVariableExpression &GVE) {
1891 CheckDI(GVE.getVariable(), "missing variable");
1892 if (auto *Var = GVE.getVariable())
1893 visitDIGlobalVariable(*Var);
1894 if (auto *Expr = GVE.getExpression()) {
1895 visitDIExpression(*Expr);
1896 if (auto Fragment = Expr->getFragmentInfo())
1897 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1898 }
1899}
1900
1901void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1902 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1903 if (auto *T = N.getRawType())
1904 CheckDI(isType(T), "invalid type ref", &N, T);
1905 if (auto *F = N.getRawFile())
1906 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1907}
1908
1909void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1910 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1911 N.getTag() == dwarf::DW_TAG_imported_declaration,
1912 "invalid tag", &N);
1913 if (auto *S = N.getRawScope())
1914 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1915 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1916 N.getRawEntity());
1917}
1918
1919void Verifier::visitComdat(const Comdat &C) {
1920 // In COFF the Module is invalid if the GlobalValue has private linkage.
1921 // Entities with private linkage don't have entries in the symbol table.
1922 if (TT.isOSBinFormatCOFF())
1923 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1924 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1925 GV);
1926}
1927
1928void Verifier::visitModuleIdents() {
1929 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1930 if (!Idents)
1931 return;
1932
1933 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1934 // Scan each llvm.ident entry and make sure that this requirement is met.
1935 for (const MDNode *N : Idents->operands()) {
1936 Check(N->getNumOperands() == 1,
1937 "incorrect number of operands in llvm.ident metadata", N);
1938 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1939 ("invalid value for llvm.ident metadata entry operand"
1940 "(the operand should be a string)"),
1941 N->getOperand(0));
1942 }
1943}
1944
1945void Verifier::visitModuleCommandLines() {
1946 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1947 if (!CommandLines)
1948 return;
1949
1950 // llvm.commandline takes a list of metadata entry. Each entry has only one
1951 // string. Scan each llvm.commandline entry and make sure that this
1952 // requirement is met.
1953 for (const MDNode *N : CommandLines->operands()) {
1954 Check(N->getNumOperands() == 1,
1955 "incorrect number of operands in llvm.commandline metadata", N);
1956 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1957 ("invalid value for llvm.commandline metadata entry operand"
1958 "(the operand should be a string)"),
1959 N->getOperand(0));
1960 }
1961}
1962
// Validate the optional module-level "llvm.errno.tbaa" metadata: when
// present it must have at least one operand, and every operand is checked
// as TBAA metadata via the TBAA verifier helper.
void Verifier::visitModuleErrnoTBAA() {
  const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
  if (!ErrnoTBAA)
    return;

  Check(ErrnoTBAA->getNumOperands() >= 1,
        "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);

  // No instruction context is available here, hence the nullptr.
  for (const MDNode *N : ErrnoTBAA->operands())
    TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
}
1974
1975void Verifier::visitModuleFlags() {
1976 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1977 if (!Flags) return;
1978
1979 // Scan each flag, and track the flags and requirements.
1980 DenseMap<const MDString*, const MDNode*> SeenIDs;
1981 SmallVector<const MDNode*, 16> Requirements;
1982 uint64_t PAuthABIPlatform = -1;
1983 uint64_t PAuthABIVersion = -1;
1984 for (const MDNode *MDN : Flags->operands()) {
1985 visitModuleFlag(MDN, SeenIDs, Requirements);
1986 if (MDN->getNumOperands() != 3)
1987 continue;
1988 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1989 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1990 if (const auto *PAP =
1992 PAuthABIPlatform = PAP->getZExtValue();
1993 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1994 if (const auto *PAV =
1996 PAuthABIVersion = PAV->getZExtValue();
1997 }
1998 }
1999 }
2000
2001 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
2002 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
2003 "'aarch64-elf-pauthabi-version' module flags must be present");
2004
2005 // Validate that the requirements in the module are valid.
2006 for (const MDNode *Requirement : Requirements) {
2007 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
2008 const Metadata *ReqValue = Requirement->getOperand(1);
2009
2010 const MDNode *Op = SeenIDs.lookup(Flag);
2011 if (!Op) {
2012 CheckFailed("invalid requirement on flag, flag is not present in module",
2013 Flag);
2014 continue;
2015 }
2016
2017 if (Op->getOperand(2) != ReqValue) {
2018 CheckFailed(("invalid requirement on flag, "
2019 "flag does not have the required value"),
2020 Flag);
2021 continue;
2022 }
2023 }
2024}
2025
2026void
2027Verifier::visitModuleFlag(const MDNode *Op,
2028 DenseMap<const MDString *, const MDNode *> &SeenIDs,
2029 SmallVectorImpl<const MDNode *> &Requirements) {
2030 // Each module flag should have three arguments, the merge behavior (a
2031 // constant int), the flag ID (an MDString), and the value.
2032 Check(Op->getNumOperands() == 3,
2033 "incorrect number of operands in module flag", Op);
2034 Module::ModFlagBehavior MFB;
2035 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2037 "invalid behavior operand in module flag (expected constant integer)",
2038 Op->getOperand(0));
2039 Check(false,
2040 "invalid behavior operand in module flag (unexpected constant)",
2041 Op->getOperand(0));
2042 }
2043 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2044 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2045 Op->getOperand(1));
2046
2047 // Check the values for behaviors with additional requirements.
2048 switch (MFB) {
2049 case Module::Error:
2050 case Module::Warning:
2051 case Module::Override:
2052 // These behavior types accept any value.
2053 break;
2054
2055 case Module::Min: {
2056 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2057 Check(V && V->getValue().isNonNegative(),
2058 "invalid value for 'min' module flag (expected constant non-negative "
2059 "integer)",
2060 Op->getOperand(2));
2061 break;
2062 }
2063
2064 case Module::Max: {
2066 "invalid value for 'max' module flag (expected constant integer)",
2067 Op->getOperand(2));
2068 break;
2069 }
2070
2071 case Module::Require: {
2072 // The value should itself be an MDNode with two operands, a flag ID (an
2073 // MDString), and a value.
2074 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2075 Check(Value && Value->getNumOperands() == 2,
2076 "invalid value for 'require' module flag (expected metadata pair)",
2077 Op->getOperand(2));
2078 Check(isa<MDString>(Value->getOperand(0)),
2079 ("invalid value for 'require' module flag "
2080 "(first value operand should be a string)"),
2081 Value->getOperand(0));
2082
2083 // Append it to the list of requirements, to check once all module flags are
2084 // scanned.
2085 Requirements.push_back(Value);
2086 break;
2087 }
2088
2089 case Module::Append:
2090 case Module::AppendUnique: {
2091 // These behavior types require the operand be an MDNode.
2092 Check(isa<MDNode>(Op->getOperand(2)),
2093 "invalid value for 'append'-type module flag "
2094 "(expected a metadata node)",
2095 Op->getOperand(2));
2096 break;
2097 }
2098 }
2099
2100 // Unless this is a "requires" flag, check the ID is unique.
2101 if (MFB != Module::Require) {
2102 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2103 Check(Inserted,
2104 "module flag identifiers must be unique (or of 'require' type)", ID);
2105 }
2106
2107 if (ID->getString() == "wchar_size") {
2108 ConstantInt *Value
2110 Check(Value, "wchar_size metadata requires constant integer argument");
2111 }
2112
2113 if (ID->getString() == "Linker Options") {
2114 // If the llvm.linker.options named metadata exists, we assume that the
2115 // bitcode reader has upgraded the module flag. Otherwise the flag might
2116 // have been created by a client directly.
2117 Check(M.getNamedMetadata("llvm.linker.options"),
2118 "'Linker Options' named metadata no longer supported");
2119 }
2120
2121 if (ID->getString() == "SemanticInterposition") {
2122 ConstantInt *Value =
2124 Check(Value,
2125 "SemanticInterposition metadata requires constant integer argument");
2126 }
2127
2128 if (ID->getString() == "CG Profile") {
2129 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2130 visitModuleFlagCGProfileEntry(MDO);
2131 }
2132}
2133
2134void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2135 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2136 if (!FuncMDO)
2137 return;
2138 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2139 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2140 "expected a Function or null", FuncMDO);
2141 };
2142 auto Node = dyn_cast_or_null<MDNode>(MDO);
2143 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2144 CheckFunction(Node->getOperand(0));
2145 CheckFunction(Node->getOperand(1));
2146 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2147 Check(Count && Count->getType()->isIntegerTy(),
2148 "expected an integer constant", Node->getOperand(2));
2149}
2150
// Check that each attribute in Attrs is structurally valid: known string
// attributes of "strbool" kind must have an empty, "true", or "false" value,
// and enum/int attribute kinds must carry an argument exactly when their
// kind requires one. The value V is printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // The block below is generated from Attributes.inc and expands to one
      // value check per known string-boolean attribute name.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2176
2177// VerifyParameterAttrs - Check the given attributes for an argument or return
2178// value of the specified type. The value V is printed in error messages.
2179void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2180 const Value *V) {
2181 if (!Attrs.hasAttributes())
2182 return;
2183
2184 verifyAttributeTypes(Attrs, V);
2185
2186 for (Attribute Attr : Attrs)
2187 Check(Attr.isStringAttribute() ||
2188 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2189 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2190 V);
2191
2192 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2193 unsigned AttrCount =
2194 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2195 Check(AttrCount == 1,
2196 "Attribute 'immarg' is incompatible with other attributes except the "
2197 "'range' attribute",
2198 V);
2199 }
2200
2201 // Check for mutually incompatible attributes. Only inreg is compatible with
2202 // sret.
2203 unsigned AttrCount = 0;
2204 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2205 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2206 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2207 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2208 Attrs.hasAttribute(Attribute::InReg);
2209 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2210 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2211 Check(AttrCount <= 1,
2212 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2213 "'byref', and 'sret' are incompatible!",
2214 V);
2215
2216 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2217 Attrs.hasAttribute(Attribute::ReadOnly)),
2218 "Attributes "
2219 "'inalloca and readonly' are incompatible!",
2220 V);
2221
2222 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2223 Attrs.hasAttribute(Attribute::Returned)),
2224 "Attributes "
2225 "'sret and returned' are incompatible!",
2226 V);
2227
2228 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2229 Attrs.hasAttribute(Attribute::SExt)),
2230 "Attributes "
2231 "'zeroext and signext' are incompatible!",
2232 V);
2233
2234 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2235 Attrs.hasAttribute(Attribute::ReadOnly)),
2236 "Attributes "
2237 "'readnone and readonly' are incompatible!",
2238 V);
2239
2240 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2241 Attrs.hasAttribute(Attribute::WriteOnly)),
2242 "Attributes "
2243 "'readnone and writeonly' are incompatible!",
2244 V);
2245
2246 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2247 Attrs.hasAttribute(Attribute::WriteOnly)),
2248 "Attributes "
2249 "'readonly and writeonly' are incompatible!",
2250 V);
2251
2252 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2253 Attrs.hasAttribute(Attribute::AlwaysInline)),
2254 "Attributes "
2255 "'noinline and alwaysinline' are incompatible!",
2256 V);
2257
2258 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2259 Attrs.hasAttribute(Attribute::ReadNone)),
2260 "Attributes writable and readnone are incompatible!", V);
2261
2262 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2263 Attrs.hasAttribute(Attribute::ReadOnly)),
2264 "Attributes writable and readonly are incompatible!", V);
2265
2266 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2267 for (Attribute Attr : Attrs) {
2268 if (!Attr.isStringAttribute() &&
2269 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2270 CheckFailed("Attribute '" + Attr.getAsString() +
2271 "' applied to incompatible type!", V);
2272 return;
2273 }
2274 }
2275
2276 if (isa<PointerType>(Ty)) {
2277 if (Attrs.hasAttribute(Attribute::Alignment)) {
2278 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2279 Check(AttrAlign.value() <= Value::MaximumAlignment,
2280 "huge alignment values are unsupported", V);
2281 }
2282 if (Attrs.hasAttribute(Attribute::ByVal)) {
2283 Type *ByValTy = Attrs.getByValType();
2284 SmallPtrSet<Type *, 4> Visited;
2285 Check(ByValTy->isSized(&Visited),
2286 "Attribute 'byval' does not support unsized types!", V);
2287 // Check if it is or contains a target extension type that disallows being
2288 // used on the stack.
2290 "'byval' argument has illegal target extension type", V);
2291 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2292 "huge 'byval' arguments are unsupported", V);
2293 }
2294 if (Attrs.hasAttribute(Attribute::ByRef)) {
2295 SmallPtrSet<Type *, 4> Visited;
2296 Check(Attrs.getByRefType()->isSized(&Visited),
2297 "Attribute 'byref' does not support unsized types!", V);
2298 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2299 (1ULL << 32),
2300 "huge 'byref' arguments are unsupported", V);
2301 }
2302 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2303 SmallPtrSet<Type *, 4> Visited;
2304 Check(Attrs.getInAllocaType()->isSized(&Visited),
2305 "Attribute 'inalloca' does not support unsized types!", V);
2306 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2307 (1ULL << 32),
2308 "huge 'inalloca' arguments are unsupported", V);
2309 }
2310 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2311 SmallPtrSet<Type *, 4> Visited;
2312 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2313 "Attribute 'preallocated' does not support unsized types!", V);
2314 Check(
2315 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2316 (1ULL << 32),
2317 "huge 'preallocated' arguments are unsupported", V);
2318 }
2319 }
2320
2321 if (Attrs.hasAttribute(Attribute::Initializes)) {
2322 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2323 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2324 V);
2326 "Attribute 'initializes' does not support unordered ranges", V);
2327 }
2328
2329 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2330 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2331 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2332 V);
2333 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2334 "Invalid value for 'nofpclass' test mask", V);
2335 }
2336 if (Attrs.hasAttribute(Attribute::Range)) {
2337 const ConstantRange &CR =
2338 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2340 "Range bit width must match type bit width!", V);
2341 }
2342}
2343
2344void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2345 const Value *V) {
2346 if (Attrs.hasFnAttr(Attr)) {
2347 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2348 unsigned N;
2349 if (S.getAsInteger(10, N))
2350 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2351 }
2352}
2353
2354// Check parameter attributes against a function type.
2355// The value V is printed in error messages.
2356void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2357 const Value *V, bool IsIntrinsic,
2358 bool IsInlineAsm) {
2359 if (Attrs.isEmpty())
2360 return;
2361
2362 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2363 Check(Attrs.hasParentContext(Context),
2364 "Attribute list does not match Module context!", &Attrs, V);
2365 for (const auto &AttrSet : Attrs) {
2366 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2367 "Attribute set does not match Module context!", &AttrSet, V);
2368 for (const auto &A : AttrSet) {
2369 Check(A.hasParentContext(Context),
2370 "Attribute does not match Module context!", &A, V);
2371 }
2372 }
2373 }
2374
2375 bool SawNest = false;
2376 bool SawReturned = false;
2377 bool SawSRet = false;
2378 bool SawSwiftSelf = false;
2379 bool SawSwiftAsync = false;
2380 bool SawSwiftError = false;
2381
2382 // Verify return value attributes.
2383 AttributeSet RetAttrs = Attrs.getRetAttrs();
2384 for (Attribute RetAttr : RetAttrs)
2385 Check(RetAttr.isStringAttribute() ||
2386 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2387 "Attribute '" + RetAttr.getAsString() +
2388 "' does not apply to function return values",
2389 V);
2390
2391 unsigned MaxParameterWidth = 0;
2392 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2393 if (Ty->isVectorTy()) {
2394 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2395 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2396 if (Size > MaxParameterWidth)
2397 MaxParameterWidth = Size;
2398 }
2399 }
2400 };
2401 GetMaxParameterWidth(FT->getReturnType());
2402 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2403
2404 // Verify parameter attributes.
2405 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2406 Type *Ty = FT->getParamType(i);
2407 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2408
2409 if (!IsIntrinsic) {
2410 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2411 "immarg attribute only applies to intrinsics", V);
2412 if (!IsInlineAsm)
2413 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2414 "Attribute 'elementtype' can only be applied to intrinsics"
2415 " and inline asm.",
2416 V);
2417 }
2418
2419 verifyParameterAttrs(ArgAttrs, Ty, V);
2420 GetMaxParameterWidth(Ty);
2421
2422 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2423 Check(!SawNest, "More than one parameter has attribute nest!", V);
2424 SawNest = true;
2425 }
2426
2427 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2428 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2429 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2430 "Incompatible argument and return types for 'returned' attribute",
2431 V);
2432 SawReturned = true;
2433 }
2434
2435 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2436 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2437 Check(i == 0 || i == 1,
2438 "Attribute 'sret' is not on first or second parameter!", V);
2439 SawSRet = true;
2440 }
2441
2442 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2443 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2444 SawSwiftSelf = true;
2445 }
2446
2447 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2448 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2449 SawSwiftAsync = true;
2450 }
2451
2452 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2453 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2454 SawSwiftError = true;
2455 }
2456
2457 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2458 Check(i == FT->getNumParams() - 1,
2459 "inalloca isn't on the last parameter!", V);
2460 }
2461 }
2462
2463 if (!Attrs.hasFnAttrs())
2464 return;
2465
2466 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2467 for (Attribute FnAttr : Attrs.getFnAttrs())
2468 Check(FnAttr.isStringAttribute() ||
2469 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2470 "Attribute '" + FnAttr.getAsString() +
2471 "' does not apply to functions!",
2472 V);
2473
2474 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2475 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2476 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2477
2478 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2479 Check(Attrs.hasFnAttr(Attribute::NoInline),
2480 "Attribute 'optnone' requires 'noinline'!", V);
2481
2482 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2483 "Attributes 'optsize and optnone' are incompatible!", V);
2484
2485 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2486 "Attributes 'minsize and optnone' are incompatible!", V);
2487
2488 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2489 "Attributes 'optdebug and optnone' are incompatible!", V);
2490 }
2491
2492 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2493 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2494 "Attributes "
2495 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2496 V);
2497
2498 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2499 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2500 "Attributes 'optsize and optdebug' are incompatible!", V);
2501
2502 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2503 "Attributes 'minsize and optdebug' are incompatible!", V);
2504 }
2505
2506 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2507 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2508 "Attribute writable and memory without argmem: write are incompatible!",
2509 V);
2510
2511 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2512 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2513 "Attributes 'aarch64_pstate_sm_enabled and "
2514 "aarch64_pstate_sm_compatible' are incompatible!",
2515 V);
2516 }
2517
2518 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2519 Attrs.hasFnAttr("aarch64_inout_za") +
2520 Attrs.hasFnAttr("aarch64_out_za") +
2521 Attrs.hasFnAttr("aarch64_preserves_za") +
2522 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2523 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2524 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2525 "'aarch64_za_state_agnostic' are mutually exclusive",
2526 V);
2527
2528 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2529 Attrs.hasFnAttr("aarch64_in_zt0") +
2530 Attrs.hasFnAttr("aarch64_inout_zt0") +
2531 Attrs.hasFnAttr("aarch64_out_zt0") +
2532 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2533 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2534 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2535 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2536 "'aarch64_za_state_agnostic' are mutually exclusive",
2537 V);
2538
2539 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2540 const GlobalValue *GV = cast<GlobalValue>(V);
2542 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2543 }
2544
2545 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2546 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2547 if (ParamNo >= FT->getNumParams()) {
2548 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2549 return false;
2550 }
2551
2552 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2553 CheckFailed("'allocsize' " + Name +
2554 " argument must refer to an integer parameter",
2555 V);
2556 return false;
2557 }
2558
2559 return true;
2560 };
2561
2562 if (!CheckParam("element size", Args->first))
2563 return;
2564
2565 if (Args->second && !CheckParam("number of elements", *Args->second))
2566 return;
2567 }
2568
2569 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2570 AllocFnKind K = Attrs.getAllocKind();
2572 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2573 if (!is_contained(
2574 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2575 Type))
2576 CheckFailed(
2577 "'allockind()' requires exactly one of alloc, realloc, and free");
2578 if ((Type == AllocFnKind::Free) &&
2579 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2580 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2581 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2582 "or aligned modifiers.");
2583 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2584 if ((K & ZeroedUninit) == ZeroedUninit)
2585 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2586 }
2587
2588 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2589 StringRef S = A.getValueAsString();
2590 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2591 Function *Variant = M.getFunction(S);
2592 if (Variant) {
2593 Attribute Family = Attrs.getFnAttr("alloc-family");
2594 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2595 if (Family.isValid())
2596 Check(VariantFamily.isValid() &&
2597 VariantFamily.getValueAsString() == Family.getValueAsString(),
2598 "'alloc-variant-zeroed' must name a function belonging to the "
2599 "same 'alloc-family'");
2600
2601 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2602 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2603 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2604 "'alloc-variant-zeroed' must name a function with "
2605 "'allockind(\"zeroed\")'");
2606
2607 Check(FT == Variant->getFunctionType(),
2608 "'alloc-variant-zeroed' must name a function with the same "
2609 "signature");
2610
2611 if (const Function *F = dyn_cast<Function>(V))
2612 Check(F->getCallingConv() == Variant->getCallingConv(),
2613 "'alloc-variant-zeroed' must name a function with the same "
2614 "calling convention");
2615 }
2616 }
2617
2618 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2619 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2620 if (VScaleMin == 0)
2621 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2622 else if (!isPowerOf2_32(VScaleMin))
2623 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2624 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2625 if (VScaleMax && VScaleMin > VScaleMax)
2626 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2627 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2628 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2629 }
2630
2631 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2632 StringRef FP = FPAttr.getValueAsString();
2633 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2634 FP != "non-leaf-no-reserve")
2635 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2636 }
2637
2638 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2639 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2640 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2641 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2642 .getValueAsString()
2643 .empty(),
2644 "\"patchable-function-entry-section\" must not be empty");
2645 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2646
2647 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2648 StringRef S = A.getValueAsString();
2649 if (S != "none" && S != "all" && S != "non-leaf")
2650 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2651 }
2652
2653 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2654 StringRef S = A.getValueAsString();
2655 if (S != "a_key" && S != "b_key")
2656 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2657 V);
2658 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2659 CheckFailed(
2660 "'sign-return-address-key' present without `sign-return-address`");
2661 }
2662 }
2663
2664 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2665 StringRef S = A.getValueAsString();
2666 if (S != "" && S != "true" && S != "false")
2667 CheckFailed(
2668 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2669 }
2670
2671 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2672 StringRef S = A.getValueAsString();
2673 if (S != "" && S != "true" && S != "false")
2674 CheckFailed(
2675 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2676 }
2677
2678 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2679 StringRef S = A.getValueAsString();
2680 if (S != "" && S != "true" && S != "false")
2681 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2682 V);
2683 }
2684
2685 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2686 StringRef S = A.getValueAsString();
2687 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2688 if (!Info)
2689 CheckFailed("invalid name for a VFABI variant: " + S, V);
2690 }
2691
2692 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2693 StringRef S = A.getValueAsString();
2695 S.split(Args, ',');
2696 Check(Args.size() >= 5,
2697 "modular-format attribute requires at least 5 arguments", V);
2698 unsigned FirstArgIdx;
2699 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2700 "modular-format attribute first arg index is not an integer", V);
2701 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2702 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2703 "modular-format attribute first arg index is out of bounds", V);
2704 }
2705
2706 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2707 StringRef S = A.getValueAsString();
2708 if (!S.empty()) {
2709 for (auto FeatureFlag : split(S, ',')) {
2710 if (FeatureFlag.empty())
2711 CheckFailed(
2712 "target-features attribute should not contain an empty string");
2713 else
2714 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2715 "target feature '" + FeatureFlag +
2716 "' must start with a '+' or '-'",
2717 V);
2718 }
2719 }
2720 }
2721}
2722void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2723 Check(MD->getNumOperands() == 2,
2724 "'unknown' !prof should have a single additional operand", MD);
2725 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2726 Check(PassName != nullptr,
2727 "'unknown' !prof should have an additional operand of type "
2728 "string");
2729 Check(!PassName->getString().empty(),
2730 "the 'unknown' !prof operand should not be an empty string");
2731}
2732
/// Validate function-level metadata attachments.
///
/// Only two kinds are checked here: !prof (entry-count profile data, plus
/// the special "unknown" marker shape) and !kcfi_type (a 32-bit KCFI type
/// hash); any other attachment kind passes through unchecked.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the continuation of this condition (the marker-string
      // argument and closing parentheses) is missing from this excerpt
      // (extraction artifact).
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies it requires an MDString operand.
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies it compares ProfName against the two
      // accepted entry-count tag names.
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies it requires an integer operand.
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies it requires a ConstantAsMetadata
      // operand.
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies the constant must be 32 bits wide.
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2782
/// Iteratively verify every constant expression reachable from \p EntryC.
///
/// Uses an explicit worklist rather than recursion, and memoizes visited
/// constants in ConstantExprVisited so shared subexpressions are checked at
/// most once per module. Traversal stops at GlobalValues (they are verified
/// separately) after confirming they belong to this module.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have no subexpressions to traverse.
  if (EntryC->getNumOperands() == 0)
    return;

  // Already checked via some other use of this constant.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  // NOTE(review): the declaration of the worklist (Stack) is missing from
  // this excerpt (extraction artifact).
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2822
2823void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2824 if (CE->getOpcode() == Instruction::BitCast)
2825 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2826 CE->getType()),
2827 "Invalid bitcast", CE);
2828 else if (CE->getOpcode() == Instruction::PtrToAddr)
2829 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2830}
2831
/// Verify structural invariants of a ptrauth signed-pointer constant:
/// a pointer-typed base whose type matches the constant's own type, an i32
/// key, an i64 discriminator, a pointer-typed address discriminator, and a
/// pointer-typed deactivation symbol that is a global value or null.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

  // NOTE(review): the head of this Check(...) is missing from this excerpt
  // (extraction artifact).
        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

  // NOTE(review): the head of this Check(...) is missing from this excerpt
  // (extraction artifact).
        "signed ptrauth constant deactivation symbol must be a pointer");

  // NOTE(review): the head of this Check(...) is missing from this excerpt
  // (extraction artifact).
        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2856
2857bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2858 // There shouldn't be more attribute sets than there are parameters plus the
2859 // function and return value.
2860 return Attrs.getNumAttrSets() <= Params + 2;
2861}
2862
/// Verify an inline-asm call site against its parsed constraint string:
/// indirect constraints need pointer operands (with elementtype, per the
/// message below), direct constraints must not carry elementtype, and label
/// constraints are only legal on callbr, one per indirect destination.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

      // NOTE(review): the head of this Check(...) is missing from this
      // excerpt; the message implies it tests for the elementtype attribute.
            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // Labels do not consume an argument slot; only arg constraints do.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2904
/// Verify that statepoint intrinsic is well formed.
///
/// Checks the fixed operand layout (num-patch-bytes, callee with
/// elementtype, num-call-args, flags, the wrapped call's arguments, and the
/// deprecated transition/deopt counts, which must be zero) and that the
/// statepoint token is only consumed by matching gc.result / gc.relocate
/// calls.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

  // NOTE(review): the head of this Check(...) is missing from this excerpt;
  // the message implies it tests the call's memory effects.
        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  // Operand 1: number of patchable bytes; must be a non-negative i32.
  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // Operand 2: the wrapped callee; its function type is carried via the
  // elementtype attribute.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  // Operand 3: how many of the following operands feed the wrapped call.
  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  // Operand 4: statepoint flags bitmask.
  const uint64_t Flags
      = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  // Inline transition and deopt operand bundles are deprecated; both counts
  // must be constant zero.
  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(Call)) {
      // NOTE(review): this tests Call — which the assert above pins to the
      // gc.statepoint intrinsic itself — rather than UserCall, so as
      // written the condition can never be true and the branch is dead.
      // It looks like it was meant to be isa<GCRelocateInst>(UserCall);
      // confirm before changing.
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times. It's non-optimal, but it is legal. It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer. This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about. See example statepoint.ll in the verifier subdirectory
}
3028
3029void Verifier::verifyFrameRecoverIndices() {
3030 for (auto &Counts : FrameEscapeInfo) {
3031 Function *F = Counts.first;
3032 unsigned EscapedObjectCount = Counts.second.first;
3033 unsigned MaxRecoveredIndex = Counts.second.second;
3034 Check(MaxRecoveredIndex <= EscapedObjectCount,
3035 "all indices passed to llvm.localrecover must be less than the "
3036 "number of arguments passed to llvm.localescape in the parent "
3037 "function",
3038 F);
3039 }
3040}
3041
3042static Instruction *getSuccPad(Instruction *Terminator) {
3043 BasicBlock *UnwindDest;
3044 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3045 UnwindDest = II->getUnwindDest();
3046 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3047 UnwindDest = CSI->getUnwindDest();
3048 else
3049 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3050 return &*UnwindDest->getFirstNonPHIIt();
3051}
3052
/// Check that no group of sibling EH funclets unwind to one another.
///
/// SiblingFuncletInfo maps each funclet pad to the terminator through which
/// it may unwind to a sibling. Each pad has at most one such successor, so
/// the structure is a set of chains: following a chain either terminates
/// (no map entry, or a pad whose chain was already verified) or revisits a
/// pad on the current chain, which proves a cycle and is diagnosed.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  SmallPtrSet<Instruction *, 8> Visited; // pads whose chain is fully checked
  SmallPtrSet<Instruction *, 8> Active;  // pads on the chain being walked
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and distinct terminators) on the cycle for
          // the diagnostic.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3095
// visitFunction - Verify that a function is ok.
//
// Checks, in order: basic type/linkage invariants, attribute placement,
// calling-convention restrictions, argument/return type rules, metadata
// attachments (declaration vs. definition rules), intrinsic usage, a few
// intrinsic signatures, and finally that every !dbg location leads back to
// the function's subprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // metadata, token, and x86_amx types are intrinsic-only.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): the declaration of MDs is missing from this excerpt
  // (extraction artifact).
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3430
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks that the block has a terminator, that every PHI has exactly one
// (consistent) entry per predecessor, that each instruction's parent
// pointer refers to this block, and that no DbgRecords trail the block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // NOTE(review): the declaration of Values is missing from this excerpt
    // (extraction artifact).
    // Sorting predecessors and PHI entries lets them be compared pairwise.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3490
3491void Verifier::visitTerminator(Instruction &I) {
3492 // Ensure that terminators only exist at the end of the basic block.
3493 Check(&I == I.getParent()->getTerminator(),
3494 "Terminator found in the middle of a basic block!", I.getParent());
3495 visitInstruction(I);
3496}
3497
3498void Verifier::visitCondBrInst(CondBrInst &BI) {
3500 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3501 visitTerminator(BI);
3502}
3503
3504void Verifier::visitReturnInst(ReturnInst &RI) {
3505 Function *F = RI.getParent()->getParent();
3506 unsigned N = RI.getNumOperands();
3507 if (F->getReturnType()->isVoidTy())
3508 Check(N == 0,
3509 "Found return instr that returns non-void in Function of void "
3510 "return type!",
3511 &RI, F->getReturnType());
3512 else
3513 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3514 "Function return type does not match operand "
3515 "type of return inst!",
3516 &RI, F->getReturnType());
3517
3518 // Check to make sure that the return value has necessary properties for
3519 // terminators...
3520 visitTerminator(RI);
3521}
3522
3523void Verifier::visitSwitchInst(SwitchInst &SI) {
3524 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3525 // Check to make sure that all of the constants in the switch instruction
3526 // have the same type as the switched-on value.
3527 Type *SwitchTy = SI.getCondition()->getType();
3528 SmallPtrSet<ConstantInt*, 32> Constants;
3529 for (auto &Case : SI.cases()) {
3530 Check(isa<ConstantInt>(Case.getCaseValue()),
3531 "Case value is not a constant integer.", &SI);
3532 Check(Case.getCaseValue()->getType() == SwitchTy,
3533 "Switch constants must all be same type as switch value!", &SI);
3534 Check(Constants.insert(Case.getCaseValue()).second,
3535 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3536 }
3537
3538 visitTerminator(SI);
3539}
3540
3541void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3543 "Indirectbr operand must have pointer type!", &BI);
3544 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3546 "Indirectbr destinations must all have pointer type!", &BI);
3547
3548 visitTerminator(BI);
3549}
3550
3551void Verifier::visitCallBrInst(CallBrInst &CBI) {
3552 if (!CBI.isInlineAsm()) {
3554 "Callbr: indirect function / invalid signature");
3555 Check(!CBI.hasOperandBundles(),
3556 "Callbr for intrinsics currently doesn't support operand bundles");
3557
3558 switch (CBI.getIntrinsicID()) {
3559 case Intrinsic::amdgcn_kill: {
3560 Check(CBI.getNumIndirectDests() == 1,
3561 "Callbr amdgcn_kill only supports one indirect dest");
3562 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3563 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3564 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3565 Intrinsic::amdgcn_unreachable),
3566 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3567 break;
3568 }
3569 default:
3570 CheckFailed(
3571 "Callbr currently only supports asm-goto and selected intrinsics");
3572 }
3573 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3574 } else {
3575 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3576 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3577
3578 verifyInlineAsmCall(CBI);
3579 }
3580 visitTerminator(CBI);
3581}
3582
3583void Verifier::visitSelectInst(SelectInst &SI) {
3584 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3585 SI.getOperand(2)),
3586 "Invalid operands for select instruction!", &SI);
3587
3588 Check(SI.getTrueValue()->getType() == SI.getType(),
3589 "Select values must have same type as select instruction!", &SI);
3590 visitInstruction(SI);
3591}
3592
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1 is a pass-internal placeholder opcode; reaching the verifier
  // means some pass leaked it into the final IR, so this always fails.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3599
3600void Verifier::visitTruncInst(TruncInst &I) {
3601 // Get the source and destination types
3602 Type *SrcTy = I.getOperand(0)->getType();
3603 Type *DestTy = I.getType();
3604
3605 // Get the size of the types in bits, we'll need this later
3606 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3607 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3608
3609 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3610 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3611 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3612 "trunc source and destination must both be a vector or neither", &I);
3613 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3614
3615 visitInstruction(I);
3616}
3617
3618void Verifier::visitZExtInst(ZExtInst &I) {
3619 // Get the source and destination types
3620 Type *SrcTy = I.getOperand(0)->getType();
3621 Type *DestTy = I.getType();
3622
3623 // Get the size of the types in bits, we'll need this later
3624 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3625 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3626 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3627 "zext source and destination must both be a vector or neither", &I);
3628 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3629 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3630
3631 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3632
3633 visitInstruction(I);
3634}
3635
3636void Verifier::visitSExtInst(SExtInst &I) {
3637 // Get the source and destination types
3638 Type *SrcTy = I.getOperand(0)->getType();
3639 Type *DestTy = I.getType();
3640
3641 // Get the size of the types in bits, we'll need this later
3642 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3643 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3644
3645 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3646 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3647 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3648 "sext source and destination must both be a vector or neither", &I);
3649 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3650
3651 visitInstruction(I);
3652}
3653
3654void Verifier::visitFPTruncInst(FPTruncInst &I) {
3655 // Get the source and destination types
3656 Type *SrcTy = I.getOperand(0)->getType();
3657 Type *DestTy = I.getType();
3658 // Get the size of the types in bits, we'll need this later
3659 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3660 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3661
3662 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3663 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3664 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3665 "fptrunc source and destination must both be a vector or neither", &I);
3666 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3667
3668 visitInstruction(I);
3669}
3670
3671void Verifier::visitFPExtInst(FPExtInst &I) {
3672 // Get the source and destination types
3673 Type *SrcTy = I.getOperand(0)->getType();
3674 Type *DestTy = I.getType();
3675
3676 // Get the size of the types in bits, we'll need this later
3677 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3678 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3679
3680 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3681 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3682 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3683 "fpext source and destination must both be a vector or neither", &I);
3684 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3685
3686 visitInstruction(I);
3687}
3688
3689void Verifier::visitUIToFPInst(UIToFPInst &I) {
3690 // Get the source and destination types
3691 Type *SrcTy = I.getOperand(0)->getType();
3692 Type *DestTy = I.getType();
3693
3694 bool SrcVec = SrcTy->isVectorTy();
3695 bool DstVec = DestTy->isVectorTy();
3696
3697 Check(SrcVec == DstVec,
3698 "UIToFP source and dest must both be vector or scalar", &I);
3699 Check(SrcTy->isIntOrIntVectorTy(),
3700 "UIToFP source must be integer or integer vector", &I);
3701 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3702 &I);
3703
3704 if (SrcVec && DstVec)
3705 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3706 cast<VectorType>(DestTy)->getElementCount(),
3707 "UIToFP source and dest vector length mismatch", &I);
3708
3709 visitInstruction(I);
3710}
3711
3712void Verifier::visitSIToFPInst(SIToFPInst &I) {
3713 // Get the source and destination types
3714 Type *SrcTy = I.getOperand(0)->getType();
3715 Type *DestTy = I.getType();
3716
3717 bool SrcVec = SrcTy->isVectorTy();
3718 bool DstVec = DestTy->isVectorTy();
3719
3720 Check(SrcVec == DstVec,
3721 "SIToFP source and dest must both be vector or scalar", &I);
3722 Check(SrcTy->isIntOrIntVectorTy(),
3723 "SIToFP source must be integer or integer vector", &I);
3724 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3725 &I);
3726
3727 if (SrcVec && DstVec)
3728 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3729 cast<VectorType>(DestTy)->getElementCount(),
3730 "SIToFP source and dest vector length mismatch", &I);
3731
3732 visitInstruction(I);
3733}
3734
3735void Verifier::visitFPToUIInst(FPToUIInst &I) {
3736 // Get the source and destination types
3737 Type *SrcTy = I.getOperand(0)->getType();
3738 Type *DestTy = I.getType();
3739
3740 bool SrcVec = SrcTy->isVectorTy();
3741 bool DstVec = DestTy->isVectorTy();
3742
3743 Check(SrcVec == DstVec,
3744 "FPToUI source and dest must both be vector or scalar", &I);
3745 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3746 Check(DestTy->isIntOrIntVectorTy(),
3747 "FPToUI result must be integer or integer vector", &I);
3748
3749 if (SrcVec && DstVec)
3750 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3751 cast<VectorType>(DestTy)->getElementCount(),
3752 "FPToUI source and dest vector length mismatch", &I);
3753
3754 visitInstruction(I);
3755}
3756
3757void Verifier::visitFPToSIInst(FPToSIInst &I) {
3758 // Get the source and destination types
3759 Type *SrcTy = I.getOperand(0)->getType();
3760 Type *DestTy = I.getType();
3761
3762 bool SrcVec = SrcTy->isVectorTy();
3763 bool DstVec = DestTy->isVectorTy();
3764
3765 Check(SrcVec == DstVec,
3766 "FPToSI source and dest must both be vector or scalar", &I);
3767 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3768 Check(DestTy->isIntOrIntVectorTy(),
3769 "FPToSI result must be integer or integer vector", &I);
3770
3771 if (SrcVec && DstVec)
3772 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3773 cast<VectorType>(DestTy)->getElementCount(),
3774 "FPToSI source and dest vector length mismatch", &I);
3775
3776 visitInstruction(I);
3777}
3778
3779void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3780 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3781 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3782 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3783 V);
3784
3785 if (SrcTy->isVectorTy()) {
3786 auto *VSrc = cast<VectorType>(SrcTy);
3787 auto *VDest = cast<VectorType>(DestTy);
3788 Check(VSrc->getElementCount() == VDest->getElementCount(),
3789 "PtrToAddr vector length mismatch", V);
3790 }
3791
3792 Type *AddrTy = DL.getAddressType(SrcTy);
3793 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3794}
3795
// Verify a ptrtoaddr instruction by delegating to the shared
// pointer-to-address type checks, then the generic instruction checks.
void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
  checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
  visitInstruction(I);
}
3800
3801void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3802 // Get the source and destination types
3803 Type *SrcTy = I.getOperand(0)->getType();
3804 Type *DestTy = I.getType();
3805
3806 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3807
3808 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3809 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3810 &I);
3811
3812 if (SrcTy->isVectorTy()) {
3813 auto *VSrc = cast<VectorType>(SrcTy);
3814 auto *VDest = cast<VectorType>(DestTy);
3815 Check(VSrc->getElementCount() == VDest->getElementCount(),
3816 "PtrToInt Vector length mismatch", &I);
3817 }
3818
3819 visitInstruction(I);
3820}
3821
3822void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3823 // Get the source and destination types
3824 Type *SrcTy = I.getOperand(0)->getType();
3825 Type *DestTy = I.getType();
3826
3827 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3828 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3829
3830 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3831 &I);
3832 if (SrcTy->isVectorTy()) {
3833 auto *VSrc = cast<VectorType>(SrcTy);
3834 auto *VDest = cast<VectorType>(DestTy);
3835 Check(VSrc->getElementCount() == VDest->getElementCount(),
3836 "IntToPtr Vector length mismatch", &I);
3837 }
3838 visitInstruction(I);
3839}
3840
// Verify a bitcast by delegating the full legality test (matching sizes,
// first-class types, no pointer/non-pointer mixing) to CastInst::castIsValid.
void Verifier::visitBitCastInst(BitCastInst &I) {
  Check(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}
3847
3848void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3849 Type *SrcTy = I.getOperand(0)->getType();
3850 Type *DestTy = I.getType();
3851
3852 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3853 &I);
3854 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3855 &I);
3857 "AddrSpaceCast must be between different address spaces", &I);
3858 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3859 Check(SrcVTy->getElementCount() ==
3860 cast<VectorType>(DestTy)->getElementCount(),
3861 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3862 visitInstruction(I);
3863}
3864
3865/// visitPHINode - Ensure that a PHI node is well formed.
3866///
3867void Verifier::visitPHINode(PHINode &PN) {
3868 // Ensure that the PHI nodes are all grouped together at the top of the block.
3869 // This can be tested by checking whether the instruction before this is
3870 // either nonexistent (because this is begin()) or is a PHI node. If not,
3871 // then there is some other instruction before a PHI.
3872 Check(&PN == &PN.getParent()->front() ||
3874 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3875
3876 // Check that a PHI doesn't yield a Token.
3877 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3878
3879 // Check that all of the values of the PHI node have the same type as the
3880 // result.
3881 for (Value *IncValue : PN.incoming_values()) {
3882 Check(PN.getType() == IncValue->getType(),
3883 "PHI node operands are not the same type as the result!", &PN);
3884 }
3885
3886 // All other PHI node constraints are checked in the visitBasicBlock method.
3887
3888 visitInstruction(PN);
3889}
3890
// Common verification for every call-like instruction (call, invoke, callbr):
// argument count/type agreement with the callee's function type, attribute
// well-formedness (immarg, swifterror, preallocated, varargs attrs, ABI
// alignment limits), operand-bundle multiplicity/shape, and debug-location
// requirements. Dispatches to visitIntrinsicCall for intrinsics and to
// verifyInlineAsmCall for inline asm.
void Verifier::visitCallBase(CallBase &Call) {
  // NOTE(review): the Check( line opening this assertion appears truncated in
  // this revision; presumably it tests the called operand's type — confirm.
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // NOTE(review): the initializer of Callee appears truncated here;
  // presumably the called operand stripped of pointer casts — confirm.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getFunctionType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
  // NOTE(review): the Check( line opening this assertion appears truncated.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
        "denormal_fpenv attribute may not apply to call sites", Call);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      // NOTE(review): hasOB's initializer appears truncated; presumably it
      // counts OB_preallocated operand bundles on this call site — confirm.
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

  // NOTE(review): the guard line above this call appears truncated;
  // presumably it binds ID from Call.getIntrinsicID() — confirm.
    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      // NOTE(review): the "else if" arm head above this check appears
      // truncated; presumably it matches OB_clang_arc_attachedcall — confirm.
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
  // NOTE(review): the guard condition of this check appears truncated in this
  // revision.
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4199
4200void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4201 StringRef Context) {
4202 Check(!Attrs.contains(Attribute::InAlloca),
4203 Twine("inalloca attribute not allowed in ") + Context);
4204 Check(!Attrs.contains(Attribute::InReg),
4205 Twine("inreg attribute not allowed in ") + Context);
4206 Check(!Attrs.contains(Attribute::SwiftError),
4207 Twine("swifterror attribute not allowed in ") + Context);
4208 Check(!Attrs.contains(Attribute::Preallocated),
4209 Twine("preallocated attribute not allowed in ") + Context);
4210 Check(!Attrs.contains(Attribute::ByRef),
4211 Twine("byref attribute not allowed in ") + Context);
4212}
4213
4214/// Two types are "congruent" if they are identical, or if they are both pointer
4215/// types with different pointee types and the same address space.
4216static bool isTypeCongruent(Type *L, Type *R) {
4217 if (L == R)
4218 return true;
4221 if (!PL || !PR)
4222 return false;
4223 return PL->getAddressSpace() == PR->getAddressSpace();
4224}
4225
4226static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4227 static const Attribute::AttrKind ABIAttrs[] = {
4228 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4229 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4230 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4231 Attribute::ByRef};
4232 AttrBuilder Copy(C);
4233 for (auto AK : ABIAttrs) {
4234 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4235 if (Attr.isValid())
4236 Copy.addAttribute(Attr);
4237 }
4238
4239 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4240 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4241 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4242 Attrs.hasParamAttr(I, Attribute::ByRef)))
4243 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4244 return Copy;
4245}
4246
4247void Verifier::verifyMustTailCall(CallInst &CI) {
4248 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4249
4250 Function *F = CI.getParent()->getParent();
4251 FunctionType *CallerTy = F->getFunctionType();
4252 FunctionType *CalleeTy = CI.getFunctionType();
4253 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4254 "cannot guarantee tail call due to mismatched varargs", &CI);
4255 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4256 "cannot guarantee tail call due to mismatched return types", &CI);
4257
4258 // - The calling conventions of the caller and callee must match.
4259 Check(F->getCallingConv() == CI.getCallingConv(),
4260 "cannot guarantee tail call due to mismatched calling conv", &CI);
4261
4262 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4263 // or a pointer bitcast followed by a ret instruction.
4264 // - The ret instruction must return the (possibly bitcasted) value
4265 // produced by the call or void.
4266 Value *RetVal = &CI;
4268
4269 // Handle the optional bitcast.
4270 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4271 Check(BI->getOperand(0) == RetVal,
4272 "bitcast following musttail call must use the call", BI);
4273 RetVal = BI;
4274 Next = BI->getNextNode();
4275 }
4276
4277 // Check the return.
4278 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4279 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4280 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4282 "musttail call result must be returned", Ret);
4283
4284 AttributeList CallerAttrs = F->getAttributes();
4285 AttributeList CalleeAttrs = CI.getAttributes();
4286 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4287 CI.getCallingConv() == CallingConv::Tail) {
4288 StringRef CCName =
4289 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4290
4291 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4292 // are allowed in swifttailcc call
4293 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4294 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4295 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4296 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4297 }
4298 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4299 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4300 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4301 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4302 }
4303 // - Varargs functions are not allowed
4304 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4305 " tail call for varargs function");
4306 return;
4307 }
4308
4309 // - The caller and callee prototypes must match. Pointer types of
4310 // parameters or return types may differ in pointee type, but not
4311 // address space.
4312 if (!CI.getIntrinsicID()) {
4313 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4314 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4315 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4316 Check(
4317 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4318 "cannot guarantee tail call due to mismatched parameter types", &CI);
4319 }
4320 }
4321
4322 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4323 // returned, preallocated, and inalloca, must match.
4324 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4325 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4326 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4327 Check(CallerABIAttrs == CalleeABIAttrs,
4328 "cannot guarantee tail call due to mismatched ABI impacting "
4329 "function attributes",
4330 &CI, CI.getOperand(I));
4331 }
4332}
4333
4334void Verifier::visitCallInst(CallInst &CI) {
4335 visitCallBase(CI);
4336
4337 if (CI.isMustTailCall())
4338 verifyMustTailCall(CI);
4339}
4340
4341void Verifier::visitInvokeInst(InvokeInst &II) {
4342 visitCallBase(II);
4343
4344 // Verify that the first non-PHI instruction of the unwind destination is an
4345 // exception handling instruction.
4346 Check(
4347 II.getUnwindDest()->isEHPad(),
4348 "The unwind destination does not have an exception handling instruction!",
4349 &II);
4350
4351 visitTerminator(II);
4352}
4353
4354/// visitUnaryOperator - Check the argument to the unary operator.
4355///
4356void Verifier::visitUnaryOperator(UnaryOperator &U) {
4357 Check(U.getType() == U.getOperand(0)->getType(),
4358 "Unary operators must have same type for"
4359 "operands and result!",
4360 &U);
4361
4362 switch (U.getOpcode()) {
4363 // Check that floating-point arithmetic operators are only used with
4364 // floating-point operands.
4365 case Instruction::FNeg:
4366 Check(U.getType()->isFPOrFPVectorTy(),
4367 "FNeg operator only works with float types!", &U);
4368 break;
4369 default:
4370 llvm_unreachable("Unknown UnaryOperator opcode!");
4371 }
4372
4373 visitInstruction(U);
4374}
4375
4376/// visitBinaryOperator - Check that both arguments to the binary operator are
4377/// of the same type!
4378///
4379void Verifier::visitBinaryOperator(BinaryOperator &B) {
4380 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4381 "Both operands to a binary operator are not of the same type!", &B);
4382
4383 switch (B.getOpcode()) {
4384 // Check that integer arithmetic operators are only used with
4385 // integral operands.
4386 case Instruction::Add:
4387 case Instruction::Sub:
4388 case Instruction::Mul:
4389 case Instruction::SDiv:
4390 case Instruction::UDiv:
4391 case Instruction::SRem:
4392 case Instruction::URem:
4393 Check(B.getType()->isIntOrIntVectorTy(),
4394 "Integer arithmetic operators only work with integral types!", &B);
4395 Check(B.getType() == B.getOperand(0)->getType(),
4396 "Integer arithmetic operators must have same type "
4397 "for operands and result!",
4398 &B);
4399 break;
4400 // Check that floating-point arithmetic operators are only used with
4401 // floating-point operands.
4402 case Instruction::FAdd:
4403 case Instruction::FSub:
4404 case Instruction::FMul:
4405 case Instruction::FDiv:
4406 case Instruction::FRem:
4407 Check(B.getType()->isFPOrFPVectorTy(),
4408 "Floating-point arithmetic operators only work with "
4409 "floating-point types!",
4410 &B);
4411 Check(B.getType() == B.getOperand(0)->getType(),
4412 "Floating-point arithmetic operators must have same type "
4413 "for operands and result!",
4414 &B);
4415 break;
4416 // Check that logical operators are only used with integral operands.
4417 case Instruction::And:
4418 case Instruction::Or:
4419 case Instruction::Xor:
4420 Check(B.getType()->isIntOrIntVectorTy(),
4421 "Logical operators only work with integral types!", &B);
4422 Check(B.getType() == B.getOperand(0)->getType(),
4423 "Logical operators must have same type for operands and result!", &B);
4424 break;
4425 case Instruction::Shl:
4426 case Instruction::LShr:
4427 case Instruction::AShr:
4428 Check(B.getType()->isIntOrIntVectorTy(),
4429 "Shifts only work with integral types!", &B);
4430 Check(B.getType() == B.getOperand(0)->getType(),
4431 "Shift return type must be same as operands!", &B);
4432 break;
4433 default:
4434 llvm_unreachable("Unknown BinaryOperator opcode!");
4435 }
4436
4437 visitInstruction(B);
4438}
4439
4440void Verifier::visitICmpInst(ICmpInst &IC) {
4441 // Check that the operands are the same type
4442 Type *Op0Ty = IC.getOperand(0)->getType();
4443 Type *Op1Ty = IC.getOperand(1)->getType();
4444 Check(Op0Ty == Op1Ty,
4445 "Both operands to ICmp instruction are not of the same type!", &IC);
4446 // Check that the operands are the right type
4447 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4448 "Invalid operand types for ICmp instruction", &IC);
4449 // Check that the predicate is valid.
4450 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4451
4452 visitInstruction(IC);
4453}
4454
4455void Verifier::visitFCmpInst(FCmpInst &FC) {
4456 // Check that the operands are the same type
4457 Type *Op0Ty = FC.getOperand(0)->getType();
4458 Type *Op1Ty = FC.getOperand(1)->getType();
4459 Check(Op0Ty == Op1Ty,
4460 "Both operands to FCmp instruction are not of the same type!", &FC);
4461 // Check that the operands are the right type
4462 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4463 &FC);
4464 // Check that the predicate is valid.
4465 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4466
4467 visitInstruction(FC);
4468}
4469
// Verify an extractelement instruction. NOTE(review): the opening line of the
// operand-validity Check is not visible in this excerpt; presumably it calls
// ExtractElementInst::isValidOperands on the two operands — confirm upstream.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
        "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
4475
4476void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4477 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4478 IE.getOperand(2)),
4479 "Invalid insertelement operands!", &IE);
4480 visitInstruction(IE);
4481}
4482
// Verify a shufflevector instruction. NOTE(review): the opening line of the
// operand-validity Check is not visible in this excerpt; presumably it calls
// ShuffleVectorInst::isValidOperands on the two vectors — confirm upstream.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
                                           SV.getShuffleMask()),
        "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
4489
// Verify a getelementptr: the base must be a pointer (or vector of pointers),
// the source element type must be sized, all indices must be integers that
// validly index the source type, and vector GEPs must agree on element count.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Check(isa<PointerType>(TargetTy),
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  // Structures containing scalable vector types have no fixed layout, so
  // constant field offsets cannot be computed for them.
  if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
    Check(!STy->isScalableTy(),
          "getelementptr cannot target structure that contains scalable vector"
          "type",
          &GEP);
  }

  SmallVector<Value *, 16> Idxs(GEP.indices());
  Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
  // Recompute the indexed type from scratch and require it to match the GEP's
  // declared result element type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());

  Check(PtrTy && GEP.getResultElementType() == ElTy,
        "GEP is not of right type for indices!", &GEP, ElTy);

  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
    // Additional checks for vector GEPs.
    ElementCount GEPWidth = GEPVTy->getElementCount();
    if (GEP.getPointerOperandType()->isVectorTy())
      Check(
          GEPWidth ==
              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
          "Vector GEP result width doesn't match operand's", &GEP);
    // Every vector index must have the same element count as the result.
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
        ElementCount IndexWidth = IndexVTy->getElementCount();
        Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Check(IndexTy->isIntOrIntVectorTy(),
            "All GEP indices should be of integer type");
    }
  }

  // Check that GEP does not index into a vector with non-byte-addressable
  // elements.
  // NOTE(review): the loop header for this gep_type_iterator walk is not
  // visible in this excerpt — confirm upstream.
       GTI != GTE; ++GTI) {
    if (GTI.isVector()) {
      Type *ElemTy = GTI.getIndexedType();
      Check(DL.typeSizeEqualsStoreSize(ElemTy),
            "GEP into vector with non-byte-addressable element type", &GEP);
    }
  }

  Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
        "GEP address space doesn't match type", &GEP);

  visitInstruction(GEP);
}
4552
4553static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4554 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4555}
4556
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The node must hold an even, non-zero number of operands forming
/// [low, high) pairs; pairs must be in order, non-overlapping, and
/// non-contiguous. \p Kind selects the per-kind type rule (i32 for
/// noalias.addrspace, the instruction's scalar type otherwise).
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Each pair is (lower, upper); both must be ConstantInt metadata.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol may cover the full set.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // Ranges may wrap around; with more than two ranges, additionally compare
  // the final range against the first one (the loop above only compares
  // adjacent pairs).
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4621
// Verify a !range node attached to instruction I. The caller must pass the
// exact node returned by I.getMetadata(MD_range); the assert enforces that
// contract, and the shared range-like verifier does the real work.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4627
// Verify !nofpclass metadata: the annotated type must be FP-compatible and the
// single operand must be a nonzero i32 mask using only fcAllFlags bits.
// NOTE(review): the right-hand side of the MaskVal initialization is not
// visible in this excerpt; presumably it extracts operand 0 of the node via
// mdconst::dyn_extract<ConstantInt> — confirm upstream.
void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
                                      Type *Ty) {
  Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
        "nofpclass only applies to floating-point typed loads", I);

  Check(NoFPClass->getNumOperands() == 1,
        "nofpclass must have exactly one entry", NoFPClass);
  ConstantInt *MaskVal =
  Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
        "nofpclass entry must be a constant i32", NoFPClass);
  uint32_t Val = MaskVal->getZExtValue();
  // An all-clear mask would test nothing and is meaningless.
  Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
        I);

  // No bits outside the defined FP-class flag set may be used.
  Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
        "Invalid value for 'nofpclass' test mask", NoFPClass, I);
}
4646
// Verify a !noalias.addrspace node attached to instruction I. The caller must
// pass the exact node returned by I.getMetadata(MD_noalias_addrspace); the
// assert enforces that contract, and the shared range-like verifier applies
// the i32-typed range rules.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4654
4655void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4656 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4657 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4658 Check(!(Size & (Size - 1)),
4659 "atomic memory access' operand must have a power-of-two size", Ty, I);
4660}
4661
// Verify a load: pointer operand, supported alignment, sized result type, and
// — for atomic loads — legal ordering, element type, and access size.
// NOTE(review): three interior lines (the PTy initialization, part of the
// atomic element-type Check, and the non-atomic sync-scope Check) are not
// visible in this excerpt — confirm upstream.
void Verifier::visitLoadInst(LoadInst &LI) {
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Release orderings make no sense on a load.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isByteTy() ||
          "atomic load operand must have integer, byte, pointer, floating "
          "point, or vector type!",
          ElTy, &LI);

    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
        "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4690
// Verify a store: pointer destination, supported alignment, sized value type,
// and — for atomic stores — legal ordering, element type, and access size.
// NOTE(review): one interior line of the atomic element-type Check (presumably
// the floating-point/vector alternative) is not visible in this excerpt.
void Verifier::visitStoreInst(StoreInst &SI) {
  // Operand 1 is the destination address.
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Check(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = SI.getOperand(0)->getType();
  if (MaybeAlign A = SI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
  }
  Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Acquire orderings make no sense on a store.
    Check(SI.getOrdering() != AtomicOrdering::Acquire &&
              SI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Store cannot have Acquire ordering", &SI);
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isByteTy() ||
          "atomic store operand must have integer, byte, pointer, floating "
          "point, or vector type!",
          ElTy, &SI);
    checkAtomicMemAccessSize(ElTy, &SI);
  } else {
    Check(SI.getSyncScopeID() == SyncScope::System,
          "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
4717
4718/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4719void Verifier::verifySwiftErrorCall(CallBase &Call,
4720 const Value *SwiftErrorVal) {
4721 for (const auto &I : llvm::enumerate(Call.args())) {
4722 if (I.value() == SwiftErrorVal) {
4723 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4724 "swifterror value when used in a callsite should be marked "
4725 "with swifterror attribute",
4726 SwiftErrorVal, Call);
4727 }
4728 }
4729}
4730
// Verify every use of a swifterror value: only loads, stores (as the address
// operand), and swifterror call arguments are allowed.
// NOTE(review): the opening line of the first Check (presumably
// isa<LoadInst>/isa<StoreInst>/isa<CallInst> alternatives) is not visible in
// this excerpt — confirm upstream.
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
              isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites must additionally mark the parameter swifterror.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4750
// Verify an alloca: sized allocated type, supported alignment, plus the
// swifterror and AMDGPU address-space restrictions.
// NOTE(review): four interior lines (the target-extension-type Check's
// condition, the array-size-type Check's condition, the swifterror
// array-allocation Check's condition, and the AMDGPU addrspace Check's
// condition) are not visible in this excerpt — confirm upstream.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
          "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4780
4781void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4782 Type *ElTy = CXI.getOperand(1)->getType();
4783 Check(ElTy->isIntOrPtrTy(),
4784 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4785 checkAtomicMemAccessSize(ElTy, &CXI);
4786 visitInstruction(CXI);
4787}
4788
// Verify an atomicrmw: non-unordered ordering, an operand type matching the
// operation class (int/FP/pointer for xchg, FP for FP ops, integer for the
// rest), and the atomic access-size rules.
// NOTE(review): the first line of the FP-operation Check and the condition of
// the final binary-operation-range Check are not visible in this excerpt —
// confirm upstream.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  // Operand 1 is the value operand; its type is the memory operation type.
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
        "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
            " operand must have floating-point or fixed vector of floating-point "
            "type!",
        &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4817
4818void Verifier::visitFenceInst(FenceInst &FI) {
4819 const AtomicOrdering Ordering = FI.getOrdering();
4820 Check(Ordering == AtomicOrdering::Acquire ||
4821 Ordering == AtomicOrdering::Release ||
4822 Ordering == AtomicOrdering::AcquireRelease ||
4823 Ordering == AtomicOrdering::SequentiallyConsistent,
4824 "fence instructions may only have acquire, release, acq_rel, or "
4825 "seq_cst ordering.",
4826 &FI);
4827 visitInstruction(FI);
4828}
4829
// Verify an extractvalue: the indexed subtype of the aggregate must equal the
// instruction's result type. NOTE(review): the opening line of the Check
// (presumably ExtractValueInst::getIndexedType on the aggregate operand's
// type) is not visible in this excerpt — confirm upstream.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
                                     EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4837
// Verify an insertvalue: the indexed subtype of the aggregate must equal the
// type of the value being inserted (operand 1). NOTE(review): the opening
// line of the Check is not visible in this excerpt — confirm upstream.
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
                                    IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4846
4847static Value *getParentPad(Value *EHPad) {
4848 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4849 return FPI->getParentPad();
4850
4851 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4852}
4853
4854void Verifier::visitEHPadPredecessors(Instruction &I) {
4855 assert(I.isEHPad());
4856
4857 BasicBlock *BB = I.getParent();
4858 Function *F = BB->getParent();
4859
4860 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4861
4862 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4863 // The landingpad instruction defines its parent as a landing pad block. The
4864 // landing pad block may be branched to only by the unwind edge of an
4865 // invoke.
4866 for (BasicBlock *PredBB : predecessors(BB)) {
4867 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4868 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4869 "Block containing LandingPadInst must be jumped to "
4870 "only by the unwind edge of an invoke.",
4871 LPI);
4872 }
4873 return;
4874 }
4875 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4876 if (!pred_empty(BB))
4877 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4878 "Block containg CatchPadInst must be jumped to "
4879 "only by its catchswitch.",
4880 CPI);
4881 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4882 "Catchswitch cannot unwind to one of its catchpads",
4883 CPI->getCatchSwitch(), CPI);
4884 return;
4885 }
4886
4887 // Verify that each pred has a legal terminator with a legal to/from EH
4888 // pad relationship.
4889 Instruction *ToPad = &I;
4890 Value *ToPadParent = getParentPad(ToPad);
4891 for (BasicBlock *PredBB : predecessors(BB)) {
4892 Instruction *TI = PredBB->getTerminator();
4893 Value *FromPad;
4894 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4895 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4896 "EH pad must be jumped to via an unwind edge", ToPad, II);
4897 auto *CalledFn =
4898 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4899 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4900 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4901 continue;
4902 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4903 FromPad = Bundle->Inputs[0];
4904 else
4905 FromPad = ConstantTokenNone::get(II->getContext());
4906 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4907 FromPad = CRI->getOperand(0);
4908 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4909 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4910 FromPad = CSI;
4911 } else {
4912 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4913 }
4914
4915 // The edge may exit from zero or more nested pads.
4916 SmallPtrSet<Value *, 8> Seen;
4917 for (;; FromPad = getParentPad(FromPad)) {
4918 Check(FromPad != ToPad,
4919 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4920 if (FromPad == ToPadParent) {
4921 // This is a legal unwind edge.
4922 break;
4923 }
4924 Check(!isa<ConstantTokenNone>(FromPad),
4925 "A single unwind edge may only enter one EH pad", TI);
4926 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4927 FromPad);
4928
4929 // This will be diagnosed on the corresponding instruction already. We
4930 // need the extra check here to make sure getParentPad() works.
4931 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4932 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4933 }
4934 }
4935}
4936
// Verify a landingpad: it needs clauses or a cleanup marker, a consistent
// result type across the function, a personality on the function, first
// non-PHI position, and well-typed catch/filter clauses.
// NOTE(review): the condition of the filter-clause Check (presumably
// isa<ArrayType> on the clause type) is not visible in this excerpt.
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) in one function must agree on result type;
  // the first one seen fixes the type for the rest.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4976
// Verify a resume: the function must have a personality, and the resumed
// value's type must match the function-wide landingpad result type.
// NOTE(review): the opening line of the personality Check is not visible in
// this excerpt — confirm upstream.
void Verifier::visitResumeInst(ResumeInst &RI) {
        "ResumeInst needs to be in a function with a personality.", &RI);

  // Resume shares the function-wide landingpad result type invariant.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4991
// Verify a catchpad: personality present, parent is a catchswitch, first
// non-PHI position, and argument operands limited to allocas/constants.
// NOTE(review): the conditions of the parent-pad Check and the argument
// all_of Check open on lines not visible in this excerpt — confirm upstream.
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

              [](Use &U) {
                auto *V = U.get();
                return isa<Constant>(V) || isa<AllocaInst>(V);
              }),
        "Argument operand must be alloca or constant.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
5018
5019void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
5020 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
5021 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
5022 CatchReturn.getOperand(0));
5023
5024 visitTerminator(CatchReturn);
5025}
5026
5027void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
5028 BasicBlock *BB = CPI.getParent();
5029
5030 Function *F = BB->getParent();
5031 Check(F->hasPersonalityFn(),
5032 "CleanupPadInst needs to be in a function with a personality.", &CPI);
5033
5034 // The cleanuppad instruction must be the first non-PHI instruction in the
5035 // block.
5036 Check(&*BB->getFirstNonPHIIt() == &CPI,
5037 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5038
5039 auto *ParentPad = CPI.getParentPad();
5040 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5041 "CleanupPadInst has an invalid parent.", &CPI);
5042
5043 visitEHPadPredecessors(CPI);
5044 visitFuncletPadInst(CPI);
5045}
5046
/// Verify funclet-pad unwind consistency for \p FPI: walk all users of the
/// pad (and, via the worklist, of nested cleanuppads) to find every unwind
/// edge that exits FPI, and require that all such edges agree on a single
/// unwind destination. For catchpads, that destination must additionally
/// match the parent catchswitch's unwind destination. Along the way this
/// records sibling-cleanup unwind info for verifySiblingFuncletUnwinds.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // UnwindPad is the pad (or "none" token) this edge unwinds to; ExitsFPI
      // records whether the edge leaves FPI itself.
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // For a catchpad, any exiting unwind edge must also match where the parent
  // catchswitch unwinds.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5206
5207void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5208 BasicBlock *BB = CatchSwitch.getParent();
5209
5210 Function *F = BB->getParent();
5211 Check(F->hasPersonalityFn(),
5212 "CatchSwitchInst needs to be in a function with a personality.",
5213 &CatchSwitch);
5214
5215 // The catchswitch instruction must be the first non-PHI instruction in the
5216 // block.
5217 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5218 "CatchSwitchInst not the first non-PHI instruction in the block.",
5219 &CatchSwitch);
5220
5221 auto *ParentPad = CatchSwitch.getParentPad();
5222 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5223 "CatchSwitchInst has an invalid parent.", ParentPad);
5224
5225 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5226 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5227 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5228 "CatchSwitchInst must unwind to an EH block which is not a "
5229 "landingpad.",
5230 &CatchSwitch);
5231
5232 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5233 if (getParentPad(&*I) == ParentPad)
5234 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5235 }
5236
5237 Check(CatchSwitch.getNumHandlers() != 0,
5238 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5239
5240 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5241 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5242 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5243 }
5244
5245 visitEHPadPredecessors(CatchSwitch);
5246 visitTerminator(CatchSwitch);
5247}
5248
/// Verify a cleanupret: its pad operand must be a cleanuppad, and any unwind
/// destination must start with a non-landingpad EH pad.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  // When the cleanupret unwinds somewhere (rather than to the caller), the
  // destination's first non-PHI instruction must be an EH pad of the funclet
  // flavor (not a landingpad).
  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5264
5265void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5266 Instruction *Op = cast<Instruction>(I.getOperand(i));
5267 // If the we have an invalid invoke, don't try to compute the dominance.
5268 // We already reject it in the invoke specific checks and the dominance
5269 // computation doesn't handle multiple edges.
5270 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5271 if (II->getNormalDest() == II->getUnwindDest())
5272 return;
5273 }
5274
5275 // Quick check whether the def has already been encountered in the same block.
5276 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5277 // uses are defined to happen on the incoming edge, not at the instruction.
5278 //
5279 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5280 // wrapping an SSA value, assert that we've already encountered it. See
5281 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5282 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5283 return;
5284
5285 const Use &U = I.getOperandUse(i);
5286 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5287}
5288
/// Verify !dereferenceable / !dereferenceable_or_null attachments: a pointer
/// result, an allowed instruction kind, and a single i64 byte-count operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The payload is the number of dereferenceable bytes and must be an i64
  // constant.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5308
5309void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5310 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5311 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5312 &I);
5313 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5314}
5315
/// Verify a !prof attachment: a name string first, then either
/// "branch_weights" (operand count matched against the instruction's
/// successor/destination count) or value-profile ("VP") data.
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // For instructions that can carry branch weights, returns how many weight
  // operands are expected; returns 0 for instructions that cannot.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    // Invokes are special: they may carry one weight (taken) or two
    // (taken + unwind).
    if (isa<InvokeInst>(&I)) {
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Every weight operand (after any leading marker) must be present and be
    // a constant integer.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
            "!prof brunch_weights operand is not a const int");
      // FIXME: "brunch_weights" above is a typo for "branch_weights" in the
      // diagnostic text; fixing it requires updating any tests that match it.
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    // The kind must be one of the InstrProfValueKind enumerators.
    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    // Operands after "VP" come in (value, count) pairs, so the total operand
    // count (including the name and kind) must be odd.
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5399
/// Verify a !DIAssignID attachment and that every debug-info user of the ID
/// (intrinsic or DbgVariableRecord form) is an assign marker in the same
/// function as the annotated instruction.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Same constraints for the non-intrinsic DbgVariableRecord users of the ID.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5431
/// Verify !mmra (memory model relaxation annotation) metadata: either a
/// single tag or a tuple whose operands are all tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag: must be a tuple where every operand is itself a tag.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5449
/// Verify memprof call stack metadata: a non-empty list of constant integers
/// (location hashes).
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
        "call stack metadata operand should be constant integer", Op);
}
5460
/// Verify !memprof metadata on a call: a list of MemInfoBlock (MIB) nodes,
/// each with a call stack, an MDString tag, and optional integer-pair nodes.
void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
  Check(MD->getNumOperands() >= 1,
        "!memprof annotations should have at least 1 metadata operand "
        "(MemInfoBlock)",
        MD);

  // Check each MIB
  for (auto &MIBOp : MD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // NOTE(review): MIB is dereferenced without a null check below; a
    // non-MDNode operand would make dyn_cast return null — confirm whether
    // upstream construction guarantees MDNode operands here.
    // The first operand of an MIB should be the call stack metadata.
    // The rest of the operands should be MDString tags, and there should be
    // at least one.
    Check(MIB->getNumOperands() >= 2,
          "Each !memprof MemInfoBlock should have at least 2 operands", MIB);

    // Check call stack metadata (first operand).
    Check(MIB->getOperand(0) != nullptr,
          "!memprof MemInfoBlock first operand should not be null", MIB);
    Check(isa<MDNode>(MIB->getOperand(0)),
          "!memprof MemInfoBlock first operand should be an MDNode", MIB);
    MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
    visitCallStackMetadata(StackMD);

    // The second MIB operand should be MDString.
          "!memprof MemInfoBlock second operand should be an MDString", MIB);

    // Any remaining should be MDNode that are pairs of integers
    // (note: loop variable I shadows the Instruction parameter).
    for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
      MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
      Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
            MIB);
      Check(OpNode->getNumOperands() == 2,
            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
            "operands",
            MIB);
      // Check that all of Op's operands are ConstantInt.
      Check(llvm::all_of(OpNode->operands(),
                         [](const MDOperand &Op) {
                           return mdconst::hasa<ConstantInt>(Op);
                         }),
            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
            "ConstantInt operands",
            MIB);
    }
  }
}
5509
5510void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5511 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5512 // Verify the partial callstack annotated from memprof profiles. This callsite
5513 // is a part of a profiled allocation callstack.
5514 visitCallStackMetadata(MD);
5515}
5516
5517static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5518 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5519 return isa<ConstantInt>(VAL->getValue());
5520 return false;
5521}
5522
/// Verify !callee_type metadata: calls only, and every list element must be a
/// well-formed generalized type metadata node.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    // Each node is a pair: a zero offset followed by a generalized type
    // identifier string.
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5543
5544void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5545 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5546 Check(Annotation->getNumOperands() >= 1,
5547 "annotation must have at least one operand");
5548 for (const MDOperand &Op : Annotation->operands()) {
5549 bool TupleOfStrings =
5550 isa<MDTuple>(Op.get()) &&
5551 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5552 return isa<MDString>(Annotation.get());
5553 });
5554 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5555 "operands must be a string or a tuple of strings");
5556 }
5557}
5558
/// Verify one alias scope node: two or three operands; a self-reference or
/// string first; a valid domain node second; an optional name string third.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // The domain node has the same shape: a self-reference or string first,
  // plus an optional name string.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5582
5583void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5584 for (const MDOperand &Op : MD->operands()) {
5585 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5586 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5587 visitAliasScopeMetadata(OpMD);
5588 }
5589}
5590
5591void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5592 auto IsValidAccessScope = [](const MDNode *MD) {
5593 return MD->getNumOperands() == 0 && MD->isDistinct();
5594 };
5595
5596 // It must be either an access scope itself...
5597 if (IsValidAccessScope(MD))
5598 return;
5599
5600 // ...or a list of access scopes.
5601 for (const MDOperand &Op : MD->operands()) {
5602 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5603 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5604 Check(IsValidAccessScope(OpMD),
5605 "Access scope list contains invalid access scope", MD);
5606 }
5607}
5608
5609void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5610 static const char *ValidArgs[] = {"address_is_null", "address",
5611 "read_provenance", "provenance"};
5612
5613 auto *SI = dyn_cast<StoreInst>(&I);
5614 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5615 Check(SI->getValueOperand()->getType()->isPointerTy(),
5616 "!captures metadata can only be applied to store with value operand of "
5617 "pointer type",
5618 &I);
5619 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5620 &I);
5621
5622 for (Metadata *Op : Captures->operands()) {
5623 auto *Str = dyn_cast<MDString>(Op);
5624 Check(Str, "!captures metadata must be a list of strings", &I);
5625 Check(is_contained(ValidArgs, Str->getString()),
5626 "invalid entry in !captures metadata", &I, Str);
5627 }
5628}
5629
/// Verify !alloc_token: calls only, exactly two operands — a string followed
/// by an integer constant.
void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
        "expected integer constant", MD);
}
5637
/// Verify !inline_history: calls only; each operand is either null (the
/// recorded function was erased) or must resolve to a function after
/// stripping pointer casts and aliases.
void Verifier::visitInlineHistoryMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!inline_history should only exist on calls", &I);
  for (Metadata *Op : MD->operands()) {
    // Can be null when a function is erased.
    if (!Op)
      continue;
                    ->getValue()
                    ->stripPointerCastsAndAliases()),
          "!inline_history operands must be functions or null", MD);
  }
}
5651
5652/// verifyInstruction - Verify that an instruction is well formed.
5653///
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated in unreachable code, which is not in SSA
      // form anyway.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Per-operand checks: operands must be first-class values from the same
  // function/module, and must dominate this use.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may appear as invoke/callbr
      // callees; anything else must be a plain call.
      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this very instruction.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
      visitConstantExprsRecursively(C);
    }
  }

  // Validate each recognized metadata attachment kind on this instruction.
  // !fpmath: one operand, a positive finite IEEE-single accuracy value.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
    Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
    visitNoFPClassMetadata(I, MD, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  // !nonnull: pointer-typed loads only, and the node carries no operands.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  // !align: one i64 operand that is a power of two within the implementation
  // limit, on a pointer-typed result.
  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_inline_history))
    visitInlineHistoryMetadata(I, MD);

  // !dbg must be a DILocation; additionally, a location using an atom group
  // requires Key Instructions to be enabled on its subprogram.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Generic validation of every attachment; debug locations are permitted
  // only inside !dbg and !llvm.loop.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Remember this def for the same-block fast path in verifyDominatesUse.
  InstsInThisBlock.insert(&I);
}
5914
5915/// Allow intrinsics to be verified in different ways.
5916void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5918 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5919 IF);
5920
5921 // Verify that the intrinsic prototype lines up with what the .td files
5922 // describe.
5923 FunctionType *IFTy = IF->getFunctionType();
5924
5925 // Walk the descriptors to extract overloaded types.
5926 std::string ErrMsg;
5927 raw_string_ostream ErrOS(ErrMsg);
5928 SmallVector<Type *, 4> OverloadTys;
5929 bool IsValid = Intrinsic::isSignatureValid(ID, IFTy, OverloadTys, ErrOS);
5930 Check(IsValid, ErrMsg, IF);
5931
5932 // Now that we have the intrinsic ID and the actual argument types (and we
5933 // know they are legal for the intrinsic!) get the intrinsic name through the
5934 // usual means. This allows us to verify the mangling of argument types into
5935 // the name.
5936 const std::string ExpectedName =
5937 Intrinsic::getName(ID, OverloadTys, IF->getParent(), IFTy);
5938 Check(ExpectedName == IF->getName(),
5939 "Intrinsic name not mangled correctly for type arguments! "
5940 "Should be: " +
5941 ExpectedName,
5942 IF);
5943
5944 // If the intrinsic takes MDNode arguments, verify that they are either global
5945 // or are local to *this* function.
5946 for (Value *V : Call.args()) {
5947 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5948 visitMetadataAsValue(*MD, Call.getCaller());
5949 if (auto *Const = dyn_cast<Constant>(V))
5950 Check(!Const->getType()->isX86_AMXTy(),
5951 "const x86_amx is not allowed in argument!");
5952 }
5953
5954 switch (ID) {
5955 default:
5956 break;
5957 case Intrinsic::assume: {
5958 if (Call.hasOperandBundles()) {
5960 Check(Cond && Cond->isOne(),
5961 "assume with operand bundles must have i1 true condition", Call);
5962 }
5963 for (auto &Elem : Call.bundle_op_infos()) {
5964 unsigned ArgCount = Elem.End - Elem.Begin;
5965 // Separate storage assumptions are special insofar as they're the only
5966 // operand bundles allowed on assumes that aren't parameter attributes.
5967 if (Elem.Tag->getKey() == "separate_storage") {
5968 Check(ArgCount == 2,
5969 "separate_storage assumptions should have 2 arguments", Call);
5970 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5971 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5972 "arguments to separate_storage assumptions should be pointers",
5973 Call);
5974 continue;
5975 }
5976 Check(Elem.Tag->getKey() == "ignore" ||
5977 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5978 "tags must be valid attribute names", Call);
5979 Attribute::AttrKind Kind =
5980 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5981 if (Kind == Attribute::Alignment) {
5982 Check(ArgCount <= 3 && ArgCount >= 2,
5983 "alignment assumptions should have 2 or 3 arguments", Call);
5984 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5985 "first argument should be a pointer", Call);
5986 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5987 "second argument should be an integer", Call);
5988 if (ArgCount == 3)
5989 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5990 "third argument should be an integer if present", Call);
5991 continue;
5992 }
5993 if (Kind == Attribute::Dereferenceable) {
5994 Check(ArgCount == 2,
5995 "dereferenceable assumptions should have 2 arguments", Call);
5996 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5997 "first argument should be a pointer", Call);
5998 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5999 "second argument should be an integer", Call);
6000 continue;
6001 }
6002 Check(ArgCount <= 2, "too many arguments", Call);
6003 if (Kind == Attribute::None)
6004 break;
6005 if (Attribute::isIntAttrKind(Kind)) {
6006 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
6007 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
6008 "the second argument should be a constant integral value", Call);
6009 } else if (Attribute::canUseAsParamAttr(Kind)) {
6010 Check((ArgCount) == 1, "this attribute should have one argument", Call);
6011 } else if (Attribute::canUseAsFnAttr(Kind)) {
6012 Check((ArgCount) == 0, "this attribute has no argument", Call);
6013 }
6014 }
6015 break;
6016 }
6017 case Intrinsic::ucmp:
6018 case Intrinsic::scmp: {
6019 Type *SrcTy = Call.getOperand(0)->getType();
6020 Type *DestTy = Call.getType();
6021
6022 Check(DestTy->getScalarSizeInBits() >= 2,
6023 "result type must be at least 2 bits wide", Call);
6024
6025 bool IsDestTypeVector = DestTy->isVectorTy();
6026 Check(SrcTy->isVectorTy() == IsDestTypeVector,
6027 "ucmp/scmp argument and result types must both be either vector or "
6028 "scalar types",
6029 Call);
6030 if (IsDestTypeVector) {
6031 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6032 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6033 Check(SrcVecLen == DestVecLen,
6034 "return type and arguments must have the same number of "
6035 "elements",
6036 Call);
6037 }
6038 break;
6039 }
6040 case Intrinsic::coro_begin:
6041 case Intrinsic::coro_begin_custom_abi:
6043 "id argument of llvm.coro.begin must refer to coro.id");
6044 break;
6045 case Intrinsic::coro_id: {
6047 "align argument only accepts constants");
6048 auto *Promise = Call.getArgOperand(1);
6049 Check(isa<ConstantPointerNull>(Promise) || isa<AllocaInst>(Promise),
6050 "promise argument must refer to an alloca");
6051
6052 auto *CoroAddr = Call.getArgOperand(2)->stripPointerCasts();
6053 bool BeforeCoroEarly = isa<ConstantPointerNull>(CoroAddr);
6054 Check(BeforeCoroEarly || isa<Function>(CoroAddr),
6055 "coro argument must refer to a function");
6056
6057 auto *InfoArg = Call.getArgOperand(3);
6058 bool BeforeCoroSplit = isa<ConstantPointerNull>(InfoArg);
6059 if (BeforeCoroSplit)
6060 break;
6061
6062 Check(!BeforeCoroEarly, "cannot run CoroSplit before CoroEarly");
6063 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6064 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6065 "info argument of llvm.coro.id must refer to an initialized "
6066 "constant");
6067 Constant *Init = GV->getInitializer();
6069 "info argument of llvm.coro.id must refer to either a struct or "
6070 "an array");
6071 break;
6072 }
6073 case Intrinsic::is_fpclass: {
6074 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6075 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6076 "unsupported bits for llvm.is.fpclass test mask");
6077 break;
6078 }
6079 case Intrinsic::fptrunc_round: {
6080 // Check the rounding mode
6081 Metadata *MD = nullptr;
6083 if (MAV)
6084 MD = MAV->getMetadata();
6085
6086 Check(MD != nullptr, "missing rounding mode argument", Call);
6087
6088 Check(isa<MDString>(MD),
6089 ("invalid value for llvm.fptrunc.round metadata operand"
6090 " (the operand should be a string)"),
6091 MD);
6092
6093 std::optional<RoundingMode> RoundMode =
6094 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6095 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6096 "unsupported rounding mode argument", Call);
6097 break;
6098 }
6099 case Intrinsic::convert_to_arbitrary_fp: {
6100 // Check that vector element counts are consistent.
6101 Type *ValueTy = Call.getArgOperand(0)->getType();
6102 Type *IntTy = Call.getType();
6103
6104 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6105 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6106 Check(IntVecTy,
6107 "if floating-point operand is a vector, integer operand must also "
6108 "be a vector",
6109 Call);
6110 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6111 "floating-point and integer vector operands must have the same "
6112 "element count",
6113 Call);
6114 }
6115
6116 // Check interpretation metadata (argoperand 1).
6117 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6118 Check(InterpMAV, "missing interpretation metadata operand", Call);
6119 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6120 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6121 StringRef Interp = InterpStr->getString();
6122
6123 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6124 Call);
6125
6126 // Valid interpretation strings: mini-float format names.
6128 "unsupported interpretation metadata string", Call);
6129
6130 // Check rounding mode metadata (argoperand 2).
6131 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6132 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6133 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6134 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6135
6136 std::optional<RoundingMode> RM =
6137 convertStrToRoundingMode(RoundingStr->getString());
6138 Check(RM && *RM != RoundingMode::Dynamic,
6139 "unsupported rounding mode argument", Call);
6140 break;
6141 }
6142 case Intrinsic::convert_from_arbitrary_fp: {
6143 // Check that vector element counts are consistent.
6144 Type *IntTy = Call.getArgOperand(0)->getType();
6145 Type *ValueTy = Call.getType();
6146
6147 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6148 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6149 Check(IntVecTy,
6150 "if floating-point operand is a vector, integer operand must also "
6151 "be a vector",
6152 Call);
6153 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6154 "floating-point and integer vector operands must have the same "
6155 "element count",
6156 Call);
6157 }
6158
6159 // Check interpretation metadata (argoperand 1).
6160 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6161 Check(InterpMAV, "missing interpretation metadata operand", Call);
6162 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6163 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6164 StringRef Interp = InterpStr->getString();
6165
6166 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6167 Call);
6168
6169 // Valid interpretation strings: mini-float format names.
6171 "unsupported interpretation metadata string", Call);
6172 break;
6173 }
6174#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6175#include "llvm/IR/VPIntrinsics.def"
6176#undef BEGIN_REGISTER_VP_INTRINSIC
6177 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6178 break;
6179#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6180 case Intrinsic::INTRINSIC:
6181#include "llvm/IR/ConstrainedOps.def"
6182#undef INSTRUCTION
6183 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6184 break;
6185 case Intrinsic::dbg_declare: // llvm.dbg.declare
6186 case Intrinsic::dbg_value: // llvm.dbg.value
6187 case Intrinsic::dbg_assign: // llvm.dbg.assign
6188 case Intrinsic::dbg_label: // llvm.dbg.label
6189 // We no longer interpret debug intrinsics (the old variable-location
6190 // design). They're meaningless as far as LLVM is concerned; we could make
6191 // it an error for them to appear, but it's possible we'll have users
6192 // converting back to intrinsics for the foreseeable future (such as DXIL),
6193 // so tolerate their existence.
6194 break;
6195 case Intrinsic::memcpy:
6196 case Intrinsic::memcpy_inline:
6197 case Intrinsic::memmove:
6198 case Intrinsic::memset:
6199 case Intrinsic::memset_inline:
6200 break;
6201 case Intrinsic::experimental_memset_pattern: {
6202 const auto Memset = cast<MemSetPatternInst>(&Call);
6203 Check(Memset->getValue()->getType()->isSized(),
6204 "unsized types cannot be used as memset patterns", Call);
6205 break;
6206 }
6207 case Intrinsic::memcpy_element_unordered_atomic:
6208 case Intrinsic::memmove_element_unordered_atomic:
6209 case Intrinsic::memset_element_unordered_atomic: {
6210 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6211
6212 ConstantInt *ElementSizeCI =
6213 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6214 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6215 Check(ElementSizeVal.isPowerOf2(),
6216 "element size of the element-wise atomic memory intrinsic "
6217 "must be a power of 2",
6218 Call);
6219
6220 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6221 return Alignment && ElementSizeVal.ule(Alignment->value());
6222 };
6223 Check(IsValidAlignment(AMI->getDestAlign()),
6224 "incorrect alignment of the destination argument", Call);
6225 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6226 Check(IsValidAlignment(AMT->getSourceAlign()),
6227 "incorrect alignment of the source argument", Call);
6228 }
6229 break;
6230 }
6231 case Intrinsic::call_preallocated_setup: {
6232 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6233 bool FoundCall = false;
6234 for (User *U : Call.users()) {
6235 auto *UseCall = dyn_cast<CallBase>(U);
6236 Check(UseCall != nullptr,
6237 "Uses of llvm.call.preallocated.setup must be calls");
6238 Intrinsic::ID IID = UseCall->getIntrinsicID();
6239 if (IID == Intrinsic::call_preallocated_arg) {
6240 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6241 Check(AllocArgIndex != nullptr,
6242 "llvm.call.preallocated.alloc arg index must be a constant");
6243 auto AllocArgIndexInt = AllocArgIndex->getValue();
6244 Check(AllocArgIndexInt.sge(0) &&
6245 AllocArgIndexInt.slt(NumArgs->getValue()),
6246 "llvm.call.preallocated.alloc arg index must be between 0 and "
6247 "corresponding "
6248 "llvm.call.preallocated.setup's argument count");
6249 } else if (IID == Intrinsic::call_preallocated_teardown) {
6250 // nothing to do
6251 } else {
6252 Check(!FoundCall, "Can have at most one call corresponding to a "
6253 "llvm.call.preallocated.setup");
6254 FoundCall = true;
6255 size_t NumPreallocatedArgs = 0;
6256 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6257 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6258 ++NumPreallocatedArgs;
6259 }
6260 }
6261 Check(NumPreallocatedArgs != 0,
6262 "cannot use preallocated intrinsics on a call without "
6263 "preallocated arguments");
6264 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6265 "llvm.call.preallocated.setup arg size must be equal to number "
6266 "of preallocated arguments "
6267 "at call site",
6268 Call, *UseCall);
6269 // getOperandBundle() cannot be called if more than one of the operand
6270 // bundle exists. There is already a check elsewhere for this, so skip
6271 // here if we see more than one.
6272 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6273 1) {
6274 return;
6275 }
6276 auto PreallocatedBundle =
6277 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6278 Check(PreallocatedBundle,
6279 "Use of llvm.call.preallocated.setup outside intrinsics "
6280 "must be in \"preallocated\" operand bundle");
6281 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6282 "preallocated bundle must have token from corresponding "
6283 "llvm.call.preallocated.setup");
6284 }
6285 }
6286 break;
6287 }
6288 case Intrinsic::call_preallocated_arg: {
6289 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6290 Check(Token &&
6291 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6292 "llvm.call.preallocated.arg token argument must be a "
6293 "llvm.call.preallocated.setup");
6294 Check(Call.hasFnAttr(Attribute::Preallocated),
6295 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6296 "call site attribute");
6297 break;
6298 }
6299 case Intrinsic::call_preallocated_teardown: {
6300 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6301 Check(Token &&
6302 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6303 "llvm.call.preallocated.teardown token argument must be a "
6304 "llvm.call.preallocated.setup");
6305 break;
6306 }
6307 case Intrinsic::gcroot:
6308 case Intrinsic::gcwrite:
6309 case Intrinsic::gcread:
6310 if (ID == Intrinsic::gcroot) {
6311 AllocaInst *AI =
6313 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6315 "llvm.gcroot parameter #2 must be a constant.", Call);
6316 if (!AI->getAllocatedType()->isPointerTy()) {
6318 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6319 "or argument #2 must be a non-null constant.",
6320 Call);
6321 }
6322 }
6323
6324 Check(Call.getParent()->getParent()->hasGC(),
6325 "Enclosing function does not use GC.", Call);
6326 break;
6327 case Intrinsic::init_trampoline:
6329 "llvm.init_trampoline parameter #2 must resolve to a function.",
6330 Call);
6331 break;
6332 case Intrinsic::prefetch:
6333 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6334 "rw argument to llvm.prefetch must be 0-1", Call);
6335 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6336 "locality argument to llvm.prefetch must be 0-3", Call);
6337 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6338 "cache type argument to llvm.prefetch must be 0-1", Call);
6339 break;
6340 case Intrinsic::reloc_none: {
6342 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6343 "llvm.reloc.none argument must be a metadata string", &Call);
6344 break;
6345 }
6346 case Intrinsic::stackprotector:
6348 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6349 break;
6350 case Intrinsic::localescape: {
6351 BasicBlock *BB = Call.getParent();
6352 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6353 Call);
6354 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6355 Call);
6356 for (Value *Arg : Call.args()) {
6357 if (isa<ConstantPointerNull>(Arg))
6358 continue; // Null values are allowed as placeholders.
6359 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6360 Check(AI && AI->isStaticAlloca(),
6361 "llvm.localescape only accepts static allocas", Call);
6362 }
6363 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6364 SawFrameEscape = true;
6365 break;
6366 }
6367 case Intrinsic::localrecover: {
6369 Function *Fn = dyn_cast<Function>(FnArg);
6370 Check(Fn && !Fn->isDeclaration(),
6371 "llvm.localrecover first "
6372 "argument must be function defined in this module",
6373 Call);
6374 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6375 auto &Entry = FrameEscapeInfo[Fn];
6376 Entry.second = unsigned(
6377 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6378 break;
6379 }
6380
6381 case Intrinsic::experimental_gc_statepoint:
6382 if (auto *CI = dyn_cast<CallInst>(&Call))
6383 Check(!CI->isInlineAsm(),
6384 "gc.statepoint support for inline assembly unimplemented", CI);
6385 Check(Call.getParent()->getParent()->hasGC(),
6386 "Enclosing function does not use GC.", Call);
6387
6388 verifyStatepoint(Call);
6389 break;
6390 case Intrinsic::experimental_gc_result: {
6391 Check(Call.getParent()->getParent()->hasGC(),
6392 "Enclosing function does not use GC.", Call);
6393
6394 auto *Statepoint = Call.getArgOperand(0);
6395 if (isa<UndefValue>(Statepoint))
6396 break;
6397
6398 // Are we tied to a statepoint properly?
6399 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6400 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6401 Intrinsic::experimental_gc_statepoint,
6402 "gc.result operand #1 must be from a statepoint", Call,
6403 Call.getArgOperand(0));
6404
6405 // Check that result type matches wrapped callee.
6406 auto *TargetFuncType =
6407 cast<FunctionType>(StatepointCall->getParamElementType(2));
6408 Check(Call.getType() == TargetFuncType->getReturnType(),
6409 "gc.result result type does not match wrapped callee", Call);
6410 break;
6411 }
6412 case Intrinsic::experimental_gc_relocate: {
6413 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6414
6416 "gc.relocate must return a pointer or a vector of pointers", Call);
6417
6418 // Check that this relocate is correctly tied to the statepoint
6419
6420 // This is case for relocate on the unwinding path of an invoke statepoint
6421 if (LandingPadInst *LandingPad =
6423
6424 const BasicBlock *InvokeBB =
6425 LandingPad->getParent()->getUniquePredecessor();
6426
6427 // Landingpad relocates should have only one predecessor with invoke
6428 // statepoint terminator
6429 Check(InvokeBB, "safepoints should have unique landingpads",
6430 LandingPad->getParent());
6431 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6432 InvokeBB);
6434 "gc relocate should be linked to a statepoint", InvokeBB);
6435 } else {
6436 // In all other cases relocate should be tied to the statepoint directly.
6437 // This covers relocates on a normal return path of invoke statepoint and
6438 // relocates of a call statepoint.
6439 auto *Token = Call.getArgOperand(0);
6441 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6442 }
6443
6444 // Verify rest of the relocate arguments.
6445 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6446
6447 // Both the base and derived must be piped through the safepoint.
6450 "gc.relocate operand #2 must be integer offset", Call);
6451
6452 Value *Derived = Call.getArgOperand(2);
6453 Check(isa<ConstantInt>(Derived),
6454 "gc.relocate operand #3 must be integer offset", Call);
6455
6456 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6457 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6458
6459 // Check the bounds
6460 if (isa<UndefValue>(StatepointCall))
6461 break;
6462 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6463 .getOperandBundle(LLVMContext::OB_gc_live)) {
6464 Check(BaseIndex < Opt->Inputs.size(),
6465 "gc.relocate: statepoint base index out of bounds", Call);
6466 Check(DerivedIndex < Opt->Inputs.size(),
6467 "gc.relocate: statepoint derived index out of bounds", Call);
6468 }
6469
6470 // Relocated value must be either a pointer type or vector-of-pointer type,
6471 // but gc_relocate does not need to return the same pointer type as the
6472 // relocated pointer. It can be casted to the correct type later if it's
6473 // desired. However, they must have the same address space and 'vectorness'
6474 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6475 auto *ResultType = Call.getType();
6476 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6477 auto *BaseType = Relocate.getBasePtr()->getType();
6478
6479 Check(BaseType->isPtrOrPtrVectorTy(),
6480 "gc.relocate: relocated value must be a pointer", Call);
6481 Check(DerivedType->isPtrOrPtrVectorTy(),
6482 "gc.relocate: relocated value must be a pointer", Call);
6483
6484 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6485 "gc.relocate: vector relocates to vector and pointer to pointer",
6486 Call);
6487 Check(
6488 ResultType->getPointerAddressSpace() ==
6489 DerivedType->getPointerAddressSpace(),
6490 "gc.relocate: relocating a pointer shouldn't change its address space",
6491 Call);
6492
6493 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6494 Check(GC, "gc.relocate: calling function must have GCStrategy",
6495 Call.getFunction());
6496 if (GC) {
6497 auto isGCPtr = [&GC](Type *PTy) {
6498 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6499 };
6500 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6501 Check(isGCPtr(BaseType),
6502 "gc.relocate: relocated value must be a gc pointer", Call);
6503 Check(isGCPtr(DerivedType),
6504 "gc.relocate: relocated value must be a gc pointer", Call);
6505 }
6506 break;
6507 }
6508 case Intrinsic::experimental_patchpoint: {
6509 if (Call.getCallingConv() == CallingConv::AnyReg) {
6511 "patchpoint: invalid return type used with anyregcc", Call);
6512 }
6513 break;
6514 }
6515 case Intrinsic::eh_exceptioncode:
6516 case Intrinsic::eh_exceptionpointer: {
6518 "eh.exceptionpointer argument must be a catchpad", Call);
6519 break;
6520 }
6521 case Intrinsic::get_active_lane_mask: {
6523 "get_active_lane_mask: must return a "
6524 "vector",
6525 Call);
6526 auto *ElemTy = Call.getType()->getScalarType();
6527 Check(ElemTy->isIntegerTy(1),
6528 "get_active_lane_mask: element type is not "
6529 "i1",
6530 Call);
6531 break;
6532 }
6533 case Intrinsic::experimental_get_vector_length: {
6534 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6535 Check(!VF->isNegative() && !VF->isZero(),
6536 "get_vector_length: VF must be positive", Call);
6537 break;
6538 }
6539 case Intrinsic::masked_load: {
6540 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6541 Call);
6542
6544 Value *PassThru = Call.getArgOperand(2);
6545 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6546 Call);
6547 Check(PassThru->getType() == Call.getType(),
6548 "masked_load: pass through and return type must match", Call);
6549 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6550 cast<VectorType>(Call.getType())->getElementCount(),
6551 "masked_load: vector mask must be same length as return", Call);
6552 break;
6553 }
6554 case Intrinsic::masked_store: {
6555 Value *Val = Call.getArgOperand(0);
6557 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6558 Call);
6559 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6560 cast<VectorType>(Val->getType())->getElementCount(),
6561 "masked_store: vector mask must be same length as value", Call);
6562 break;
6563 }
6564 case Intrinsic::experimental_guard: {
6565 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6567 "experimental_guard must have exactly one "
6568 "\"deopt\" operand bundle");
6569 break;
6570 }
6571
6572 case Intrinsic::experimental_deoptimize: {
6573 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6574 Call);
6576 "experimental_deoptimize must have exactly one "
6577 "\"deopt\" operand bundle");
6579 "experimental_deoptimize return type must match caller return type");
6580
6581 if (isa<CallInst>(Call)) {
6583 Check(RI,
6584 "calls to experimental_deoptimize must be followed by a return");
6585
6586 if (!Call.getType()->isVoidTy() && RI)
6587 Check(RI->getReturnValue() == &Call,
6588 "calls to experimental_deoptimize must be followed by a return "
6589 "of the value computed by experimental_deoptimize");
6590 }
6591
6592 break;
6593 }
6594 case Intrinsic::vastart: {
6596 "va_start called in a non-varargs function");
6597 break;
6598 }
6599 case Intrinsic::get_dynamic_area_offset: {
6600 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6601 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6602 IntTy->getBitWidth(),
6603 "get_dynamic_area_offset result type must be scalar integer matching "
6604 "alloca address space width",
6605 Call);
6606 break;
6607 }
6608 case Intrinsic::masked_udiv:
6609 case Intrinsic::masked_sdiv:
6610 case Intrinsic::masked_urem:
6611 case Intrinsic::masked_srem:
6612 case Intrinsic::vector_reduce_and:
6613 case Intrinsic::vector_reduce_or:
6614 case Intrinsic::vector_reduce_xor:
6615 case Intrinsic::vector_reduce_add:
6616 case Intrinsic::vector_reduce_mul:
6617 case Intrinsic::vector_reduce_smax:
6618 case Intrinsic::vector_reduce_smin:
6619 case Intrinsic::vector_reduce_umax:
6620 case Intrinsic::vector_reduce_umin: {
6621 Type *ArgTy = Call.getArgOperand(0)->getType();
6622 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6623 "intrinsic has incorrect argument type!");
6624 break;
6625 }
6626 case Intrinsic::vector_reduce_fmax:
6627 case Intrinsic::vector_reduce_fmin: {
6628 Type *ArgTy = Call.getArgOperand(0)->getType();
6629 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6630 "intrinsic has incorrect argument type!");
6631 break;
6632 }
6633 case Intrinsic::vector_reduce_fadd:
6634 case Intrinsic::vector_reduce_fmul: {
6635 // Unlike the other reductions, the first argument is a start value. The
6636 // second argument is the vector to be reduced.
6637 Type *ArgTy = Call.getArgOperand(1)->getType();
6638 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6639 "intrinsic has incorrect argument type!");
6640 break;
6641 }
6642 case Intrinsic::smul_fix:
6643 case Intrinsic::smul_fix_sat:
6644 case Intrinsic::umul_fix:
6645 case Intrinsic::umul_fix_sat:
6646 case Intrinsic::sdiv_fix:
6647 case Intrinsic::sdiv_fix_sat:
6648 case Intrinsic::udiv_fix:
6649 case Intrinsic::udiv_fix_sat: {
6650 Value *Op1 = Call.getArgOperand(0);
6651 Value *Op2 = Call.getArgOperand(1);
6653 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6654 "vector of ints");
6656 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6657 "vector of ints");
6658
6659 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6660 Check(Op3->getType()->isIntegerTy(),
6661 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6662 Check(Op3->getBitWidth() <= 32,
6663 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6664
6665 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6666 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6667 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6668 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6669 "the operands");
6670 } else {
6671 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6672 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6673 "to the width of the operands");
6674 }
6675 break;
6676 }
6677 case Intrinsic::lrint:
6678 case Intrinsic::llrint:
6679 case Intrinsic::lround:
6680 case Intrinsic::llround: {
6681 Type *ValTy = Call.getArgOperand(0)->getType();
6682 Type *ResultTy = Call.getType();
6683 auto *VTy = dyn_cast<VectorType>(ValTy);
6684 auto *RTy = dyn_cast<VectorType>(ResultTy);
6685 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6686 ExpectedName + ": argument must be floating-point or vector "
6687 "of floating-points, and result must be integer or "
6688 "vector of integers",
6689 &Call);
6690 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6691 ExpectedName + ": argument and result disagree on vector use", &Call);
6692 if (VTy) {
6693 Check(VTy->getElementCount() == RTy->getElementCount(),
6694 ExpectedName + ": argument must be same length as result", &Call);
6695 }
6696 break;
6697 }
6698 case Intrinsic::bswap: {
6699 Type *Ty = Call.getType();
6700 unsigned Size = Ty->getScalarSizeInBits();
6701 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6702 break;
6703 }
6704 case Intrinsic::invariant_start: {
6705 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6706 Check(InvariantSize &&
6707 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6708 "invariant_start parameter must be -1, 0 or a positive number",
6709 &Call);
6710 break;
6711 }
6712 case Intrinsic::matrix_multiply:
6713 case Intrinsic::matrix_transpose:
6714 case Intrinsic::matrix_column_major_load:
6715 case Intrinsic::matrix_column_major_store: {
6717 ConstantInt *Stride = nullptr;
6718 ConstantInt *NumRows;
6719 ConstantInt *NumColumns;
6720 VectorType *ResultTy;
6721 Type *Op0ElemTy = nullptr;
6722 Type *Op1ElemTy = nullptr;
6723 switch (ID) {
6724 case Intrinsic::matrix_multiply: {
6725 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6726 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6727 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6729 ->getNumElements() ==
6730 NumRows->getZExtValue() * N->getZExtValue(),
6731 "First argument of a matrix operation does not match specified "
6732 "shape!");
6734 ->getNumElements() ==
6735 N->getZExtValue() * NumColumns->getZExtValue(),
6736 "Second argument of a matrix operation does not match specified "
6737 "shape!");
6738
6739 ResultTy = cast<VectorType>(Call.getType());
6740 Op0ElemTy =
6741 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6742 Op1ElemTy =
6743 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6744 break;
6745 }
6746 case Intrinsic::matrix_transpose:
6747 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6748 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6749 ResultTy = cast<VectorType>(Call.getType());
6750 Op0ElemTy =
6751 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6752 break;
6753 case Intrinsic::matrix_column_major_load: {
6755 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6756 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6757 ResultTy = cast<VectorType>(Call.getType());
6758 break;
6759 }
6760 case Intrinsic::matrix_column_major_store: {
6762 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6763 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6764 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6765 Op0ElemTy =
6766 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6767 break;
6768 }
6769 default:
6770 llvm_unreachable("unexpected intrinsic");
6771 }
6772
6773 Check(ResultTy->getElementType()->isIntegerTy() ||
6774 ResultTy->getElementType()->isFloatingPointTy(),
6775 "Result type must be an integer or floating-point type!", IF);
6776
6777 if (Op0ElemTy)
6778 Check(ResultTy->getElementType() == Op0ElemTy,
6779 "Vector element type mismatch of the result and first operand "
6780 "vector!",
6781 IF);
6782
6783 if (Op1ElemTy)
6784 Check(ResultTy->getElementType() == Op1ElemTy,
6785 "Vector element type mismatch of the result and second operand "
6786 "vector!",
6787 IF);
6788
6790 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6791 "Result of a matrix operation does not fit in the returned vector!");
6792
6793 if (Stride) {
6794 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6795 IF);
6796 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6797 "Stride must be greater or equal than the number of rows!", IF);
6798 }
6799
6800 break;
6801 }
6802 case Intrinsic::stepvector: {
6804 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6805 VecTy->getScalarSizeInBits() >= 8,
6806 "stepvector only supported for vectors of integers "
6807 "with a bitwidth of at least 8.",
6808 &Call);
6809 break;
6810 }
6811 case Intrinsic::experimental_vector_match: {
6812 Value *Op1 = Call.getArgOperand(0);
6813 Value *Op2 = Call.getArgOperand(1);
6815
6816 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6817 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6818 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6819
6820 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6822 "Second operand must be a fixed length vector.", &Call);
6823 Check(Op1Ty->getElementType()->isIntegerTy(),
6824 "First operand must be a vector of integers.", &Call);
6825 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6826 "First two operands must have the same element type.", &Call);
6827 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6828 "First operand and mask must have the same number of elements.",
6829 &Call);
6830 Check(MaskTy->getElementType()->isIntegerTy(1),
6831 "Mask must be a vector of i1's.", &Call);
6832 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6833 &Call);
6834 break;
6835 }
6836 case Intrinsic::vector_insert: {
6837 Value *Vec = Call.getArgOperand(0);
6838 Value *SubVec = Call.getArgOperand(1);
6839 Value *Idx = Call.getArgOperand(2);
6840 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6841
6842 VectorType *VecTy = cast<VectorType>(Vec->getType());
6843 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6844
6845 ElementCount VecEC = VecTy->getElementCount();
6846 ElementCount SubVecEC = SubVecTy->getElementCount();
6847 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6848 "vector_insert parameters must have the same element "
6849 "type.",
6850 &Call);
6851 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6852 "vector_insert index must be a constant multiple of "
6853 "the subvector's known minimum vector length.");
6854
6855 // If this insertion is not the 'mixed' case where a fixed vector is
6856 // inserted into a scalable vector, ensure that the insertion of the
6857 // subvector does not overrun the parent vector.
6858 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6859 Check(IdxN < VecEC.getKnownMinValue() &&
6860 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6861 "subvector operand of vector_insert would overrun the "
6862 "vector being inserted into.");
6863 }
6864 break;
6865 }
6866 case Intrinsic::vector_extract: {
6867 Value *Vec = Call.getArgOperand(0);
6868 Value *Idx = Call.getArgOperand(1);
6869 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6870
6871 VectorType *ResultTy = cast<VectorType>(Call.getType());
6872 VectorType *VecTy = cast<VectorType>(Vec->getType());
6873
6874 ElementCount VecEC = VecTy->getElementCount();
6875 ElementCount ResultEC = ResultTy->getElementCount();
6876
6877 Check(ResultTy->getElementType() == VecTy->getElementType(),
6878 "vector_extract result must have the same element "
6879 "type as the input vector.",
6880 &Call);
6881 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6882 "vector_extract index must be a constant multiple of "
6883 "the result type's known minimum vector length.");
6884
6885 // If this extraction is not the 'mixed' case where a fixed vector is
6886 // extracted from a scalable vector, ensure that the extraction does not
6887 // overrun the parent vector.
6888 if (VecEC.isScalable() == ResultEC.isScalable()) {
6889 Check(IdxN < VecEC.getKnownMinValue() &&
6890 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6891 "vector_extract would overrun.");
6892 }
6893 break;
6894 }
6895 case Intrinsic::vector_partial_reduce_fadd:
6896 case Intrinsic::vector_partial_reduce_add: {
6899
6900 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6901 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6902
6903 Check((VecWidth % AccWidth) == 0,
6904 "Invalid vector widths for partial "
6905 "reduction. The width of the input vector "
6906 "must be a positive integer multiple of "
6907 "the width of the accumulator vector.");
6908 break;
6909 }
6910 case Intrinsic::experimental_noalias_scope_decl: {
6911 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6912 break;
6913 }
6914 case Intrinsic::preserve_array_access_index:
6915 case Intrinsic::preserve_struct_access_index:
6916 case Intrinsic::aarch64_ldaxr:
6917 case Intrinsic::aarch64_ldxr:
6918 case Intrinsic::arm_ldaex:
6919 case Intrinsic::arm_ldrex: {
6920 Type *ElemTy = Call.getParamElementType(0);
6921 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6922 &Call);
6923 break;
6924 }
6925 case Intrinsic::aarch64_stlxr:
6926 case Intrinsic::aarch64_stxr:
6927 case Intrinsic::arm_stlex:
6928 case Intrinsic::arm_strex: {
6929 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6930 Check(ElemTy,
6931 "Intrinsic requires elementtype attribute on second argument.",
6932 &Call);
6933 break;
6934 }
6935 case Intrinsic::aarch64_prefetch: {
6936 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6937 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6938 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6939 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6940 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6941 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6942 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6943 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6944 break;
6945 }
6946 case Intrinsic::aarch64_range_prefetch: {
6947 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6948 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6949 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6950 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6951 Call);
6952 break;
6953 }
6954 case Intrinsic::callbr_landingpad: {
6955 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6956 Check(CBR, "intrinstic requires callbr operand", &Call);
6957 if (!CBR)
6958 break;
6959
6960 const BasicBlock *LandingPadBB = Call.getParent();
6961 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6962 if (!PredBB) {
6963 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6964 break;
6965 }
6966 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6967 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6968 &Call);
6969 break;
6970 }
6971 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6972 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6973 "block in indirect destination list",
6974 &Call);
6975 const Instruction &First = *LandingPadBB->begin();
6976 Check(&First == &Call, "No other instructions may proceed intrinsic",
6977 &Call);
6978 break;
6979 }
6980 case Intrinsic::structured_gep: {
6981 // Parser should refuse those 2 cases.
6982 assert(Call.arg_size() >= 1);
6984
6985 Check(Call.paramHasAttr(0, Attribute::ElementType),
6986 "Intrinsic first parameter is missing an ElementType attribute",
6987 &Call);
6988
6989 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6990 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6992 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6993 Check(Index->getType()->isIntegerTy(),
6994 "Index operand type must be an integer", &Call);
6995
6996 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
6997 T = AT->getElementType();
6998 } else if (StructType *ST = dyn_cast<StructType>(T)) {
6999 Check(CI, "Indexing into a struct requires a constant int", &Call);
7000 Check(CI->getZExtValue() < ST->getNumElements(),
7001 "Indexing in a struct should be inbounds", &Call);
7002 T = ST->getElementType(CI->getZExtValue());
7003 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
7004 T = VT->getElementType();
7005 } else {
7006 CheckFailed("Reached a non-composite type with more indices to process",
7007 &Call);
7008 }
7009 }
7010 break;
7011 }
7012 case Intrinsic::structured_alloca:
7013 Check(Call.hasRetAttr(Attribute::ElementType),
7014 "@llvm.structured.alloca calls require elementtype attribute.",
7015 &Call);
7016 break;
7017 case Intrinsic::amdgcn_cs_chain: {
7018 auto CallerCC = Call.getCaller()->getCallingConv();
7019 switch (CallerCC) {
7020 case CallingConv::AMDGPU_CS:
7021 case CallingConv::AMDGPU_CS_Chain:
7022 case CallingConv::AMDGPU_CS_ChainPreserve:
7023 case CallingConv::AMDGPU_ES:
7024 case CallingConv::AMDGPU_GS:
7025 case CallingConv::AMDGPU_HS:
7026 case CallingConv::AMDGPU_LS:
7027 case CallingConv::AMDGPU_VS:
7028 break;
7029 default:
7030 CheckFailed("Intrinsic cannot be called from functions with this "
7031 "calling convention",
7032 &Call);
7033 break;
7034 }
7035
7036 Check(Call.paramHasAttr(2, Attribute::InReg),
7037 "SGPR arguments must have the `inreg` attribute", &Call);
7038 Check(!Call.paramHasAttr(3, Attribute::InReg),
7039 "VGPR arguments must not have the `inreg` attribute", &Call);
7040
7041 auto *Next = Call.getNextNode();
7042 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7043 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7044 Intrinsic::amdgcn_unreachable;
7045 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7046 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7047 break;
7048 }
7049 case Intrinsic::amdgcn_init_exec_from_input: {
7050 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7051 Check(Arg && Arg->hasInRegAttr(),
7052 "only inreg arguments to the parent function are valid as inputs to "
7053 "this intrinsic",
7054 &Call);
7055 break;
7056 }
7057 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7058 auto CallerCC = Call.getCaller()->getCallingConv();
7059 switch (CallerCC) {
7060 case CallingConv::AMDGPU_CS_Chain:
7061 case CallingConv::AMDGPU_CS_ChainPreserve:
7062 break;
7063 default:
7064 CheckFailed("Intrinsic can only be used from functions with the "
7065 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7066 "calling conventions",
7067 &Call);
7068 break;
7069 }
7070
7071 unsigned InactiveIdx = 1;
7072 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7073 "Value for inactive lanes must not have the `inreg` attribute",
7074 &Call);
7075 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7076 "Value for inactive lanes must be a function argument", &Call);
7077 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7078 "Value for inactive lanes must be a VGPR function argument", &Call);
7079 break;
7080 }
7081 case Intrinsic::amdgcn_call_whole_wave: {
7083 Check(F, "Indirect whole wave calls are not allowed", &Call);
7084
7085 CallingConv::ID CC = F->getCallingConv();
7086 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7087 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7088 &Call);
7089
7090 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7091
7092 Check(Call.arg_size() == F->arg_size(),
7093 "Call argument count must match callee argument count", &Call);
7094
7095 // The first argument of the call is the callee, and the first argument of
7096 // the callee is the active mask. The rest of the arguments must match.
7097 Check(F->arg_begin()->getType()->isIntegerTy(1),
7098 "Callee must have i1 as its first argument", &Call);
7099 for (auto [CallArg, FuncArg] :
7100 drop_begin(zip_equal(Call.args(), F->args()))) {
7101 Check(CallArg->getType() == FuncArg.getType(),
7102 "Argument types must match", &Call);
7103
7104 // Check that inreg attributes match between call site and function
7105 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7106 FuncArg.hasInRegAttr(),
7107 "Argument inreg attributes must match", &Call);
7108 }
7109 break;
7110 }
7111 case Intrinsic::amdgcn_s_prefetch_data: {
7112 Check(
7115 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7116 break;
7117 }
7118 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7119 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7120 Value *Src0 = Call.getArgOperand(0);
7121 Value *Src1 = Call.getArgOperand(1);
7122
7123 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7124 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7125 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7126 Call.getArgOperand(3));
7127 Check(BLGP <= 4, "invalid value for blgp format", Call,
7128 Call.getArgOperand(4));
7129
7130 // AMDGPU::MFMAScaleFormats values
7131 auto getFormatNumRegs = [](unsigned FormatVal) {
7132 switch (FormatVal) {
7133 case 0:
7134 case 1:
7135 return 8u;
7136 case 2:
7137 case 3:
7138 return 6u;
7139 case 4:
7140 return 4u;
7141 default:
7142 llvm_unreachable("invalid format value");
7143 }
7144 };
7145
7146 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7147 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7148 return false;
7149 unsigned NumElts = Ty->getNumElements();
7150 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7151 };
7152
7153 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7154 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7155 Check(isValidSrcASrcBVector(Src0Ty),
7156 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7157 Check(isValidSrcASrcBVector(Src1Ty),
7158 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7159
7160 // Permit excess registers for the format.
7161 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7162 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7163 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7164 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7165 break;
7166 }
7167 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7168 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7169 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7170 Value *Src0 = Call.getArgOperand(1);
7171 Value *Src1 = Call.getArgOperand(3);
7172
7173 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7174 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7175 Check(FmtA <= 4, "invalid value for matrix format", Call,
7176 Call.getArgOperand(0));
7177 Check(FmtB <= 4, "invalid value for matrix format", Call,
7178 Call.getArgOperand(2));
7179
7180 // AMDGPU::MatrixFMT values
7181 auto getFormatNumRegs = [](unsigned FormatVal) {
7182 switch (FormatVal) {
7183 case 0:
7184 case 1:
7185 return 16u;
7186 case 2:
7187 case 3:
7188 return 12u;
7189 case 4:
7190 return 8u;
7191 default:
7192 llvm_unreachable("invalid format value");
7193 }
7194 };
7195
7196 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7197 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7198 return false;
7199 unsigned NumElts = Ty->getNumElements();
7200 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7201 };
7202
7203 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7204 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7205 Check(isValidSrcASrcBVector(Src0Ty),
7206 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7207 Check(isValidSrcASrcBVector(Src1Ty),
7208 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7209
7210 // Permit excess registers for the format.
7211 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7212 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7213 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7214 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7215 break;
7216 }
7217 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7218 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7219 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7220 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7221 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7222 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7223 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7224 Value *PtrArg = Call.getArgOperand(0);
7225 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7227 "cooperative atomic intrinsics require a generic or global pointer",
7228 &Call, PtrArg);
7229
7230 // Last argument must be a MD string
7232 MDNode *MD = cast<MDNode>(Op->getMetadata());
7233 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7234 "cooperative atomic intrinsics require that the last argument is a "
7235 "metadata string",
7236 &Call, Op);
7237 break;
7238 }
7239 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7240 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7241 Value *V = Call.getArgOperand(0);
7242 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7243 Check(RegCount % 8 == 0,
7244 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7245 break;
7246 }
7247 case Intrinsic::experimental_convergence_entry:
7248 case Intrinsic::experimental_convergence_anchor:
7249 break;
7250 case Intrinsic::experimental_convergence_loop:
7251 break;
7252 case Intrinsic::ptrmask: {
7253 Type *Ty0 = Call.getArgOperand(0)->getType();
7254 Type *Ty1 = Call.getArgOperand(1)->getType();
7256 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7257 "of pointers",
7258 &Call);
7259 Check(
7260 Ty0->isVectorTy() == Ty1->isVectorTy(),
7261 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7262 &Call);
7263 if (Ty0->isVectorTy())
7264 Check(cast<VectorType>(Ty0)->getElementCount() ==
7265 cast<VectorType>(Ty1)->getElementCount(),
7266 "llvm.ptrmask intrinsic arguments must have the same number of "
7267 "elements",
7268 &Call);
7269 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7270 "llvm.ptrmask intrinsic second argument bitwidth must match "
7271 "pointer index type size of first argument",
7272 &Call);
7273 break;
7274 }
7275 case Intrinsic::thread_pointer: {
7277 DL.getDefaultGlobalsAddressSpace(),
7278 "llvm.thread.pointer intrinsic return type must be for the globals "
7279 "address space",
7280 &Call);
7281 break;
7282 }
7283 case Intrinsic::threadlocal_address: {
7284 const Value &Arg0 = *Call.getArgOperand(0);
7285 Check(isa<GlobalValue>(Arg0),
7286 "llvm.threadlocal.address first argument must be a GlobalValue");
7287 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7288 "llvm.threadlocal.address operand isThreadLocal() must be true");
7289 break;
7290 }
7291 case Intrinsic::lifetime_start:
7292 case Intrinsic::lifetime_end: {
7293 Value *Ptr = Call.getArgOperand(0);
7294 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7295 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7296 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7297 "llvm.lifetime.start/end can only be used on alloca or poison",
7298 &Call);
7299 break;
7300 }
7301 case Intrinsic::sponentry: {
7302 const unsigned StackAS = DL.getAllocaAddrSpace();
7303 const Type *RetTy = Call.getFunctionType()->getReturnType();
7304 Check(RetTy->getPointerAddressSpace() == StackAS,
7305 "llvm.sponentry must return a pointer to the stack", &Call);
7306 break;
7307 }
7308 };
7309
7310 // Verify that there aren't any unmediated control transfers between funclets.
7312 Function *F = Call.getParent()->getParent();
7313 if (F->hasPersonalityFn() &&
7314 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7315 // Run EH funclet coloring on-demand and cache results for other intrinsic
7316 // calls in this function
7317 if (BlockEHFuncletColors.empty())
7318 BlockEHFuncletColors = colorEHFunclets(*F);
7319
7320 // Check for catch-/cleanup-pad in first funclet block
7321 bool InEHFunclet = false;
7322 BasicBlock *CallBB = Call.getParent();
7323 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7324 assert(CV.size() > 0 && "Uncolored block");
7325 for (BasicBlock *ColorFirstBB : CV)
7326 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7327 It != ColorFirstBB->end())
7329 InEHFunclet = true;
7330
7331 // Check for funclet operand bundle
7332 bool HasToken = false;
7333 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7335 HasToken = true;
7336
7337 // This would cause silent code truncation in WinEHPrepare
7338 if (InEHFunclet)
7339 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7340 }
7341 }
7342}
7343
7344/// Carefully grab the subprogram from a local scope.
7345///
7346/// This carefully grabs the subprogram from a local scope, avoiding the
7347/// built-in assertions that would typically fire.
  // Tolerate a null scope: broken scope chains are diagnosed elsewhere in the
  // verifier, not here.
7349  if (!LocalScope)
7350    return nullptr;
7351
  // The scope may itself be the subprogram we are looking for.
7352  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7353    return SP;
7354
  // Lexical blocks nest inside a parent scope; recurse via the *raw* scope
  // operand so malformed metadata does not trip accessor assertions.
7355  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7356    return getSubprogram(LB->getRawScope());
7357
7358  // Just return null; broken scope chains are checked elsewhere.
7359  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7360  return nullptr;
7361}
7362
/// Verify a single #dbg_label record.
///
/// Checks the label operand and ensures the subprogram of the label's scope
/// agrees with the subprogram of the record's !dbg location attachment.
7363void Verifier::visit(DbgLabelRecord &DLR) {
7365          "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7366
7367  // Ignore broken !dbg attachments; they're checked elsewhere.
7368  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7369    if (!isa<DILocation>(N))
7370      return;
7371
  // BB/F are only used for diagnostics context; F may be null for a detached
  // record.
7372  BasicBlock *BB = DLR.getParent();
7373  Function *F = BB ? BB->getParent() : nullptr;
7374
7375  // The scopes for variables and !dbg attachments must agree.
7376  DILabel *Label = DLR.getLabel();
7377  DILocation *Loc = DLR.getDebugLoc();
7378  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7379
7380  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7381  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  // If either scope chain is broken, bail out here; those failures are
  // reported by the generic scope verification instead.
7382  if (!LabelSP || !LocSP)
7383    return;
7384
7385  CheckDI(LabelSP == LocSP,
7386          "mismatched subprogram between #dbg_label label and !dbg attachment",
7387          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7388          Loc->getScope()->getSubprogram());
7389}
7390
/// Verify a single #dbg_value / #dbg_declare / #dbg_assign record.
///
/// Validates the record's kind, location operand, variable, expression(s),
/// and — for #dbg_assign — the DIAssignID and address operands; finally
/// checks that the variable's scope and the !dbg location agree on their
/// enclosing subprogram.
7391void Verifier::visit(DbgVariableRecord &DVR) {
7392  BasicBlock *BB = DVR.getParent();
7393  Function *F = BB->getParent();
7394
  // Only the four known record kinds are legal.
7395  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7396              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7397              DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7398              DVR.getType() == DbgVariableRecord::LocationType::Assign,
7399          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7400
7401  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7402  // DIArgList, or an empty MDNode (which is a legacy representation for an
7403  // "undef" location).
7404  auto *MD = DVR.getRawLocation();
7405  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7406                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7407          "invalid #dbg record address/value", &DVR, MD, BB, F);
7408  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7409    visitValueAsMetadata(*VAM, F);
7410    if (DVR.isDbgDeclare()) {
7411      // Allow integers here to support inttoptr salvage.
7412      Type *Ty = VAM->getValue()->getType();
7413      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7414              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7415              F);
7416    }
7417  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7418    visitDIArgList(*AL, F);
7419  }
7420
7422          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7423  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7424
7426          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7427          F);
7428  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7429
  // #dbg_assign records carry extra operands: an assignment ID, an address,
  // and an address expression; each gets the same level of scrutiny.
7430  if (DVR.isDbgAssign()) {
7432            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7433            F);
7434    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7435                AreDebugLocsAllowed::No);
7436
7437    const auto *RawAddr = DVR.getRawAddress();
7438    // Similarly to the location above, the address for an assign
7439    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7440    // represents an undef address.
7441    CheckDI(
7442        isa<ValueAsMetadata>(RawAddr) ||
7443            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7444        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7445    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7446      visitValueAsMetadata(*VAM, F);
7447
7449            "invalid #dbg_assign address expression", &DVR,
7450            DVR.getRawAddressExpression(), BB, F);
7451    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7452
7453    // All of the linked instructions should be in the same function as DVR.
7454    for (Instruction *I : at::getAssignmentInsts(&DVR))
7455      CheckDI(DVR.getFunction() == I->getFunction(),
7456              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7457  }
7458
7459  // This check is redundant with one in visitLocalVariable().
7460  DILocalVariable *Var = DVR.getVariable();
7461  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7462          BB, F);
7463
7464  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7465  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7466          &DVR, DLNode, BB, F);
7467  DILocation *Loc = DVR.getDebugLoc();
7468
7469  // The scopes for variables and !dbg attachments must agree.
7470  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7471  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7472  if (!VarSP || !LocSP)
7473    return; // Broken scope chains are checked elsewhere.
7474
7475  CheckDI(VarSP == LocSP,
7476          "mismatched subprogram between #dbg record variable and DILocation",
7477          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7478          Loc->getScope()->getSubprogram(), BB, F);
7479
  // Cross-check against the function's formal-argument debug info.
7480  verifyFnArgs(DVR);
7481}
7482
/// Verify a vector-predicated (llvm.vp.*) intrinsic call.
///
/// For VP cast intrinsics, checks that argument and result vector lengths
/// match and that the element types are legal for the specific cast
/// (int<->int truncation/extension, fp<->int conversions, fp
/// truncation/extension, ptr<->int). Then applies per-intrinsic checks:
/// comparison predicates, fpclass test masks, and splice index bounds.
7483void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7484  if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7485    auto *RetTy = cast<VectorType>(VPCast->getType());
7486    auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
    // All VP casts are elementwise, so the vector lengths must agree.
7487    Check(RetTy->getElementCount() == ValTy->getElementCount(),
7488          "VP cast intrinsic first argument and result vector lengths must be "
7489          "equal",
7490          *VPCast);
7491
7492    switch (VPCast->getIntrinsicID()) {
7493    default:
7494      llvm_unreachable("Unknown VP cast intrinsic");
7495    case Intrinsic::vp_trunc:
7496      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7497            "llvm.vp.trunc intrinsic first argument and result element type "
7498            "must be integer",
7499            *VPCast);
7500      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7501            "llvm.vp.trunc intrinsic the bit size of first argument must be "
7502            "larger than the bit size of the return type",
7503            *VPCast);
7504      break;
7505    case Intrinsic::vp_zext:
7506    case Intrinsic::vp_sext:
7507      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7508            "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7509            "element type must be integer",
7510            *VPCast);
7511      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7512            "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7513            "argument must be smaller than the bit size of the return type",
7514            *VPCast);
7515      break;
7516    case Intrinsic::vp_fptoui:
7517    case Intrinsic::vp_fptosi:
7518    case Intrinsic::vp_lrint:
7519    case Intrinsic::vp_llrint:
7520      Check(
7521          RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
          // Note: trailing space added inside the first literal; the adjacent
          // string literals previously concatenated to "llrintintrinsic".
7522          "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint " "intrinsic first argument element "
7523          "type must be floating-point and result element type must be integer",
7524          *VPCast);
7525      break;
7526    case Intrinsic::vp_uitofp:
7527    case Intrinsic::vp_sitofp:
7528      Check(
7529          RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7530          "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7531          "type must be integer and result element type must be floating-point",
7532          *VPCast);
7533      break;
7534    case Intrinsic::vp_fptrunc:
7535      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7536            "llvm.vp.fptrunc intrinsic first argument and result element type "
7537            "must be floating-point",
7538            *VPCast);
7539      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7540            "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7541            "larger than the bit size of the return type",
7542            *VPCast);
7543      break;
7544    case Intrinsic::vp_fpext:
7545      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7546            "llvm.vp.fpext intrinsic first argument and result element type "
7547            "must be floating-point",
7548            *VPCast);
7549      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7550            "llvm.vp.fpext intrinsic the bit size of first argument must be "
7551            "smaller than the bit size of the return type",
7552            *VPCast);
7553      break;
7554    case Intrinsic::vp_ptrtoint:
7555      Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7556            "llvm.vp.ptrtoint intrinsic first argument element type must be "
7557            "pointer and result element type must be integer",
7558            *VPCast);
7559      break;
7560    case Intrinsic::vp_inttoptr:
7561      Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7562            "llvm.vp.inttoptr intrinsic first argument element type must be "
7563            "integer and result element type must be pointer",
7564            *VPCast);
7565      break;
7566    }
7567  }
7568
  // Per-intrinsic checks that are independent of the cast classification.
7569  switch (VPI.getIntrinsicID()) {
7570  case Intrinsic::vp_fcmp: {
7571    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7573          "invalid predicate for VP FP comparison intrinsic", &VPI);
7574    break;
7575  }
7576  case Intrinsic::vp_icmp: {
7577    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7579          "invalid predicate for VP integer comparison intrinsic", &VPI);
7580    break;
7581  }
7582  case Intrinsic::vp_is_fpclass: {
    // The test mask is a bitset; any bit outside fcAllFlags is invalid.
7583    auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7584    Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7585          "unsupported bits for llvm.vp.is.fpclass test mask");
7586    break;
7587  }
7588  case Intrinsic::experimental_vp_splice: {
7589    VectorType *VecTy = cast<VectorType>(VPI.getType());
7590    int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7591    int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
    // For scalable vectors, scale the minimum length by the function's
    // vscale_range lower bound when one is present.
7592    if (VPI.getParent() && VPI.getParent()->getParent()) {
7593      AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7594      if (Attrs.hasFnAttr(Attribute::VScaleRange))
7595        KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7596    }
    // Negative indices count from the end; valid range is [-VL, VL-1].
7597    Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7598              (Idx >= 0 && Idx < KnownMinNumElements),
7599          "The splice index exceeds the range [-VL, VL-1] where VL is the "
7600          "known minimum number of elements in the vector. For scalable "
7601          "vectors the minimum number of elements is determined from "
7602          "vscale_range.",
7603          &VPI);
7604    break;
7605  }
7606  }
7607}
7608
// Check the operand counts and the per-intrinsic typing rules of a
// constrained floating-point intrinsic call.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  // Start from the non-metadata arguments; the metadata operands (exception
  // behavior, plus rounding mode when present) are added on top below.
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  bool HasRoundingMD =

  // Add the expected number of metadata operands.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    // lrint/llrint are scalar-only: neither source nor result may be vectors.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    // Same scalar-only restriction as lrint/llrint.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    // The comparison predicate must be valid for an FP comparison.
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP -> integer conversion: operand must be FP (or an FP vector), result
    // integer (or an integer vector), and vector-ness/lengths must agree.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to validate the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer -> FP conversion: mirror image of the fptosi/fptoui checks.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to validate the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    // FP <-> FP resize: both sides FP, matching vector-ness and lengths, and
    // the scalar width must strictly shrink (fptrunc) or grow (fpext).
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
7742
7743void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7744 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7745 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7746
7747 // We don't know whether this intrinsic verified correctly.
7748 if (!V || !E || !E->isValid())
7749 return;
7750
7751 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7752 auto Fragment = E->getFragmentInfo();
7753 if (!Fragment)
7754 return;
7755
7756 // The frontend helps out GDB by emitting the members of local anonymous
7757 // unions as artificial local variables with shared storage. When SROA splits
7758 // the storage for artificial local variables that are smaller than the entire
7759 // union, the overhang piece will be outside of the allotted space for the
7760 // variable and this check fails.
7761 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7762 if (V->isArtificial())
7763 return;
7764
7765 verifyFragmentExpression(*V, *Fragment, &DVR);
7766}
7767
// Core DW_OP_LLVM_fragment checks shared by the callers above: the fragment
// must lie within the variable's storage and must not cover the variable
// completely. \p Desc is the IR entity quoted in diagnostics.
template <typename ValueOrMetadata>
void Verifier::verifyFragmentExpression(const DIVariable &V,
                                        ValueOrMetadata *Desc) {
  // If there's no size, the type is broken, but that should be checked
  // elsewhere.
  auto VarSize = V.getSizeInBits();
  if (!VarSize)
    return;

  // Fragment bounds come from the DW_OP_LLVM_fragment operands.
  unsigned FragSize = Fragment.SizeInBits;
  unsigned FragOffset = Fragment.OffsetInBits;
  CheckDI(FragSize + FragOffset <= *VarSize,
          "fragment is larger than or outside of variable", Desc, &V);
  CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
}
7784
7785void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7786 // This function does not take the scope of noninlined function arguments into
7787 // account. Don't run it if current function is nodebug, because it may
7788 // contain inlined debug intrinsics.
7789 if (!HasDebugInfo)
7790 return;
7791
7792 // For performance reasons only check non-inlined ones.
7793 if (DVR.getDebugLoc()->getInlinedAt())
7794 return;
7795
7796 DILocalVariable *Var = DVR.getVariable();
7797 CheckDI(Var, "#dbg record without variable");
7798
7799 unsigned ArgNo = Var->getArg();
7800 if (!ArgNo)
7801 return;
7802
7803 // Verify there are no duplicate function argument debug info entries.
7804 // These will cause hard-to-debug assertions in the DWARF backend.
7805 if (DebugFnArgs.size() < ArgNo)
7806 DebugFnArgs.resize(ArgNo, nullptr);
7807
7808 auto *Prev = DebugFnArgs[ArgNo - 1];
7809 DebugFnArgs[ArgNo - 1] = Var;
7810 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7811 Prev, Var);
7812}
7813
// Reject DIExpressions that use DW_OP_LLVM_entry_value in IR; they are only
// accepted when the location is a swiftasync Argument (checked below).
void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());

  // We don't know whether this intrinsic verified correctly.
  if (!E || !E->isValid())
    return;

    Value *VarValue = DVR.getVariableLocationOp(0);
    // Undef/poison locations carry no real entry value; nothing to reject.
    if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
      return;
    // We allow EntryValues for swift async arguments, as they have an
    // ABI-guarantee to be turned into a specific register.
    if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
        ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
      return;
  }

  CheckDI(!E->isEntryValue(),
          "Entry values are only allowed in MIR unless they target a "
          "swiftasync Argument",
          &DVR);
}
7837
7838void Verifier::verifyCompileUnits() {
7839 // When more than one Module is imported into the same context, such as during
7840 // an LTO build before linking the modules, ODR type uniquing may cause types
7841 // to point to a different CU. This check does not make sense in this case.
7842 if (M.getContext().isODRUniquingDebugTypes())
7843 return;
7844 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7845 SmallPtrSet<const Metadata *, 2> Listed;
7846 if (CUs)
7847 Listed.insert_range(CUs->operands());
7848 for (const auto *CU : CUVisited)
7849 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7850 CUVisited.clear();
7851}
7852
7853void Verifier::verifyDeoptimizeCallingConvs() {
7854 if (DeoptimizeDeclarations.empty())
7855 return;
7856
7857 const Function *First = DeoptimizeDeclarations[0];
7858 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7859 Check(First->getCallingConv() == F->getCallingConv(),
7860 "All llvm.experimental.deoptimize declarations must have the same "
7861 "calling convention",
7862 First, F);
7863 }
7864}
7865
7866void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7867 const OperandBundleUse &BU) {
7868 FunctionType *FTy = Call.getFunctionType();
7869
7870 Check((FTy->getReturnType()->isPointerTy() ||
7871 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7872 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7873 "function returning a pointer or a non-returning function that has a "
7874 "void return type",
7875 Call);
7876
7877 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7878 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7879 "an argument",
7880 Call);
7881
7882 auto *Fn = cast<Function>(BU.Inputs.front());
7883 Intrinsic::ID IID = Fn->getIntrinsicID();
7884
7885 if (IID) {
7886 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7887 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7888 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7889 "invalid function argument", Call);
7890 } else {
7891 StringRef FnName = Fn->getName();
7892 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7893 FnName == "objc_claimAutoreleasedReturnValue" ||
7894 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7895 "invalid function argument", Call);
7896 }
7897}
7898
// Structural checks for llvm.experimental.noalias.scope.decl: every
// declaration's scope list must be an MDNode holding exactly one scope, and
// (when the option is enabled) declarations of the same scope must not
// dominate one another.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    const auto *ScopeListMV = cast<MetadataAsValue>(
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    auto CurScope = GetScope(*ItCurrent);
    // Advance ItNext past the run of declarations sharing CurScope.
    auto ItNext = ItCurrent;
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
7967
7968//===----------------------------------------------------------------------===//
7969// Implement the public interfaces to this file...
7970//===----------------------------------------------------------------------===//
7971
  // Verifier::verify takes a non-const Function; cast constness away here.
  Function &F = const_cast<Function &>(f);

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}
7982
                        bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);

  // Verify every function first, then the module-level rules; a failure
  // anywhere marks the whole module broken.
  bool Broken = false;
  for (const Function &F : M)
    Broken |= !V.verify(F);

  Broken |= !V.verify();
  // When the caller supplied BrokenDebugInfo, report debug-info breakage
  // through it instead of folding it into the return value.
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}
7999
8000namespace {
8001
8002struct VerifierLegacyPass : public FunctionPass {
8003 static char ID;
8004
8005 std::unique_ptr<Verifier> V;
8006 bool FatalErrors = true;
8007
8008 VerifierLegacyPass() : FunctionPass(ID) {}
8009 explicit VerifierLegacyPass(bool FatalErrors)
8010 : FunctionPass(ID), FatalErrors(FatalErrors) {}
8011
8012 bool doInitialization(Module &M) override {
8013 V = std::make_unique<Verifier>(
8014 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
8015 return false;
8016 }
8017
8018 bool runOnFunction(Function &F) override {
8019 if (!V->verify(F) && FatalErrors) {
8020 errs() << "in function " << F.getName() << '\n';
8021 report_fatal_error("Broken function found, compilation aborted!");
8022 }
8023 return false;
8024 }
8025
8026 bool doFinalization(Module &M) override {
8027 bool HasErrors = false;
8028 for (Function &F : M)
8029 if (F.isDeclaration())
8030 HasErrors |= !V->verify(F);
8031
8032 HasErrors |= !V->verify();
8033 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
8034 report_fatal_error("Broken module found, compilation aborted!");
8035 return false;
8036 }
8037
8038 void getAnalysisUsage(AnalysisUsage &AU) const override {
8039 AU.setPreservesAll();
8040 }
8041};
8042
8043} // end anonymous namespace
8044
8045/// Helper to issue failure from the TBAA verification
8046template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8047 if (Diagnostic)
8048 return Diagnostic->CheckFailed(Args...);
8049}
8050
// Check a TBAA-specific condition; on failure report it through CheckFailed
// and make the enclosing function return false (i.e. reject the tag).
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8058
8059/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8060/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8061/// struct-type node describing an aggregate data structure (like a struct).
8062TBAAVerifier::TBAABaseNodeSummary
8063TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8064 bool IsNewFormat) {
8065 if (BaseNode->getNumOperands() < 2) {
8066 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8067 return {true, ~0u};
8068 }
8069
8070 auto Itr = TBAABaseNodes.find(BaseNode);
8071 if (Itr != TBAABaseNodes.end())
8072 return Itr->second;
8073
8074 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8075 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8076 (void)InsertResult;
8077 assert(InsertResult.second && "We just checked!");
8078 return Result;
8079}
8080
// Uncached worker for verifyTBAABaseNode: checks the node's shape, its
// size/name operands and that field offsets are constant, same-width and
// non-decreasing. Returns {Invalid, BitWidth-of-offsets}.
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
                                     const MDNode *BaseNode, bool IsNewFormat) {
  // Summary returned for any malformed node: invalid, with a sentinel width.
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  // Shape check: new-format field entries come in triples, old-format field
  // entries in pairs after the name operand.
  if (IsNewFormat) {
    if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have the number of operands that is a "
                  "multiple of 3!", BaseNode);
      return InvalidNode;
    }
  } else {
    if (BaseNode->getNumOperands() % 2 != 1) {
      CheckFailed("Struct tag nodes must have an odd number of operands!",
                  BaseNode);
      return InvalidNode;
    }
  }

  // Check the type size field.
  if (IsNewFormat) {
    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        BaseNode->getOperand(1));
    if (!TypeSizeNode) {
      CheckFailed("Type size nodes must be constants!", I, BaseNode);
      return InvalidNode;
    }
  }

  // Check the type name field. In the new format it can be anything.
  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes have a string as their first operand",
                BaseNode);
    return InvalidNode;
  }

  bool Failed = false;

  std::optional<APInt> PrevOffset;
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
    if (!isa<MDNode>(FieldTy)) {
      CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
    if (!OffsetEntryCI) {
      CheckFailed("Offset entries must be constants!", I, BaseNode);
      Failed = true;
      continue;
    }

    // All field offsets must share one bit-width; remember the first seen.
    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          "Bitwidth between the offsets and struct type entries must match", I,
          BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field lexically the latest in struct type metadata node. This
    // mirrors the actual behavior of the alias analysis implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed("Offsets must be increasing!", I, BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();

    if (IsNewFormat) {
      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
          BaseNode->getOperand(Idx + 2));
      if (!MemberSizeNode) {
        CheckFailed("Member size entries must be constants!", I, BaseNode);
        Failed = true;
        continue;
      }
    }
  }

  // Keep scanning after a bad field so every problem is reported, but the
  // node as a whole is still invalid.
  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}
8191
8192static bool IsRootTBAANode(const MDNode *MD) {
8193 return MD->getNumOperands() < 2;
8194}
8195
// Returns true if \p MD has the shape of a scalar TBAA type node: a string
// name, a parent link, and (optionally) a zero offset; the parent chain must
// terminate in a root node. \p Visited guards against cyclic parent chains.
static bool IsScalarTBAANodeImpl(const MDNode *MD,
  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
    return false;

  if (!isa<MDString>(MD->getOperand(0)))
    return false;

  if (MD->getNumOperands() == 3) {
    // The optional third operand must be a constant zero.
    if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
      return false;
  }

  // Recurse into the parent; insertion into Visited fails on a cycle.
  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
  return Parent && Visited.insert(Parent).second &&
         (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
}
8214
8215bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8216 auto ResultIt = TBAAScalarNodes.find(MD);
8217 if (ResultIt != TBAAScalarNodes.end())
8218 return ResultIt->second;
8219
8220 SmallPtrSet<const MDNode *, 4> Visited;
8221 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8222 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8223 (void)InsertResult;
8224 assert(InsertResult.second && "Just checked!");
8225
8226 return Result;
8227}
8228
8229/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8230/// Offset in place to be the offset within the field node returned.
8231///
8232/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8233MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8234 const MDNode *BaseNode,
8235 APInt &Offset,
8236 bool IsNewFormat) {
8237 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8238
8239 // Scalar nodes have only one possible "field" -- their parent in the access
8240 // hierarchy. Offset must be zero at this point, but our caller is supposed
8241 // to check that.
8242 if (BaseNode->getNumOperands() == 2)
8243 return cast<MDNode>(BaseNode->getOperand(1));
8244
8245 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8246 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8247 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8248 Idx += NumOpsPerField) {
8249 auto *OffsetEntryCI =
8250 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8251 if (OffsetEntryCI->getValue().ugt(Offset)) {
8252 if (Idx == FirstFieldOpNo) {
8253 CheckFailed("Could not find TBAA parent in struct type node", I,
8254 BaseNode, &Offset);
8255 return nullptr;
8256 }
8257
8258 unsigned PrevIdx = Idx - NumOpsPerField;
8259 auto *PrevOffsetEntryCI =
8260 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8261 Offset -= PrevOffsetEntryCI->getValue();
8262 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8263 }
8264 }
8265
8266 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8267 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8268 BaseNode->getOperand(LastIdx + 1));
8269 Offset -= LastOffsetEntryCI->getValue();
8270 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8271}
8272
  // Fewer than three operands cannot be a new-format type node.
  if (!Type || Type->getNumOperands() < 3)
    return false;

  // In the new format type nodes shall have a reference to the parent type as
  // its first operand.
  return isa_and_nonnull<MDNode>(Type->getOperand(0));
}
8281
  CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
            MD);

  if (I)
            "This instruction shall not have a TBAA access tag!", I);

  // Struct-path access tags start with an MDNode base and carry at least
  // base, access-type and offset operands.
  bool IsStructPathTBAA =
      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;

  CheckTBAA(IsStructPathTBAA,
            "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
            I);

  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));

  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);

  // The allowed operand count differs between the two tag formats.
  if (IsNewFormat) {
    CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
              "Access tag metadata must have either 4 or 5 operands", I, MD);
  } else {
    CheckTBAA(MD->getNumOperands() < 5,
              "Struct tag metadata must have either 3 or 4 operands", I, MD);
  }

  // Check the access size field.
  if (IsNewFormat) {
    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(3));
    CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
  }

  // Check the immutability flag.
  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(ImmutabilityFlagOpNo));
    CheckTBAA(IsImmutableCI,
              "Immutability tag on struct tag metadata must be a constant", I,
              MD);
    CheckTBAA(
        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
        "Immutability part of the struct tag metadata must be either 0 or 1", I,
        MD);
  }

  CheckTBAA(BaseNode && AccessType,
            "Malformed struct tag metadata: base and access-type "
            "should be non-null and point to Metadata nodes",
            I, MD, BaseNode, AccessType);

  if (!IsNewFormat) {
    CheckTBAA(isValidScalarTBAANode(AccessType),
              "Access type node must be a valid scalar type", I, MD,
              AccessType);
  }

  CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);

  // Walk the struct path from the base node towards the access type, peeling
  // Offset down at each step.
  APInt Offset = OffsetCI->getValue();
  bool SeenAccessTypeInPath = false;

  // Doubles as a cycle detector for malformed metadata.
  SmallPtrSet<MDNode *, 4> StructPath;

  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
       BaseNode =
           getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
    if (!StructPath.insert(BaseNode).second) {
      CheckFailed("Cycle detected in struct path", I, MD);
      return false;
    }

    bool Invalid;
    unsigned BaseNodeBitWidth;
    std::tie(Invalid, BaseNodeBitWidth) =
        verifyTBAABaseNode(I, BaseNode, IsNewFormat);

    // If the base node is invalid in itself, then we've already printed all the
    // errors we wanted to print.
    if (Invalid)
      return false;

    SeenAccessTypeInPath |= BaseNode == AccessType;

    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
      CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
                MD, &Offset);

    CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
                  (BaseNodeBitWidth == 0 && Offset == 0) ||
                  (IsNewFormat && BaseNodeBitWidth == ~0u),
              "Access bit-width not the same as description bit-width", I, MD,
              BaseNodeBitWidth, Offset.getBitWidth());

    // New-format walks may stop as soon as the access type was reached.
    if (IsNewFormat && SeenAccessTypeInPath)
      break;
  }

  CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
            MD);
  return true;
}
8390
char VerifierLegacyPass::ID = 0;
// Register the legacy pass under the "verify" command-line name.
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)

  // Factory used by the legacy pass manager to create the verifier pass.
  return new VerifierLegacyPass(FatalErrors);
}
8397
8398AnalysisKey VerifierAnalysis::Key;
8405
8410
  // Run the VerifierAnalysis over the module; with FatalErrors set, abort
  // compilation if either the IR or the debug info is broken.
  auto Res = AM.getResult<VerifierAnalysis>(M);
  if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
    report_fatal_error("Broken module found, compilation aborted!");

  return PreservedAnalyses::all();
}
8418
  // Function-level flavor: only IR breakage is treated as fatal here.
  auto res = AM.getResult<VerifierAnalysis>(F);
  if (res.IRBroken && FatalErrors)
    report_fatal_error("Broken function found, compilation aborted!");

  return PreservedAnalyses::all();
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:690
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:731
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1544
bool isNegative() const
Definition APFloat.h:1534
const fltSemantics & getSemantics() const
Definition APFloat.h:1542
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1585
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for types.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
static bool isSupportedFloatingPointType(Type *Ty)
Returns true if Ty is a supported floating-point type for phi, select, or call FPMathOperators.
Definition Operator.h:344
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:688
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:116
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:569
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast from unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:820
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool isSignatureValid(Intrinsic::ID ID, FunctionType *FT, SmallVectorImpl< Type * > &OverloadTys, raw_ostream &OS=nulls())
Returns true if FT is a valid function type for intrinsic ID.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:840
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:308
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:157
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:151
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:301
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:153
LLVMContext & Context
Definition Verifier.cpp:148
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:155
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:290
const Module & M
Definition Verifier.cpp:144
const DataLayout & DL
Definition Verifier.cpp:147
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:317
const Triple & TT
Definition Verifier.cpp:146
ModuleSlotTracker MST
Definition Verifier.cpp:145