LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
126#include <algorithm>
127#include <cassert>
128#include <cstdint>
129#include <memory>
130#include <optional>
131#include <string>
132#include <utility>
133
134using namespace llvm;
135
137 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
138 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
139 "scopes are not dominating"));
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "declare_value";
197 break;
199 *OS << "assign";
200 break;
202 *OS << "end";
203 break;
205 *OS << "any";
206 break;
207 };
208 }
209
210 void Write(const Metadata *MD) {
211 if (!MD)
212 return;
213 MD->print(*OS, MST, &M);
214 *OS << '\n';
215 }
216
/// Unwrap a typed MD tuple wrapper and print the underlying node.
template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
  Write(MD.get());
}
220
221 void Write(const NamedMDNode *NMD) {
222 if (!NMD)
223 return;
224 NMD->print(*OS, MST);
225 *OS << '\n';
226 }
227
228 void Write(Type *T) {
229 if (!T)
230 return;
231 *OS << ' ' << *T;
232 }
233
234 void Write(const Comdat *C) {
235 if (!C)
236 return;
237 *OS << *C;
238 }
239
240 void Write(const APInt *AI) {
241 if (!AI)
242 return;
243 *OS << *AI << '\n';
244 }
245
246 void Write(const unsigned i) { *OS << i << '\n'; }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const Attribute *A) {
250 if (!A)
251 return;
252 *OS << A->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeSet *AS) {
257 if (!AS)
258 return;
259 *OS << AS->getAsString() << '\n';
260 }
261
262 // NOLINTNEXTLINE(readability-identifier-naming)
263 void Write(const AttributeList *AL) {
264 if (!AL)
265 return;
266 AL->print(*OS);
267 }
268
269 void Write(Printable P) { *OS << P << '\n'; }
270
271 template <typename T> void Write(ArrayRef<T> Vs) {
272 for (const T &V : Vs)
273 Write(V);
274 }
275
/// Print each value in the pack, in order, via the matching Write overload.
template <typename T1, typename... Ts>
void WriteTs(const T1 &V1, const Ts &... Vs) {
  Write(V1);
  // C++17 comma-fold replaces the old recursive expansion: it guarantees
  // left-to-right evaluation and does not depend on the empty base overload.
  (Write(Vs), ...);
}
281
282 template <typename... Ts> void WriteTs() {}
283
284public:
285 /// A check failed, so printout out the condition and the message.
286 ///
287 /// This provides a nice place to put a breakpoint if you want to see why
288 /// something is not correct.
289 void CheckFailed(const Twine &Message) {
290 if (OS)
291 *OS << Message << '\n';
292 Broken = true;
293 }
294
/// A check failed (with values to print).
///
/// This calls the Message-only version so that the above is easier to set a
/// breakpoint on.
template <typename T1, typename... Ts>
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
  CheckFailed(Message); // marks the module broken and prints the message
  if (OS)               // dump the offending entities only when streaming
    WriteTs(V1, Vs...);
}
305
306 /// A debug info check failed.
307 void DebugInfoCheckFailed(const Twine &Message) {
308 if (OS)
309 *OS << Message << '\n';
311 BrokenDebugInfo = true;
312 }
313
/// A debug info check failed (with values to print).
template <typename T1, typename... Ts>
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                          const Ts &... Vs) {
  DebugInfoCheckFailed(Message); // records broken debug info, prints message
  if (OS)                        // dump the offending entities only when streaming
    WriteTs(V1, Vs...);
}
322};
323
324namespace {
325
326class Verifier : public InstVisitor<Verifier>, VerifierSupport {
327 friend class InstVisitor<Verifier>;
328 DominatorTree DT;
329
330 /// When verifying a basic block, keep track of all of the
331 /// instructions we have seen so far.
332 ///
333 /// This allows us to do efficient dominance checks for the case when an
334 /// instruction has an operand that is an instruction in the same block.
335 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
336
337 /// Keep track of the metadata nodes that have been checked already.
339
340 /// Keep track which DISubprogram is attached to which function.
342
343 /// Track all DICompileUnits visited.
345
346 /// The result type for a landingpad.
347 Type *LandingPadResultTy;
348
349 /// Whether we've seen a call to @llvm.localescape in this function
350 /// already.
351 bool SawFrameEscape;
352
353 /// Whether the current function has a DISubprogram attached to it.
354 bool HasDebugInfo = false;
355
356 /// Stores the count of how many objects were passed to llvm.localescape for a
357 /// given function and the largest index passed to llvm.localrecover.
359
360 // Maps catchswitches and cleanuppads that unwind to siblings to the
361 // terminators that indicate the unwind, used to detect cycles therein.
363
364 /// Cache which blocks are in which funclet, if an EH funclet personality is
365 /// in use. Otherwise empty.
366 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
367
368 /// Cache of constants visited in search of ConstantExprs.
369 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
370
371 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
372 SmallVector<const Function *, 4> DeoptimizeDeclarations;
373
374 /// Cache of attribute lists verified.
375 SmallPtrSet<const void *, 32> AttributeListsVisited;
376
377 // Verify that this GlobalValue is only used in this module.
378 // This map is used to avoid visiting uses twice. We can arrive at a user
379 // twice, if they have multiple operands. In particular for very large
380 // constant expressions, we can arrive at a particular user many times.
381 SmallPtrSet<const Value *, 32> GlobalValueVisited;
382
383 // Keeps track of duplicate function argument debug info.
385
386 TBAAVerifier TBAAVerifyHelper;
387 ConvergenceVerifier ConvergenceVerifyHelper;
388
389 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
390
391 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
392
393public:
/// Construct a verifier bound to module \p M. Diagnostics are streamed to
/// \p OS when non-null; \p ShouldTreatBrokenDebugInfoAsError controls
/// whether broken debug info fails verification outright.
explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                  const Module &M)
    : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
      SawFrameEscape(false), TBAAVerifyHelper(this) {
  TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
}
400
401 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
402
/// Verify a single function of the bound module. Returns true if no
/// (non-debug-info) verification error was found.
bool verify(const Function &F) {
  llvm::TimeTraceScope timeScope("Verifier");
  assert(F.getParent() == &M &&
         "An instance of this class only works with a specific module!");

  // First ensure the function is well-enough formed to compute dominance
  // information, and directly compute a dominance tree. We don't rely on the
  // pass manager to provide this as it isolates us from a potentially
  // out-of-date dominator tree and makes it significantly more complex to run
  // this code outside of a pass manager.

  // First check that every basic block has a terminator, otherwise we can't
  // even inspect the CFG.
  for (const BasicBlock &BB : F) {
    if (!BB.empty() && BB.back().isTerminator())
      continue;

    // Without terminators the CFG (and hence the dominator tree below)
    // cannot be built, so report and bail out immediately.
    if (OS) {
      *OS << "Basic Block in function '" << F.getName()
          << "' does not have terminator!\n";
      BB.printAsOperand(*OS, true, MST);
      *OS << "\n";
    }
    return false;
  }

  // FIXME: It's really gross that we have to cast away constness here.
  if (!F.empty())
    DT.recalculate(const_cast<Function &>(F));

  auto FailureCB = [this](const Twine &Message) {
    this->CheckFailed(Message);
  };
  ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

  Broken = false;
  // FIXME: We strip const here because the inst visitor strips const.
  visit(const_cast<Function &>(F));
  verifySiblingFuncletUnwinds();

  // Token-based convergence rules need the dominator tree, so they are
  // checked after the main visitation pass.
  if (ConvergenceVerifyHelper.sawTokens())
    ConvergenceVerifyHelper.verify(DT);

  // Reset per-function state so a subsequent verify(F) starts clean.
  InstsInThisBlock.clear();
  DebugFnArgs.clear();
  LandingPadResultTy = nullptr;
  SawFrameEscape = false;
  SiblingFuncletInfo.clear();
  verifyNoAliasScopeDecl();
  NoAliasScopeDecls.clear();

  return !Broken;
}
456
/// Verify the module that this instance of \c Verifier was initialized with.
/// Returns true if no module-level verification error was found.
bool verify() {
  Broken = false;

  // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
  for (const Function &F : M)
    if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
      DeoptimizeDeclarations.push_back(&F);

  // Now that we've visited every function, verify that we never asked to
  // recover a frame index that wasn't escaped.
  verifyFrameRecoverIndices();
  for (const GlobalVariable &GV : M.globals())
    visitGlobalVariable(GV);

  for (const GlobalAlias &GA : M.aliases())
    visitGlobalAlias(GA);

  for (const GlobalIFunc &GI : M.ifuncs())
    visitGlobalIFunc(GI);

  for (const NamedMDNode &NMD : M.named_metadata())
    visitNamedMDNode(NMD);

  // Comdats live in the module's comdat symbol table rather than in the IR
  // value hierarchy, so they are walked separately.
  for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
    visitComdat(SMEC.getValue());

  visitModuleFlags();
  visitModuleIdents();
  visitModuleCommandLines();
  visitModuleErrnoTBAA();

  verifyCompileUnits();

  verifyDeoptimizeCallingConvs();
  DISubprogramAttachments.clear();
  return !Broken;
}
495
496private:
497 /// Whether a metadata node is allowed to be, or contain, a DILocation.
498 enum class AreDebugLocsAllowed { No, Yes };
499
500 /// Metadata that should be treated as a range, with slightly different
501 /// requirements.
502 enum class RangeLikeMetadataKind {
503 Range, // MD_range
504 AbsoluteSymbol, // MD_absolute_symbol
505 NoaliasAddrspace // MD_noalias_addrspace
506 };
507
508 // Verification methods...
509 void visitGlobalValue(const GlobalValue &GV);
510 void visitGlobalVariable(const GlobalVariable &GV);
511 void visitGlobalAlias(const GlobalAlias &GA);
512 void visitGlobalIFunc(const GlobalIFunc &GI);
513 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
514 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
515 const GlobalAlias &A, const Constant &C);
516 void visitNamedMDNode(const NamedMDNode &NMD);
517 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
518 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
519 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
520 void visitDIArgList(const DIArgList &AL, Function *F);
521 void visitComdat(const Comdat &C);
522 void visitModuleIdents();
523 void visitModuleCommandLines();
524 void visitModuleErrnoTBAA();
525 void visitModuleFlags();
526 void visitModuleFlag(const MDNode *Op,
527 DenseMap<const MDString *, const MDNode *> &SeenIDs,
528 SmallVectorImpl<const MDNode *> &Requirements);
529 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
530 void visitFunction(const Function &F);
531 void visitBasicBlock(BasicBlock &BB);
532 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
533 RangeLikeMetadataKind Kind);
534 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
535 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
536 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
538 void visitNofreeMetadata(Instruction &I, MDNode *MD);
539 void visitProfMetadata(Instruction &I, MDNode *MD);
540 void visitCallStackMetadata(MDNode *MD);
541 void visitMemProfMetadata(Instruction &I, MDNode *MD);
542 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
543 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
544 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
545 void visitMMRAMetadata(Instruction &I, MDNode *MD);
546 void visitAnnotationMetadata(MDNode *Annotation);
547 void visitAliasScopeMetadata(const MDNode *MD);
548 void visitAliasScopeListMetadata(const MDNode *MD);
549 void visitAccessGroupMetadata(const MDNode *MD);
550 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
551 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
552 void visitInlineHistoryMetadata(Instruction &I, MDNode *MD);
553
554 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
555#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
556#include "llvm/IR/Metadata.def"
557 void visitDIType(const DIType &N);
558 void visitDIScope(const DIScope &N);
559 void visitDIVariable(const DIVariable &N);
560 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
561 void visitDITemplateParameter(const DITemplateParameter &N);
562
563 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
564
565 void visit(DbgLabelRecord &DLR);
566 void visit(DbgVariableRecord &DVR);
567 // InstVisitor overrides...
568 using InstVisitor<Verifier>::visit;
569 void visitDbgRecords(Instruction &I);
570 void visit(Instruction &I);
571
572 void visitTruncInst(TruncInst &I);
573 void visitZExtInst(ZExtInst &I);
574 void visitSExtInst(SExtInst &I);
575 void visitFPTruncInst(FPTruncInst &I);
576 void visitFPExtInst(FPExtInst &I);
577 void visitFPToUIInst(FPToUIInst &I);
578 void visitFPToSIInst(FPToSIInst &I);
579 void visitUIToFPInst(UIToFPInst &I);
580 void visitSIToFPInst(SIToFPInst &I);
581 void visitIntToPtrInst(IntToPtrInst &I);
582 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
583 void visitPtrToAddrInst(PtrToAddrInst &I);
584 void visitPtrToIntInst(PtrToIntInst &I);
585 void visitBitCastInst(BitCastInst &I);
586 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
587 void visitPHINode(PHINode &PN);
588 void visitCallBase(CallBase &Call);
589 void visitUnaryOperator(UnaryOperator &U);
590 void visitBinaryOperator(BinaryOperator &B);
591 void visitICmpInst(ICmpInst &IC);
592 void visitFCmpInst(FCmpInst &FC);
593 void visitExtractElementInst(ExtractElementInst &EI);
594 void visitInsertElementInst(InsertElementInst &EI);
595 void visitShuffleVectorInst(ShuffleVectorInst &EI);
596 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
597 void visitCallInst(CallInst &CI);
598 void visitInvokeInst(InvokeInst &II);
599 void visitGetElementPtrInst(GetElementPtrInst &GEP);
600 void visitLoadInst(LoadInst &LI);
601 void visitStoreInst(StoreInst &SI);
602 void verifyDominatesUse(Instruction &I, unsigned i);
603 void visitInstruction(Instruction &I);
604 void visitTerminator(Instruction &I);
605 void visitCondBrInst(CondBrInst &BI);
606 void visitReturnInst(ReturnInst &RI);
607 void visitSwitchInst(SwitchInst &SI);
608 void visitIndirectBrInst(IndirectBrInst &BI);
609 void visitCallBrInst(CallBrInst &CBI);
610 void visitSelectInst(SelectInst &SI);
611 void visitUserOp1(Instruction &I);
612 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
613 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
614 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
615 void visitVPIntrinsic(VPIntrinsic &VPI);
616 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
617 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
618 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
619 void visitFenceInst(FenceInst &FI);
620 void visitAllocaInst(AllocaInst &AI);
621 void visitExtractValueInst(ExtractValueInst &EVI);
622 void visitInsertValueInst(InsertValueInst &IVI);
623 void visitEHPadPredecessors(Instruction &I);
624 void visitLandingPadInst(LandingPadInst &LPI);
625 void visitResumeInst(ResumeInst &RI);
626 void visitCatchPadInst(CatchPadInst &CPI);
627 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
628 void visitCleanupPadInst(CleanupPadInst &CPI);
629 void visitFuncletPadInst(FuncletPadInst &FPI);
630 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
631 void visitCleanupReturnInst(CleanupReturnInst &CRI);
632
633 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
634 void verifySwiftErrorValue(const Value *SwiftErrorVal);
635 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
636 void verifyMustTailCall(CallInst &CI);
637 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
638 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
639 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
640 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
641 const Value *V);
642 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
643 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
644 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
645 void verifyUnknownProfileMetadata(MDNode *MD);
646 void visitConstantExprsRecursively(const Constant *EntryC);
647 void visitConstantExpr(const ConstantExpr *CE);
648 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
649 void verifyInlineAsmCall(const CallBase &Call);
650 void verifyStatepoint(const CallBase &Call);
651 void verifyFrameRecoverIndices();
652 void verifySiblingFuncletUnwinds();
653
654 void verifyFragmentExpression(const DbgVariableRecord &I);
655 template <typename ValueOrMetadata>
656 void verifyFragmentExpression(const DIVariable &V,
658 ValueOrMetadata *Desc);
659 void verifyFnArgs(const DbgVariableRecord &DVR);
660 void verifyNotEntryValue(const DbgVariableRecord &I);
661
662 /// Module-level debug info verification...
663 void verifyCompileUnits();
664
665 /// Module-level verification that all @llvm.experimental.deoptimize
666 /// declarations share the same calling convention.
667 void verifyDeoptimizeCallingConvs();
668
669 void verifyAttachedCallBundle(const CallBase &Call,
670 const OperandBundleUse &BU);
671
672 /// Verify the llvm.experimental.noalias.scope.decl declarations
673 void verifyNoAliasScopeDecl();
674};
675
676} // end anonymous namespace
677
678/// We know that cond should be true, if not print an error message.
679#define Check(C, ...) \
680 do { \
681 if (!(C)) { \
682 CheckFailed(__VA_ARGS__); \
683 return; \
684 } \
685 } while (false)
686
687/// We know that a debug info condition should be true, if not print
688/// an error message.
689#define CheckDI(C, ...) \
690 do { \
691 if (!(C)) { \
692 DebugInfoCheckFailed(__VA_ARGS__); \
693 return; \
694 } \
695 } while (false)
696
697void Verifier::visitDbgRecords(Instruction &I) {
698 if (!I.DebugMarker)
699 return;
700 CheckDI(I.DebugMarker->MarkedInstr == &I,
701 "Instruction has invalid DebugMarker", &I);
702 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
703 "PHI Node must not have any attached DbgRecords", &I);
704 for (DbgRecord &DR : I.getDbgRecordRange()) {
705 CheckDI(DR.getMarker() == I.DebugMarker,
706 "DbgRecord had invalid DebugMarker", &I, &DR);
707 if (auto *Loc =
709 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
710 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
711 visit(*DVR);
712 // These have to appear after `visit` for consistency with existing
713 // intrinsic behaviour.
714 verifyFragmentExpression(*DVR);
715 verifyNotEntryValue(*DVR);
716 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
717 visit(*DLR);
718 }
719 }
720}
721
722void Verifier::visit(Instruction &I) {
723 visitDbgRecords(I);
724 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
725 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
727}
728
729// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
730static void forEachUser(const Value *User,
732 llvm::function_ref<bool(const Value *)> Callback) {
733 if (!Visited.insert(User).second)
734 return;
735
737 while (!WorkList.empty()) {
738 const Value *Cur = WorkList.pop_back_val();
739 if (!Visited.insert(Cur).second)
740 continue;
741 if (Callback(Cur))
742 append_range(WorkList, Cur->materialized_users());
743 }
744}
745
746void Verifier::visitGlobalValue(const GlobalValue &GV) {
748 "Global is external, but doesn't have external or weak linkage!", &GV);
749
750 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
751 if (const MDNode *Associated =
752 GO->getMetadata(LLVMContext::MD_associated)) {
753 Check(Associated->getNumOperands() == 1,
754 "associated metadata must have one operand", &GV, Associated);
755 const Metadata *Op = Associated->getOperand(0).get();
756 Check(Op, "associated metadata must have a global value", GO, Associated);
757
758 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
759 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
760 if (VM) {
761 Check(isa<PointerType>(VM->getValue()->getType()),
762 "associated value must be pointer typed", GV, Associated);
763
764 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
765 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
766 "associated metadata must point to a GlobalObject", GO, Stripped);
767 Check(Stripped != GO,
768 "global values should not associate to themselves", GO,
769 Associated);
770 }
771 }
772
773 // FIXME: Why is getMetadata on GlobalValue protected?
774 if (const MDNode *AbsoluteSymbol =
775 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
776 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
777 DL.getIntPtrType(GO->getType()),
778 RangeLikeMetadataKind::AbsoluteSymbol);
779 }
780
781 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
782 Check(!GO->isDeclaration(),
783 "ref metadata must not be placed on a declaration", GO);
784
786 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
787 for (const MDNode *MD : MDs) {
788 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
789 &GV, MD);
790 const Metadata *Op = MD->getOperand(0).get();
791 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
792 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
793 if (VM) {
794 Check(isa<PointerType>(VM->getValue()->getType()),
795 "ref value must be pointer typed", GV, MD);
796
797 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
798 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
799 "ref metadata must point to a GlobalObject", GO, Stripped);
800 Check(Stripped != GO, "values should not reference themselves", GO,
801 MD);
802 }
803 }
804 }
805 }
806
808 "Only global variables can have appending linkage!", &GV);
809
810 if (GV.hasAppendingLinkage()) {
811 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
812 Check(GVar && GVar->getValueType()->isArrayTy(),
813 "Only global arrays can have appending linkage!", GVar);
814 }
815
816 if (GV.isDeclarationForLinker())
817 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
818
819 if (GV.hasDLLExportStorageClass()) {
821 "dllexport GlobalValue must have default or protected visibility",
822 &GV);
823 }
824 if (GV.hasDLLImportStorageClass()) {
826 "dllimport GlobalValue must have default visibility", &GV);
827 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
828 &GV);
829
830 Check((GV.isDeclaration() &&
833 "Global is marked as dllimport, but not external", &GV);
834 }
835
836 if (GV.isImplicitDSOLocal())
837 Check(GV.isDSOLocal(),
838 "GlobalValue with local linkage or non-default "
839 "visibility must be dso_local!",
840 &GV);
841
842 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
843 if (const Instruction *I = dyn_cast<Instruction>(V)) {
844 if (!I->getParent() || !I->getParent()->getParent())
845 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
846 I);
847 else if (I->getParent()->getParent()->getParent() != &M)
848 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
849 I->getParent()->getParent(),
850 I->getParent()->getParent()->getParent());
851 return false;
852 } else if (const Function *F = dyn_cast<Function>(V)) {
853 if (F->getParent() != &M)
854 CheckFailed("Global is used by function in a different module", &GV, &M,
855 F, F->getParent());
856 return false;
857 }
858 return true;
859 });
860}
861
862void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
863 Type *GVType = GV.getValueType();
864
865 if (MaybeAlign A = GV.getAlign()) {
866 Check(A->value() <= Value::MaximumAlignment,
867 "huge alignment values are unsupported", &GV);
868 }
869
870 if (GV.hasInitializer()) {
871 Check(GV.getInitializer()->getType() == GVType,
872 "Global variable initializer type does not match global "
873 "variable type!",
874 &GV);
876 "Global variable initializer must be sized", &GV);
877 visitConstantExprsRecursively(GV.getInitializer());
878 // If the global has common linkage, it must have a zero initializer and
879 // cannot be constant.
880 if (GV.hasCommonLinkage()) {
882 "'common' global must have a zero initializer!", &GV);
883 Check(!GV.isConstant(), "'common' global may not be marked constant!",
884 &GV);
885 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
886 }
887 }
888
889 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
890 GV.getName() == "llvm.global_dtors")) {
892 "invalid linkage for intrinsic global variable", &GV);
894 "invalid uses of intrinsic global variable", &GV);
895
896 // Don't worry about emitting an error for it not being an array,
897 // visitGlobalValue will complain on appending non-array.
898 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
899 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
900 PointerType *FuncPtrTy =
901 PointerType::get(Context, DL.getProgramAddressSpace());
902 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
903 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
904 STy->getTypeAtIndex(1) == FuncPtrTy,
905 "wrong type for intrinsic global variable", &GV);
906 Check(STy->getNumElements() == 3,
907 "the third field of the element type is mandatory, "
908 "specify ptr null to migrate from the obsoleted 2-field form");
909 Type *ETy = STy->getTypeAtIndex(2);
910 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
911 &GV);
912 }
913 }
914
915 if (GV.hasName() && (GV.getName() == "llvm.used" ||
916 GV.getName() == "llvm.compiler.used")) {
918 "invalid linkage for intrinsic global variable", &GV);
920 "invalid uses of intrinsic global variable", &GV);
921
922 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
923 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
924 Check(PTy, "wrong type for intrinsic global variable", &GV);
925 if (GV.hasInitializer()) {
926 const Constant *Init = GV.getInitializer();
927 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
928 Check(InitArray, "wrong initializer for intrinsic global variable",
929 Init);
930 for (Value *Op : InitArray->operands()) {
931 Value *V = Op->stripPointerCasts();
934 Twine("invalid ") + GV.getName() + " member", V);
935 Check(V->hasName(),
936 Twine("members of ") + GV.getName() + " must be named", V);
937 }
938 }
939 }
940 }
941
942 // Visit any debug info attachments.
944 GV.getMetadata(LLVMContext::MD_dbg, MDs);
945 for (auto *MD : MDs) {
946 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
947 visitDIGlobalVariableExpression(*GVE);
948 else
949 CheckDI(false, "!dbg attachment of global variable must be a "
950 "DIGlobalVariableExpression");
951 }
952
953 // Scalable vectors cannot be global variables, since we don't know
954 // the runtime size.
955 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
956
957 // Check if it is or contains a target extension type that disallows being
958 // used as a global.
960 "Global @" + GV.getName() + " has illegal target extension type",
961 GVType);
962
963 // Check that the the address space can hold all bits of the type, recognized
964 // by an access in the address space being able to reach all bytes of the
965 // type.
966 Check(!GVType->isSized() ||
967 isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
968 GV.getGlobalSize(DL)),
969 "Global variable is too large to fit into the address space", &GV,
970 GVType);
971
972 if (!GV.hasInitializer()) {
973 visitGlobalValue(GV);
974 return;
975 }
976
977 // Walk any aggregate initializers looking for bitcasts between address spaces
978 visitConstantExprsRecursively(GV.getInitializer());
979
980 visitGlobalValue(GV);
981}
982
983void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
984 SmallPtrSet<const GlobalAlias*, 4> Visited;
985 Visited.insert(&GA);
986 visitAliaseeSubExpr(Visited, GA, C);
987}
988
/// Recursively verify the aliasee expression of alias \p GA, diagnosing
/// cycles, interposable targets, and declaration-only targets. \p Visited
/// carries every alias already seen on this chain.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias must ultimately resolve to a real definition for the linker.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // A failed insertion means this alias was already on the chain, i.e.
      // the aliases form a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk operands: follow nested aliases through to their aliasee, and keep
  // descending through other constant sub-expressions.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1027
/// Verify a GlobalAlias: linkage, a non-null aliasee of matching type, and
/// the whole aliasee sub-expression (cycles, definitions, etc.).
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  // An alias is just another name for its aliasee, so types must agree.
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively check the aliasee expression (cycle/definition checks).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally run the checks common to all global values.
  visitGlobalValue(GA);
}
1045
/// Verify a GlobalIFunc: metadata restrictions, linkage, and that its
/// resolver is a defined function whose type is a pointer in the ifunc's
/// address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // Debug-info and profile metadata are not meaningful on an ifunc.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver operand itself must be a pointer in the ifunc's own
  // address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1080
1081void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1082 // There used to be various other llvm.dbg.* nodes, but we don't support
1083 // upgrading them and we want to reserve the namespace for future uses.
1084 if (NMD.getName().starts_with("llvm.dbg."))
1085 CheckDI(NMD.getName() == "llvm.dbg.cu",
1086 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1087 for (const MDNode *MD : NMD.operands()) {
1088 if (NMD.getName() == "llvm.dbg.cu")
1089 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1090
1091 if (!MD)
1092 continue;
1093
1094 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1095 }
1096}
1097
/// Verify a metadata node: context, concrete-class invariants, and all
/// reachable operands. \p AllowLocs states whether DILocations may appear
/// among the operands.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for the node's concrete subclass;
  // plain tuples need no subclass-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata must never be reachable from a global node.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1152
1153void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1154 Check(MD.getValue(), "Expected valid value", &MD);
1155 Check(!MD.getValue()->getType()->isMetadataTy(),
1156 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1157
1158 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1159 if (!L)
1160 return;
1161
1162 Check(F, "function-local metadata used outside a function", L);
1163
1164 // If this was an instruction, bb, or argument, verify that it is in the
1165 // function that we expect.
1166 Function *ActualF = nullptr;
1167 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1168 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1169 ActualF = I->getParent()->getParent();
1170 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1171 ActualF = BB->getParent();
1172 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1173 ActualF = A->getParent();
1174 assert(ActualF && "Unimplemented function local metadata case!");
1175
1176 Check(ActualF == F, "function-local metadata used in wrong function", L);
1177}
1178
1179void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1180 for (const ValueAsMetadata *VAM : AL.getArgs())
1181 visitValueAsMetadata(*VAM, F);
1182}
1183
1184void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1185 Metadata *MD = MDV.getMetadata();
1186 if (auto *N = dyn_cast<MDNode>(MD)) {
1187 visitMDNode(*N, AreDebugLocsAllowed::No);
1188 return;
1189 }
1190
1191 // Only visit each node once. Metadata can be mutually recursive, so this
1192 // avoids infinite recursion here, as well as being an optimization.
1193 if (!MDNodes.insert(MD).second)
1194 return;
1195
1196 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1197 visitValueAsMetadata(*V, F);
1198
1199 if (auto *AL = dyn_cast<DIArgList>(MD))
1200 visitDIArgList(*AL, F);
1201}
1202
1203static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1204static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1205static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1206static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1207
1208void Verifier::visitDILocation(const DILocation &N) {
1209 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1210 "location requires a valid scope", &N, N.getRawScope());
1211 if (auto *IA = N.getRawInlinedAt())
1212 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1213 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1214 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1215}
1216
1217void Verifier::visitGenericDINode(const GenericDINode &N) {
1218 CheckDI(N.getTag(), "invalid tag", &N);
1219}
1220
1221void Verifier::visitDIScope(const DIScope &N) {
1222 if (auto *F = N.getRawFile())
1223 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1224}
1225
1226void Verifier::visitDIType(const DIType &N) {
1227 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1228 visitDIScope(N);
1229 CheckDI(N.getRawFile() || N.getLine() == 0, "line specified with no file", &N,
1230 N.getLine());
1231}
1232
/// Verify a DISubrangeType: each bound/stride/bias operand, when present,
/// must be one of the accepted metadata kinds.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1266
/// Verify a DISubrange: count and upperBound are mutually exclusive, and all
/// bound operands must be one of the accepted metadata kinds.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are alternative ways to bound the range; at most one
  // may be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
      "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1294
1295void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1296 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1297 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1298 "GenericSubrange can have any one of count or upperBound", &N);
1299 auto *CBound = N.getRawCountNode();
1300 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1301 "Count must be signed constant or DIVariable or DIExpression", &N);
1302 auto *LBound = N.getRawLowerBound();
1303 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1304 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1305 "LowerBound must be signed constant or DIVariable or DIExpression",
1306 &N);
1307 auto *UBound = N.getRawUpperBound();
1308 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1309 "UpperBound must be signed constant or DIVariable or DIExpression",
1310 &N);
1311 auto *Stride = N.getRawStride();
1312 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1313 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1314 "Stride must be signed constant or DIVariable or DIExpression", &N);
1315}
1316
1317void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1318 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1319}
1320
/// Verify a DIBasicType: common type checks plus an acceptable tag.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1333
/// Verify a DIFixedPointType: basic-type checks plus fixed-point-specific
/// encoding and factor/numerator/denominator consistency.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  // Only the two fixed-point DWARF encodings are permitted here.
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1352
1353void Verifier::visitDIStringType(const DIStringType &N) {
1354 visitDIType(N);
1355
1356 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1357 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1358 &N);
1359}
1360
/// Verify a DIDerivedType: acceptable tag, tag-dependent extraData shape,
/// set-type base-type restrictions, and DWARF address-space applicability.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The required shape of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept null, a constant, a string, an ObjC property, or a one-element
    // tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // A set's base type must be an enumeration, a subrange, or a basic
      // type with an integral/boolean encoding.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1445
1446/// Detect mutually exclusive flags.
1447static bool hasConflictingReferenceFlags(unsigned Flags) {
1448 return ((Flags & DINode::FlagLValueReference) &&
1449 (Flags & DINode::FlagRValueReference)) ||
1450 ((Flags & DINode::FlagTypePassByValue) &&
1451 (Flags & DINode::FlagTypePassByReference));
1452}
1453
1454void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1455 auto *Params = dyn_cast<MDTuple>(&RawParams);
1456 CheckDI(Params, "invalid template params", &N, &RawParams);
1457 for (Metadata *Op : Params->operands()) {
1458 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1459 &N, Params, Op);
1460 }
1461}
1462
/// Verify a DICompositeType: acceptable tag, well-formed element list,
/// vector/array shape rules, and tag-restricted optional fields.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Legacy Apple "blocks" flag bit; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    // Vectors are arrays with exactly one subrange dimension.
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The Fortran-style dynamic-array fields are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1536
/// Verify a DISubroutineType: the type array, when present, must be a tuple
/// of type references (null entries mean "unknown"/void).
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  visitDIType(N);
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1549
1550void Verifier::visitDIFile(const DIFile &N) {
1551 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1552 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1553 if (Checksum) {
1554 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1555 "invalid checksum kind", &N);
1556 size_t Size;
1557 switch (Checksum->Kind) {
1558 case DIFile::CSK_MD5:
1559 Size = 32;
1560 break;
1561 case DIFile::CSK_SHA1:
1562 Size = 40;
1563 break;
1564 case DIFile::CSK_SHA256:
1565 Size = 64;
1566 break;
1567 }
1568 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1569 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1570 "invalid checksum", &N);
1571 }
1572}
1573
/// Verify a DICompileUnit: distinctness, file, emission kind, and the enum /
/// retained-type / global / imported-entity / macro operand lists. Records
/// the unit in CUVisited for later cross-checks.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      // Function-local enums belong to their subprogram, not the CU list.
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained entries are types, or subprogram declarations (never
      // definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(IE, "invalid imported entity ref", &N, Op);
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1634
/// Verify a DISubprogram: scope/file/type references, retained nodes
/// (locality, ownership, unique argument numbers), the definition vs
/// declaration invariants, and thrown types.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Maps argument number -> variable, to detect duplicate argument indices.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept the first four retained-node kinds and reject everything else.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      // Every retained node must ultimately belong to this subprogram.
      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1733
1734void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1735 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1736 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1737 "invalid local scope", &N, N.getRawScope());
1738 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1739 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1740}
1741
1742void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1743 visitDILexicalBlockBase(N);
1744
1745 CheckDI(N.getLine() || !N.getColumn(),
1746 "cannot have column info without line info", &N);
1747}
1748
1749void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1750 visitDILexicalBlockBase(N);
1751}
1752
1753void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1754 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1755 if (auto *S = N.getRawScope())
1756 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1757 if (auto *S = N.getRawDecl())
1758 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1759}
1760
1761void Verifier::visitDINamespace(const DINamespace &N) {
1762 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1763 if (auto *S = N.getRawScope())
1764 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1765}
1766
1767void Verifier::visitDIMacro(const DIMacro &N) {
1768 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1769 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1770 "invalid macinfo type", &N);
1771 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1772 if (!N.getValue().empty()) {
1773 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1774 }
1775}
1776
1777void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1778 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1779 "invalid macinfo type", &N);
1780 if (auto *F = N.getRawFile())
1781 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1782
1783 if (auto *Array = N.getRawElements()) {
1784 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1785 for (Metadata *Op : N.getElements()->operands()) {
1786 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1787 }
1788 }
1789}
1790
// Verify a DIModule: must use DW_TAG_module and carry a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1795
// Common check for all template parameters: the raw type operand must satisfy
// isType() (a valid type reference).
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1799
// A template type parameter: shares the common template-parameter checks and
// must use DW_TAG_template_type_parameter.
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
1806
1807void Verifier::visitDITemplateValueParameter(
1808 const DITemplateValueParameter &N) {
1809 visitDITemplateParameter(N);
1810
1811 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1812 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1813 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1814 "invalid tag", &N);
1815}
1816
1817void Verifier::visitDIVariable(const DIVariable &N) {
1818 if (auto *S = N.getRawScope())
1819 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1820 if (auto *F = N.getRawFile())
1821 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1822}
1823
1824void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1825 // Checks common to all variables.
1826 visitDIVariable(N);
1827
1828 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1829 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1830 // Check only if the global variable is not an extern
1831 if (N.isDefinition())
1832 CheckDI(N.getType(), "missing global variable type", &N);
1833 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1835 "invalid static data member declaration", &N, Member);
1836 }
1837}
1838
1839void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1840 // Checks common to all variables.
1841 visitDIVariable(N);
1842
1843 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1844 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1845 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1846 "local variable requires a valid scope", &N, N.getRawScope());
1847 if (auto Ty = N.getType())
1848 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1849}
1850
// Verify a DIAssignID node: it must carry no operands and must be distinct
// (uniqued DIAssignIDs would merge unrelated assignment sites).
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1855
1856void Verifier::visitDILabel(const DILabel &N) {
1857 if (auto *S = N.getRawScope())
1858 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1859 if (auto *F = N.getRawFile())
1860 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1861
1862 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1863 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1864 "label requires a valid scope", &N, N.getRawScope());
1865}
1866
// A DIExpression validates itself; just forward to its isValid() predicate.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1870
// Verify a (global variable, expression) pair: the variable operand is
// mandatory; both operands are recursively verified, and a fragment
// expression must describe a valid fragment of the variable.
void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  CheckDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression()) {
    visitDIExpression(*Expr);
    if (auto Fragment = Expr->getFragmentInfo())
      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
  }
}
1882
1883void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1884 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1885 if (auto *T = N.getRawType())
1886 CheckDI(isType(T), "invalid type ref", &N, T);
1887 if (auto *F = N.getRawFile())
1888 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1889}
1890
1891void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1892 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1893 N.getTag() == dwarf::DW_TAG_imported_declaration,
1894 "invalid tag", &N);
1895 if (auto *S = N.getRawScope())
1896 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1897 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1898 N.getRawEntity());
1899}
1900
1901void Verifier::visitComdat(const Comdat &C) {
1902 // In COFF the Module is invalid if the GlobalValue has private linkage.
1903 // Entities with private linkage don't have entries in the symbol table.
1904 if (TT.isOSBinFormatCOFF())
1905 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1906 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1907 GV);
1908}
1909
1910void Verifier::visitModuleIdents() {
1911 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1912 if (!Idents)
1913 return;
1914
1915 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1916 // Scan each llvm.ident entry and make sure that this requirement is met.
1917 for (const MDNode *N : Idents->operands()) {
1918 Check(N->getNumOperands() == 1,
1919 "incorrect number of operands in llvm.ident metadata", N);
1920 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1921 ("invalid value for llvm.ident metadata entry operand"
1922 "(the operand should be a string)"),
1923 N->getOperand(0));
1924 }
1925}
1926
1927void Verifier::visitModuleCommandLines() {
1928 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1929 if (!CommandLines)
1930 return;
1931
1932 // llvm.commandline takes a list of metadata entry. Each entry has only one
1933 // string. Scan each llvm.commandline entry and make sure that this
1934 // requirement is met.
1935 for (const MDNode *N : CommandLines->operands()) {
1936 Check(N->getNumOperands() == 1,
1937 "incorrect number of operands in llvm.commandline metadata", N);
1938 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1939 ("invalid value for llvm.commandline metadata entry operand"
1940 "(the operand should be a string)"),
1941 N->getOperand(0));
1942 }
1943}
1944
1945void Verifier::visitModuleErrnoTBAA() {
1946 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1947 if (!ErrnoTBAA)
1948 return;
1949
1950 Check(ErrnoTBAA->getNumOperands() >= 1,
1951 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1952
1953 for (const MDNode *N : ErrnoTBAA->operands())
1954 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1955}
1956
1957void Verifier::visitModuleFlags() {
1958 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1959 if (!Flags) return;
1960
1961 // Scan each flag, and track the flags and requirements.
1962 DenseMap<const MDString*, const MDNode*> SeenIDs;
1963 SmallVector<const MDNode*, 16> Requirements;
1964 uint64_t PAuthABIPlatform = -1;
1965 uint64_t PAuthABIVersion = -1;
1966 for (const MDNode *MDN : Flags->operands()) {
1967 visitModuleFlag(MDN, SeenIDs, Requirements);
1968 if (MDN->getNumOperands() != 3)
1969 continue;
1970 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1971 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1972 if (const auto *PAP =
1974 PAuthABIPlatform = PAP->getZExtValue();
1975 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1976 if (const auto *PAV =
1978 PAuthABIVersion = PAV->getZExtValue();
1979 }
1980 }
1981 }
1982
1983 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1984 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1985 "'aarch64-elf-pauthabi-version' module flags must be present");
1986
1987 // Validate that the requirements in the module are valid.
1988 for (const MDNode *Requirement : Requirements) {
1989 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1990 const Metadata *ReqValue = Requirement->getOperand(1);
1991
1992 const MDNode *Op = SeenIDs.lookup(Flag);
1993 if (!Op) {
1994 CheckFailed("invalid requirement on flag, flag is not present in module",
1995 Flag);
1996 continue;
1997 }
1998
1999 if (Op->getOperand(2) != ReqValue) {
2000 CheckFailed(("invalid requirement on flag, "
2001 "flag does not have the required value"),
2002 Flag);
2003 continue;
2004 }
2005 }
2006}
2007
2008void
2009Verifier::visitModuleFlag(const MDNode *Op,
2010 DenseMap<const MDString *, const MDNode *> &SeenIDs,
2011 SmallVectorImpl<const MDNode *> &Requirements) {
2012 // Each module flag should have three arguments, the merge behavior (a
2013 // constant int), the flag ID (an MDString), and the value.
2014 Check(Op->getNumOperands() == 3,
2015 "incorrect number of operands in module flag", Op);
2016 Module::ModFlagBehavior MFB;
2017 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2019 "invalid behavior operand in module flag (expected constant integer)",
2020 Op->getOperand(0));
2021 Check(false,
2022 "invalid behavior operand in module flag (unexpected constant)",
2023 Op->getOperand(0));
2024 }
2025 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2026 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2027 Op->getOperand(1));
2028
2029 // Check the values for behaviors with additional requirements.
2030 switch (MFB) {
2031 case Module::Error:
2032 case Module::Warning:
2033 case Module::Override:
2034 // These behavior types accept any value.
2035 break;
2036
2037 case Module::Min: {
2038 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2039 Check(V && V->getValue().isNonNegative(),
2040 "invalid value for 'min' module flag (expected constant non-negative "
2041 "integer)",
2042 Op->getOperand(2));
2043 break;
2044 }
2045
2046 case Module::Max: {
2048 "invalid value for 'max' module flag (expected constant integer)",
2049 Op->getOperand(2));
2050 break;
2051 }
2052
2053 case Module::Require: {
2054 // The value should itself be an MDNode with two operands, a flag ID (an
2055 // MDString), and a value.
2056 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2057 Check(Value && Value->getNumOperands() == 2,
2058 "invalid value for 'require' module flag (expected metadata pair)",
2059 Op->getOperand(2));
2060 Check(isa<MDString>(Value->getOperand(0)),
2061 ("invalid value for 'require' module flag "
2062 "(first value operand should be a string)"),
2063 Value->getOperand(0));
2064
2065 // Append it to the list of requirements, to check once all module flags are
2066 // scanned.
2067 Requirements.push_back(Value);
2068 break;
2069 }
2070
2071 case Module::Append:
2072 case Module::AppendUnique: {
2073 // These behavior types require the operand be an MDNode.
2074 Check(isa<MDNode>(Op->getOperand(2)),
2075 "invalid value for 'append'-type module flag "
2076 "(expected a metadata node)",
2077 Op->getOperand(2));
2078 break;
2079 }
2080 }
2081
2082 // Unless this is a "requires" flag, check the ID is unique.
2083 if (MFB != Module::Require) {
2084 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2085 Check(Inserted,
2086 "module flag identifiers must be unique (or of 'require' type)", ID);
2087 }
2088
2089 if (ID->getString() == "wchar_size") {
2090 ConstantInt *Value
2092 Check(Value, "wchar_size metadata requires constant integer argument");
2093 }
2094
2095 if (ID->getString() == "Linker Options") {
2096 // If the llvm.linker.options named metadata exists, we assume that the
2097 // bitcode reader has upgraded the module flag. Otherwise the flag might
2098 // have been created by a client directly.
2099 Check(M.getNamedMetadata("llvm.linker.options"),
2100 "'Linker Options' named metadata no longer supported");
2101 }
2102
2103 if (ID->getString() == "SemanticInterposition") {
2104 ConstantInt *Value =
2106 Check(Value,
2107 "SemanticInterposition metadata requires constant integer argument");
2108 }
2109
2110 if (ID->getString() == "CG Profile") {
2111 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2112 visitModuleFlagCGProfileEntry(MDO);
2113 }
2114}
2115
2116void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2117 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2118 if (!FuncMDO)
2119 return;
2120 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2121 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2122 "expected a Function or null", FuncMDO);
2123 };
2124 auto Node = dyn_cast_or_null<MDNode>(MDO);
2125 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2126 CheckFunction(Node->getOperand(0));
2127 CheckFunction(Node->getOperand(1));
2128 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2129 Check(Count && Count->getType()->isIntegerTy(),
2130 "expected an integer constant", Node->getOperand(2));
2131}
2132
// Verify attribute well-formedness independent of position: string-boolean
// attributes must hold "", "true" or "false", and an attribute's stored form
// (int vs. enum) must agree with its kind.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Attributes.inc expands ATTRIBUTE_STRBOOL once per known string-bool
      // attribute, generating a value check for each; other string attributes
      // pass through unchecked.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // A kind classified as an int attribute must carry an argument, and vice
    // versa; a mismatch means the attribute was constructed incorrectly.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2158
2159// VerifyParameterAttrs - Check the given attributes for an argument or return
2160// value of the specified type. The value V is printed in error messages.
2161void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2162 const Value *V) {
2163 if (!Attrs.hasAttributes())
2164 return;
2165
2166 verifyAttributeTypes(Attrs, V);
2167
2168 for (Attribute Attr : Attrs)
2169 Check(Attr.isStringAttribute() ||
2170 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2171 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2172 V);
2173
2174 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2175 unsigned AttrCount =
2176 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2177 Check(AttrCount == 1,
2178 "Attribute 'immarg' is incompatible with other attributes except the "
2179 "'range' attribute",
2180 V);
2181 }
2182
2183 // Check for mutually incompatible attributes. Only inreg is compatible with
2184 // sret.
2185 unsigned AttrCount = 0;
2186 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2187 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2188 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2189 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2190 Attrs.hasAttribute(Attribute::InReg);
2191 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2192 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2193 Check(AttrCount <= 1,
2194 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2195 "'byref', and 'sret' are incompatible!",
2196 V);
2197
2198 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2199 Attrs.hasAttribute(Attribute::ReadOnly)),
2200 "Attributes "
2201 "'inalloca and readonly' are incompatible!",
2202 V);
2203
2204 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2205 Attrs.hasAttribute(Attribute::Returned)),
2206 "Attributes "
2207 "'sret and returned' are incompatible!",
2208 V);
2209
2210 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2211 Attrs.hasAttribute(Attribute::SExt)),
2212 "Attributes "
2213 "'zeroext and signext' are incompatible!",
2214 V);
2215
2216 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2217 Attrs.hasAttribute(Attribute::ReadOnly)),
2218 "Attributes "
2219 "'readnone and readonly' are incompatible!",
2220 V);
2221
2222 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2223 Attrs.hasAttribute(Attribute::WriteOnly)),
2224 "Attributes "
2225 "'readnone and writeonly' are incompatible!",
2226 V);
2227
2228 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2229 Attrs.hasAttribute(Attribute::WriteOnly)),
2230 "Attributes "
2231 "'readonly and writeonly' are incompatible!",
2232 V);
2233
2234 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2235 Attrs.hasAttribute(Attribute::AlwaysInline)),
2236 "Attributes "
2237 "'noinline and alwaysinline' are incompatible!",
2238 V);
2239
2240 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2241 Attrs.hasAttribute(Attribute::ReadNone)),
2242 "Attributes writable and readnone are incompatible!", V);
2243
2244 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2245 Attrs.hasAttribute(Attribute::ReadOnly)),
2246 "Attributes writable and readonly are incompatible!", V);
2247
2248 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2249 for (Attribute Attr : Attrs) {
2250 if (!Attr.isStringAttribute() &&
2251 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2252 CheckFailed("Attribute '" + Attr.getAsString() +
2253 "' applied to incompatible type!", V);
2254 return;
2255 }
2256 }
2257
2258 if (isa<PointerType>(Ty)) {
2259 if (Attrs.hasAttribute(Attribute::Alignment)) {
2260 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2261 Check(AttrAlign.value() <= Value::MaximumAlignment,
2262 "huge alignment values are unsupported", V);
2263 }
2264 if (Attrs.hasAttribute(Attribute::ByVal)) {
2265 Type *ByValTy = Attrs.getByValType();
2266 SmallPtrSet<Type *, 4> Visited;
2267 Check(ByValTy->isSized(&Visited),
2268 "Attribute 'byval' does not support unsized types!", V);
2269 // Check if it is or contains a target extension type that disallows being
2270 // used on the stack.
2272 "'byval' argument has illegal target extension type", V);
2273 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2274 "huge 'byval' arguments are unsupported", V);
2275 }
2276 if (Attrs.hasAttribute(Attribute::ByRef)) {
2277 SmallPtrSet<Type *, 4> Visited;
2278 Check(Attrs.getByRefType()->isSized(&Visited),
2279 "Attribute 'byref' does not support unsized types!", V);
2280 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2281 (1ULL << 32),
2282 "huge 'byref' arguments are unsupported", V);
2283 }
2284 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2285 SmallPtrSet<Type *, 4> Visited;
2286 Check(Attrs.getInAllocaType()->isSized(&Visited),
2287 "Attribute 'inalloca' does not support unsized types!", V);
2288 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2289 (1ULL << 32),
2290 "huge 'inalloca' arguments are unsupported", V);
2291 }
2292 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2293 SmallPtrSet<Type *, 4> Visited;
2294 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2295 "Attribute 'preallocated' does not support unsized types!", V);
2296 Check(
2297 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2298 (1ULL << 32),
2299 "huge 'preallocated' arguments are unsupported", V);
2300 }
2301 }
2302
2303 if (Attrs.hasAttribute(Attribute::Initializes)) {
2304 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2305 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2306 V);
2308 "Attribute 'initializes' does not support unordered ranges", V);
2309 }
2310
2311 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2312 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2313 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2314 V);
2315 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2316 "Invalid value for 'nofpclass' test mask", V);
2317 }
2318 if (Attrs.hasAttribute(Attribute::Range)) {
2319 const ConstantRange &CR =
2320 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2322 "Range bit width must match type bit width!", V);
2323 }
2324}
2325
2326void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2327 const Value *V) {
2328 if (Attrs.hasFnAttr(Attr)) {
2329 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2330 unsigned N;
2331 if (S.getAsInteger(10, N))
2332 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2333 }
2334}
2335
2336// Check parameter attributes against a function type.
2337// The value V is printed in error messages.
2338void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2339 const Value *V, bool IsIntrinsic,
2340 bool IsInlineAsm) {
2341 if (Attrs.isEmpty())
2342 return;
2343
2344 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2345 Check(Attrs.hasParentContext(Context),
2346 "Attribute list does not match Module context!", &Attrs, V);
2347 for (const auto &AttrSet : Attrs) {
2348 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2349 "Attribute set does not match Module context!", &AttrSet, V);
2350 for (const auto &A : AttrSet) {
2351 Check(A.hasParentContext(Context),
2352 "Attribute does not match Module context!", &A, V);
2353 }
2354 }
2355 }
2356
2357 bool SawNest = false;
2358 bool SawReturned = false;
2359 bool SawSRet = false;
2360 bool SawSwiftSelf = false;
2361 bool SawSwiftAsync = false;
2362 bool SawSwiftError = false;
2363
2364 // Verify return value attributes.
2365 AttributeSet RetAttrs = Attrs.getRetAttrs();
2366 for (Attribute RetAttr : RetAttrs)
2367 Check(RetAttr.isStringAttribute() ||
2368 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2369 "Attribute '" + RetAttr.getAsString() +
2370 "' does not apply to function return values",
2371 V);
2372
2373 unsigned MaxParameterWidth = 0;
2374 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2375 if (Ty->isVectorTy()) {
2376 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2377 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2378 if (Size > MaxParameterWidth)
2379 MaxParameterWidth = Size;
2380 }
2381 }
2382 };
2383 GetMaxParameterWidth(FT->getReturnType());
2384 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2385
2386 // Verify parameter attributes.
2387 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2388 Type *Ty = FT->getParamType(i);
2389 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2390
2391 if (!IsIntrinsic) {
2392 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2393 "immarg attribute only applies to intrinsics", V);
2394 if (!IsInlineAsm)
2395 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2396 "Attribute 'elementtype' can only be applied to intrinsics"
2397 " and inline asm.",
2398 V);
2399 }
2400
2401 verifyParameterAttrs(ArgAttrs, Ty, V);
2402 GetMaxParameterWidth(Ty);
2403
2404 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2405 Check(!SawNest, "More than one parameter has attribute nest!", V);
2406 SawNest = true;
2407 }
2408
2409 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2410 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2411 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2412 "Incompatible argument and return types for 'returned' attribute",
2413 V);
2414 SawReturned = true;
2415 }
2416
2417 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2418 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2419 Check(i == 0 || i == 1,
2420 "Attribute 'sret' is not on first or second parameter!", V);
2421 SawSRet = true;
2422 }
2423
2424 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2425 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2426 SawSwiftSelf = true;
2427 }
2428
2429 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2430 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2431 SawSwiftAsync = true;
2432 }
2433
2434 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2435 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2436 SawSwiftError = true;
2437 }
2438
2439 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2440 Check(i == FT->getNumParams() - 1,
2441 "inalloca isn't on the last parameter!", V);
2442 }
2443 }
2444
2445 if (!Attrs.hasFnAttrs())
2446 return;
2447
2448 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2449 for (Attribute FnAttr : Attrs.getFnAttrs())
2450 Check(FnAttr.isStringAttribute() ||
2451 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2452 "Attribute '" + FnAttr.getAsString() +
2453 "' does not apply to functions!",
2454 V);
2455
2456 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2457 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2458 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2459
2460 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2461 Check(Attrs.hasFnAttr(Attribute::NoInline),
2462 "Attribute 'optnone' requires 'noinline'!", V);
2463
2464 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2465 "Attributes 'optsize and optnone' are incompatible!", V);
2466
2467 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2468 "Attributes 'minsize and optnone' are incompatible!", V);
2469
2470 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2471 "Attributes 'optdebug and optnone' are incompatible!", V);
2472 }
2473
2474 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2475 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2476 "Attributes "
2477 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2478 V);
2479
2480 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2481 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2482 "Attributes 'optsize and optdebug' are incompatible!", V);
2483
2484 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2485 "Attributes 'minsize and optdebug' are incompatible!", V);
2486 }
2487
2488 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2489 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2490 "Attribute writable and memory without argmem: write are incompatible!",
2491 V);
2492
2493 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2494 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2495 "Attributes 'aarch64_pstate_sm_enabled and "
2496 "aarch64_pstate_sm_compatible' are incompatible!",
2497 V);
2498 }
2499
2500 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2501 Attrs.hasFnAttr("aarch64_inout_za") +
2502 Attrs.hasFnAttr("aarch64_out_za") +
2503 Attrs.hasFnAttr("aarch64_preserves_za") +
2504 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2505 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2506 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2507 "'aarch64_za_state_agnostic' are mutually exclusive",
2508 V);
2509
2510 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2511 Attrs.hasFnAttr("aarch64_in_zt0") +
2512 Attrs.hasFnAttr("aarch64_inout_zt0") +
2513 Attrs.hasFnAttr("aarch64_out_zt0") +
2514 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2515 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2516 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2517 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2518 "'aarch64_za_state_agnostic' are mutually exclusive",
2519 V);
2520
2521 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2522 const GlobalValue *GV = cast<GlobalValue>(V);
2524 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2525 }
2526
2527 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2528 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2529 if (ParamNo >= FT->getNumParams()) {
2530 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2531 return false;
2532 }
2533
2534 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2535 CheckFailed("'allocsize' " + Name +
2536 " argument must refer to an integer parameter",
2537 V);
2538 return false;
2539 }
2540
2541 return true;
2542 };
2543
2544 if (!CheckParam("element size", Args->first))
2545 return;
2546
2547 if (Args->second && !CheckParam("number of elements", *Args->second))
2548 return;
2549 }
2550
2551 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2552 AllocFnKind K = Attrs.getAllocKind();
2554 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2555 if (!is_contained(
2556 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2557 Type))
2558 CheckFailed(
2559 "'allockind()' requires exactly one of alloc, realloc, and free");
2560 if ((Type == AllocFnKind::Free) &&
2561 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2562 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2563 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2564 "or aligned modifiers.");
2565 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2566 if ((K & ZeroedUninit) == ZeroedUninit)
2567 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2568 }
2569
2570 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2571 StringRef S = A.getValueAsString();
2572 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2573 Function *Variant = M.getFunction(S);
2574 if (Variant) {
2575 Attribute Family = Attrs.getFnAttr("alloc-family");
2576 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2577 if (Family.isValid())
2578 Check(VariantFamily.isValid() &&
2579 VariantFamily.getValueAsString() == Family.getValueAsString(),
2580 "'alloc-variant-zeroed' must name a function belonging to the "
2581 "same 'alloc-family'");
2582
2583 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2584 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2585 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2586 "'alloc-variant-zeroed' must name a function with "
2587 "'allockind(\"zeroed\")'");
2588
2589 Check(FT == Variant->getFunctionType(),
2590 "'alloc-variant-zeroed' must name a function with the same "
2591 "signature");
2592
2593 if (const Function *F = dyn_cast<Function>(V))
2594 Check(F->getCallingConv() == Variant->getCallingConv(),
2595 "'alloc-variant-zeroed' must name a function with the same "
2596 "calling convention");
2597 }
2598 }
2599
2600 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2601 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2602 if (VScaleMin == 0)
2603 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2604 else if (!isPowerOf2_32(VScaleMin))
2605 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2606 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2607 if (VScaleMax && VScaleMin > VScaleMax)
2608 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2609 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2610 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2611 }
2612
2613 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2614 StringRef FP = FPAttr.getValueAsString();
2615 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2616 FP != "non-leaf-no-reserve")
2617 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2618 }
2619
2620 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2621 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2622 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2623 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2624 .getValueAsString()
2625 .empty(),
2626 "\"patchable-function-entry-section\" must not be empty");
2627 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2628
2629 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2630 StringRef S = A.getValueAsString();
2631 if (S != "none" && S != "all" && S != "non-leaf")
2632 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2633 }
2634
2635 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2636 StringRef S = A.getValueAsString();
2637 if (S != "a_key" && S != "b_key")
2638 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2639 V);
2640 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2641 CheckFailed(
2642 "'sign-return-address-key' present without `sign-return-address`");
2643 }
2644 }
2645
2646 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2647 StringRef S = A.getValueAsString();
2648 if (S != "" && S != "true" && S != "false")
2649 CheckFailed(
2650 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2651 }
2652
2653 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2654 StringRef S = A.getValueAsString();
2655 if (S != "" && S != "true" && S != "false")
2656 CheckFailed(
2657 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2658 }
2659
2660 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2661 StringRef S = A.getValueAsString();
2662 if (S != "" && S != "true" && S != "false")
2663 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2664 V);
2665 }
2666
2667 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2668 StringRef S = A.getValueAsString();
2669 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2670 if (!Info)
2671 CheckFailed("invalid name for a VFABI variant: " + S, V);
2672 }
2673
2674 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2675 StringRef S = A.getValueAsString();
2677 S.split(Args, ',');
2678 Check(Args.size() >= 5,
2679 "modular-format attribute requires at least 5 arguments", V);
2680 unsigned FirstArgIdx;
2681 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2682 "modular-format attribute first arg index is not an integer", V);
2683 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2684 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2685 "modular-format attribute first arg index is out of bounds", V);
2686 }
2687
2688 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2689 StringRef S = A.getValueAsString();
2690 if (!S.empty()) {
2691 for (auto FeatureFlag : split(S, ',')) {
2692 if (FeatureFlag.empty())
2693 CheckFailed(
2694 "target-features attribute should not contain an empty string");
2695 else
2696 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2697 "target feature '" + FeatureFlag +
2698 "' must start with a '+' or '-'",
2699 V);
2700 }
2701 }
2702 }
2703}
2704void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2705 Check(MD->getNumOperands() == 2,
2706 "'unknown' !prof should have a single additional operand", MD);
2707 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2708 Check(PassName != nullptr,
2709 "'unknown' !prof should have an additional operand of type "
2710 "string");
2711 Check(!PassName->getString().empty(),
2712 "the 'unknown' !prof operand should not be an empty string");
2713}
2714
/// Verify function-level metadata attachments. Only !prof and !kcfi_type get
/// structural checks here; other attachment kinds are verified elsewhere.
///
/// NOTE(review): several Check(...) calls in this copy appear truncated (their
/// condition lines are missing) — confirm against the upstream source before
/// relying on this text.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // A !prof node is a string tag plus at least one payload operand.
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      // !kcfi_type must wrap exactly one constant integer (checked below to
      // be a ConstantInt; the final, truncated Check appears to require a
      // 32-bit width).
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2764
/// Walk a constant and all constants reachable through its operands,
/// verifying each one. Iterative worklist traversal; ConstantExprVisited
/// memoizes across calls so shared subtrees are checked once per module run.
///
/// NOTE(review): the declaration of the worklist `Stack` (presumably a
/// SmallVector of const Constant*) appears to be missing from this copy —
/// confirm against the upstream source.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants have nothing to recurse into.
  if (EntryC->getNumOperands() == 0)
    return;

  // Skip entire subtrees we've already verified.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2804
2805void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2806 if (CE->getOpcode() == Instruction::BitCast)
2807 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2808 CE->getType()),
2809 "Invalid bitcast", CE);
2810 else if (CE->getOpcode() == Instruction::PtrToAddr)
2811 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2812}
2813
/// Verify the structural invariants of a ptrauth signed-pointer constant:
/// pointer-typed base, i32 key, i64 discriminator, and (per the surviving
/// message strings) pointer-typed address discriminator and a global-or-null
/// deactivation symbol.
///
/// NOTE(review): several Check(...) condition lines are missing from this
/// copy (only their message strings remain) — confirm against upstream.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  // The signed constant is interchangeable with its base pointer, so the
  // types must agree exactly.
  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2838
2839bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2840 // There shouldn't be more attribute sets than there are parameters plus the
2841 // function and return value.
2842 return Attrs.getNumAttrSets() <= Params + 2;
2843}
2844
/// Verify an inline-asm call site against its parsed constraint string:
/// indirect operands must be pointers (and, per the surviving message, carry
/// elementtype), direct operands must not carry elementtype, and label
/// constraints are only legal on callbr (matching its indirect dest count).
///
/// NOTE(review): one Check(...) condition line (the elementtype requirement
/// for indirect operands) is missing from this copy — confirm upstream.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    // Label constraints don't consume an argument operand.
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    // Every label constraint must correspond to one callbr indirect dest.
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2886
/// Verify that statepoint intrinsic is well formed.
///
/// Operand layout checked below: arg 1 = num patch bytes, arg 2 = callee
/// (with elementtype), arg 3 = num call args, arg 4 = flags, args 5.. = the
/// wrapped call's arguments, followed by (deprecated, must-be-zero)
/// transition and deopt argument counts.
///
/// NOTE(review): the first Check(...)'s condition lines (memory-effects
/// requirement) are missing from this copy — confirm against upstream.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // The callee operand must carry an elementtype attribute naming the
  // wrapped function's type so we can type-check the forwarded arguments.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    // NOTE(review): this tests the statepoint itself rather than the user;
    // it looks like it should be isa<GCRelocateInst>(UserCall) — confirm
    // against upstream before changing.
    } else if (isa<GCRelocateInst>(Call)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times.  It's non-optimal, but it is legal.  It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer.  This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about.  See example statepoint.ll in the verifier subdirectory
}
3010
3011void Verifier::verifyFrameRecoverIndices() {
3012 for (auto &Counts : FrameEscapeInfo) {
3013 Function *F = Counts.first;
3014 unsigned EscapedObjectCount = Counts.second.first;
3015 unsigned MaxRecoveredIndex = Counts.second.second;
3016 Check(MaxRecoveredIndex <= EscapedObjectCount,
3017 "all indices passed to llvm.localrecover must be less than the "
3018 "number of arguments passed to llvm.localescape in the parent "
3019 "function",
3020 F);
3021 }
3022}
3023
3024static Instruction *getSuccPad(Instruction *Terminator) {
3025 BasicBlock *UnwindDest;
3026 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3027 UnwindDest = II->getUnwindDest();
3028 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3029 UnwindDest = CSI->getUnwindDest();
3030 else
3031 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3032 return &*UnwindDest->getFirstNonPHIIt();
3033}
3034
/// Detect cycles among sibling EH funclets: starting from each pad recorded
/// in SiblingFuncletInfo, follow the (unique) unwind successor chain and
/// report an error if the walk re-enters a pad on the current path. Visited
/// caps total work at one walk per pad; Active tracks only the current chain.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    // A pad reached by an earlier walk has already been fully checked.
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Re-walk the cycle once to collect its members for the diagnostic.
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3077
// visitFunction - Verify that a function is ok.
//
// Checks, in order: global-value invariants, signature/return-type sanity,
// attribute placement, per-calling-convention restrictions, argument types,
// metadata attachments (split by materializable / declaration / definition),
// intrinsic usage and signatures, and finally that every !dbg attachment
// leads back to this function's DISubprogram.
//
// NOTE(review): the declaration of MDs (a SmallVector of {KindID, MDNode*}
// pairs filled by F.getAllMetadata below) appears to be missing from this
// copy — confirm against the upstream source.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable frame, so their arguments may not be
  // referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata, token-like, and x86_amx values may only flow through
      // intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Stop early once debug info is known to be broken.
      if (BrokenDebugInfo)
        return;
    }
}
3415
// verifyBasicBlock - Verify that a basic block is well formed...
//
// Checks: the block has a terminator, every PHI node has exactly one entry
// per predecessor (with consistent values for duplicate predecessors), all
// instructions point back at this block, and no trailing debug records exist.
//
// NOTE(review): the declaration of Values (presumably a SmallVector of
// {incoming block, incoming value} pairs) appears to be missing from this
// copy — confirm against the upstream source.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // Sorting lets us compare PHI entries against predecessors positionally.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3475
3476void Verifier::visitTerminator(Instruction &I) {
3477 // Ensure that terminators only exist at the end of the basic block.
3478 Check(&I == I.getParent()->getTerminator(),
3479 "Terminator found in the middle of a basic block!", I.getParent());
3480 visitInstruction(I);
3481}
3482
// Verify a conditional branch, then apply the common terminator checks.
// NOTE(review): the Check(...) head asserting the branch condition is of
// 'i1' type appears truncated in this copy — confirm against upstream.
void Verifier::visitCondBrInst(CondBrInst &BI) {
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  visitTerminator(BI);
}
3488
3489void Verifier::visitReturnInst(ReturnInst &RI) {
3490 Function *F = RI.getParent()->getParent();
3491 unsigned N = RI.getNumOperands();
3492 if (F->getReturnType()->isVoidTy())
3493 Check(N == 0,
3494 "Found return instr that returns non-void in Function of void "
3495 "return type!",
3496 &RI, F->getReturnType());
3497 else
3498 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3499 "Function return type does not match operand "
3500 "type of return inst!",
3501 &RI, F->getReturnType());
3502
3503 // Check to make sure that the return value has necessary properties for
3504 // terminators...
3505 visitTerminator(RI);
3506}
3507
3508void Verifier::visitSwitchInst(SwitchInst &SI) {
3509 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3510 // Check to make sure that all of the constants in the switch instruction
3511 // have the same type as the switched-on value.
3512 Type *SwitchTy = SI.getCondition()->getType();
3513 SmallPtrSet<ConstantInt*, 32> Constants;
3514 for (auto &Case : SI.cases()) {
3515 Check(isa<ConstantInt>(Case.getCaseValue()),
3516 "Case value is not a constant integer.", &SI);
3517 Check(Case.getCaseValue()->getType() == SwitchTy,
3518 "Switch constants must all be same type as switch value!", &SI);
3519 Check(Constants.insert(Case.getCaseValue()).second,
3520 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3521 }
3522
3523 visitTerminator(SI);
3524}
3525
// visitIndirectBrInst - The indirectbr address operand and all listed
// destination block addresses must be pointer-typed.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3535
// visitCallBrInst - A callbr is either asm-goto (inline asm with indirect
// label targets) or one of a small set of supported intrinsics (currently
// only amdgcn_kill).
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  if (!CBI.isInlineAsm()) {
          "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    // Only specifically whitelisted intrinsics may appear as a callbr callee.
    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The indirect destination must begin with either a literal
      // 'unreachable' or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm form (asm-goto): the asm may not unwind.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3567
3568void Verifier::visitSelectInst(SelectInst &SI) {
3569 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3570 SI.getOperand(2)),
3571 "Invalid operands for select instruction!", &SI);
3572
3573 Check(SI.getTrueValue()->getType() == SI.getType(),
3574 "Select values must have same type as select instruction!", &SI);
3575 visitInstruction(SI);
3576}
3577
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp opcodes are reserved for transient, pass-internal use only;
  // encountering one in the verifier is unconditionally a failure.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3584
3585void Verifier::visitTruncInst(TruncInst &I) {
3586 // Get the source and destination types
3587 Type *SrcTy = I.getOperand(0)->getType();
3588 Type *DestTy = I.getType();
3589
3590 // Get the size of the types in bits, we'll need this later
3591 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3592 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3593
3594 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3595 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3596 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3597 "trunc source and destination must both be a vector or neither", &I);
3598 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3599
3600 visitInstruction(I);
3601}
3602
3603void Verifier::visitZExtInst(ZExtInst &I) {
3604 // Get the source and destination types
3605 Type *SrcTy = I.getOperand(0)->getType();
3606 Type *DestTy = I.getType();
3607
3608 // Get the size of the types in bits, we'll need this later
3609 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3610 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3611 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3612 "zext source and destination must both be a vector or neither", &I);
3613 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3614 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3615
3616 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3617
3618 visitInstruction(I);
3619}
3620
3621void Verifier::visitSExtInst(SExtInst &I) {
3622 // Get the source and destination types
3623 Type *SrcTy = I.getOperand(0)->getType();
3624 Type *DestTy = I.getType();
3625
3626 // Get the size of the types in bits, we'll need this later
3627 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3628 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3629
3630 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3631 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3632 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3633 "sext source and destination must both be a vector or neither", &I);
3634 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3635
3636 visitInstruction(I);
3637}
3638
3639void Verifier::visitFPTruncInst(FPTruncInst &I) {
3640 // Get the source and destination types
3641 Type *SrcTy = I.getOperand(0)->getType();
3642 Type *DestTy = I.getType();
3643 // Get the size of the types in bits, we'll need this later
3644 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3645 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3646
3647 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3648 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3649 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3650 "fptrunc source and destination must both be a vector or neither", &I);
3651 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3652
3653 visitInstruction(I);
3654}
3655
3656void Verifier::visitFPExtInst(FPExtInst &I) {
3657 // Get the source and destination types
3658 Type *SrcTy = I.getOperand(0)->getType();
3659 Type *DestTy = I.getType();
3660
3661 // Get the size of the types in bits, we'll need this later
3662 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3663 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3664
3665 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3666 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3667 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3668 "fpext source and destination must both be a vector or neither", &I);
3669 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3670
3671 visitInstruction(I);
3672}
3673
3674void Verifier::visitUIToFPInst(UIToFPInst &I) {
3675 // Get the source and destination types
3676 Type *SrcTy = I.getOperand(0)->getType();
3677 Type *DestTy = I.getType();
3678
3679 bool SrcVec = SrcTy->isVectorTy();
3680 bool DstVec = DestTy->isVectorTy();
3681
3682 Check(SrcVec == DstVec,
3683 "UIToFP source and dest must both be vector or scalar", &I);
3684 Check(SrcTy->isIntOrIntVectorTy(),
3685 "UIToFP source must be integer or integer vector", &I);
3686 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3687 &I);
3688
3689 if (SrcVec && DstVec)
3690 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3691 cast<VectorType>(DestTy)->getElementCount(),
3692 "UIToFP source and dest vector length mismatch", &I);
3693
3694 visitInstruction(I);
3695}
3696
3697void Verifier::visitSIToFPInst(SIToFPInst &I) {
3698 // Get the source and destination types
3699 Type *SrcTy = I.getOperand(0)->getType();
3700 Type *DestTy = I.getType();
3701
3702 bool SrcVec = SrcTy->isVectorTy();
3703 bool DstVec = DestTy->isVectorTy();
3704
3705 Check(SrcVec == DstVec,
3706 "SIToFP source and dest must both be vector or scalar", &I);
3707 Check(SrcTy->isIntOrIntVectorTy(),
3708 "SIToFP source must be integer or integer vector", &I);
3709 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3710 &I);
3711
3712 if (SrcVec && DstVec)
3713 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3714 cast<VectorType>(DestTy)->getElementCount(),
3715 "SIToFP source and dest vector length mismatch", &I);
3716
3717 visitInstruction(I);
3718}
3719
3720void Verifier::visitFPToUIInst(FPToUIInst &I) {
3721 // Get the source and destination types
3722 Type *SrcTy = I.getOperand(0)->getType();
3723 Type *DestTy = I.getType();
3724
3725 bool SrcVec = SrcTy->isVectorTy();
3726 bool DstVec = DestTy->isVectorTy();
3727
3728 Check(SrcVec == DstVec,
3729 "FPToUI source and dest must both be vector or scalar", &I);
3730 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3731 Check(DestTy->isIntOrIntVectorTy(),
3732 "FPToUI result must be integer or integer vector", &I);
3733
3734 if (SrcVec && DstVec)
3735 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3736 cast<VectorType>(DestTy)->getElementCount(),
3737 "FPToUI source and dest vector length mismatch", &I);
3738
3739 visitInstruction(I);
3740}
3741
3742void Verifier::visitFPToSIInst(FPToSIInst &I) {
3743 // Get the source and destination types
3744 Type *SrcTy = I.getOperand(0)->getType();
3745 Type *DestTy = I.getType();
3746
3747 bool SrcVec = SrcTy->isVectorTy();
3748 bool DstVec = DestTy->isVectorTy();
3749
3750 Check(SrcVec == DstVec,
3751 "FPToSI source and dest must both be vector or scalar", &I);
3752 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3753 Check(DestTy->isIntOrIntVectorTy(),
3754 "FPToSI result must be integer or integer vector", &I);
3755
3756 if (SrcVec && DstVec)
3757 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3758 cast<VectorType>(DestTy)->getElementCount(),
3759 "FPToSI source and dest vector length mismatch", &I);
3760
3761 visitInstruction(I);
3762}
3763
3764void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3765 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3766 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3767 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3768 V);
3769
3770 if (SrcTy->isVectorTy()) {
3771 auto *VSrc = cast<VectorType>(SrcTy);
3772 auto *VDest = cast<VectorType>(DestTy);
3773 Check(VSrc->getElementCount() == VDest->getElementCount(),
3774 "PtrToAddr vector length mismatch", V);
3775 }
3776
3777 Type *AddrTy = DL.getAddressType(SrcTy);
3778 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3779}
3780
3781void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3782 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3783 visitInstruction(I);
3784}
3785
3786void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3787 // Get the source and destination types
3788 Type *SrcTy = I.getOperand(0)->getType();
3789 Type *DestTy = I.getType();
3790
3791 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3792
3793 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3794 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3795 &I);
3796
3797 if (SrcTy->isVectorTy()) {
3798 auto *VSrc = cast<VectorType>(SrcTy);
3799 auto *VDest = cast<VectorType>(DestTy);
3800 Check(VSrc->getElementCount() == VDest->getElementCount(),
3801 "PtrToInt Vector length mismatch", &I);
3802 }
3803
3804 visitInstruction(I);
3805}
3806
3807void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3808 // Get the source and destination types
3809 Type *SrcTy = I.getOperand(0)->getType();
3810 Type *DestTy = I.getType();
3811
3812 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3813 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3814
3815 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3816 &I);
3817 if (SrcTy->isVectorTy()) {
3818 auto *VSrc = cast<VectorType>(SrcTy);
3819 auto *VDest = cast<VectorType>(DestTy);
3820 Check(VSrc->getElementCount() == VDest->getElementCount(),
3821 "IntToPtr Vector length mismatch", &I);
3822 }
3823 visitInstruction(I);
3824}
3825
3826void Verifier::visitBitCastInst(BitCastInst &I) {
3827 Check(
3828 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3829 "Invalid bitcast", &I);
3830 visitInstruction(I);
3831}
3832
// visitAddrSpaceCastInst - An addrspacecast converts between pointers (or
// pointer vectors) in different address spaces; vector casts must preserve
// the element count.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  // For vector-of-pointer casts, the element counts must agree.
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3849
/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints (e.g. predecessor matching and duplicate
  // incoming-block entries) are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3875
/// Verify the invariants shared by every call-like instruction (call, invoke,
/// callbr): callee and argument types, attribute placement, special
/// attributes (speculatable, preallocated, immarg, swifterror, inalloca),
/// vararg attribute rules, and operand-bundle multiplicity.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getFunctionType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
        "denormal_fpenv attribute may not apply to call sites", Call);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      // An immarg operand must be a literal constant at the call site.
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4184
4185void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4186 StringRef Context) {
4187 Check(!Attrs.contains(Attribute::InAlloca),
4188 Twine("inalloca attribute not allowed in ") + Context);
4189 Check(!Attrs.contains(Attribute::InReg),
4190 Twine("inreg attribute not allowed in ") + Context);
4191 Check(!Attrs.contains(Attribute::SwiftError),
4192 Twine("swifterror attribute not allowed in ") + Context);
4193 Check(!Attrs.contains(Attribute::Preallocated),
4194 Twine("preallocated attribute not allowed in ") + Context);
4195 Check(!Attrs.contains(Attribute::ByRef),
4196 Twine("byref attribute not allowed in ") + Context);
4197}
4198
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  if (!PL || !PR)
    return false;
  // Both are pointer types at this point; congruence then only requires a
  // matching address space.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4210
4211static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4212 static const Attribute::AttrKind ABIAttrs[] = {
4213 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4214 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4215 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4216 Attribute::ByRef};
4217 AttrBuilder Copy(C);
4218 for (auto AK : ABIAttrs) {
4219 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4220 if (Attr.isValid())
4221 Copy.addAttribute(Attr);
4222 }
4223
4224 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4225 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4226 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4227 Attrs.hasParamAttr(I, Attribute::ByRef)))
4228 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4229 return Copy;
4230}
4231
/// Enforce the structural rules of a 'musttail' call: no inline asm, matching
/// varargs-ness/return types/calling conventions between caller and callee,
/// the call must immediately precede (an optional bitcast and) a ret that
/// returns its value, and ABI-impacting parameter attributes must agree.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4318
/// Verify a direct call: run the shared CallBase checks first, then the
/// additional structural constraints that a `musttail` marker imposes.
void Verifier::visitCallInst(CallInst &CI) {
  visitCallBase(CI);

  // musttail calls carry extra guarantees (bitcast/ret placement, matching
  // ABI attributes) that are verified separately.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4325
/// Verify an invoke: shared call-site checks, the requirement that the unwind
/// successor begins with an EH pad, and the common terminator checks.
void Verifier::visitInvokeInst(InvokeInst &II) {
  visitCallBase(II);

  // Verify that the first non-PHI instruction of the unwind destination is an
  // exception handling instruction.
  Check(
      II.getUnwindDest()->isEHPad(),
      "The unwind destination does not have an exception handling instruction!",
      &II);

  // An invoke also terminates its block, so apply terminator checks too.
  visitTerminator(II);
}
4338
4339/// visitUnaryOperator - Check the argument to the unary operator.
4340///
4341void Verifier::visitUnaryOperator(UnaryOperator &U) {
4342 Check(U.getType() == U.getOperand(0)->getType(),
4343 "Unary operators must have same type for"
4344 "operands and result!",
4345 &U);
4346
4347 switch (U.getOpcode()) {
4348 // Check that floating-point arithmetic operators are only used with
4349 // floating-point operands.
4350 case Instruction::FNeg:
4351 Check(U.getType()->isFPOrFPVectorTy(),
4352 "FNeg operator only works with float types!", &U);
4353 break;
4354 default:
4355 llvm_unreachable("Unknown UnaryOperator opcode!");
4356 }
4357
4358 visitInstruction(U);
4359}
4360
/// visitBinaryOperator - Check that both arguments to the binary operator are
/// of the same type!
///
/// Additionally checks that the operand/result type matches the category the
/// opcode requires: integer for arithmetic/logical/shift opcodes, floating
/// point for the F* opcodes.
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
4424
/// Verify an icmp: operands must share a type, be integer or pointer
/// (scalar or vector), and the predicate must be an integer predicate.
void Verifier::visitICmpInst(ICmpInst &IC) {
  // Check that the operands are the same type
  Type *Op0Ty = IC.getOperand(0)->getType();
  Type *Op1Ty = IC.getOperand(1)->getType();
  Check(Op0Ty == Op1Ty,
        "Both operands to ICmp instruction are not of the same type!", &IC);
  // Check that the operands are the right type
  Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
        "Invalid operand types for ICmp instruction", &IC);
  // Check that the predicate is valid.
  Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);

  visitInstruction(IC);
}
4439
/// Verify an fcmp: operands must share a floating-point (scalar or vector)
/// type and the predicate must be a floating-point predicate.
void Verifier::visitFCmpInst(FCmpInst &FC) {
  // Check that the operands are the same type
  Type *Op0Ty = FC.getOperand(0)->getType();
  Type *Op1Ty = FC.getOperand(1)->getType();
  Check(Op0Ty == Op1Ty,
        "Both operands to FCmp instruction are not of the same type!", &FC);
  // Check that the operands are the right type
  Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
        &FC);
  // Check that the predicate is valid.
  Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);

  visitInstruction(FC);
}
4454
4455void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4457 "Invalid extractelement operands!", &EI);
4458 visitInstruction(EI);
4459}
4460
/// Verify insertelement: the (vector, value, index) operand triple must
/// satisfy InsertElementInst::isValidOperands.
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                           IE.getOperand(2)),
        "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
4467
4468void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4470 SV.getShuffleMask()),
4471 "Invalid shufflevector operands!", &SV);
4472 visitInstruction(SV);
4473}
4474
4475void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4476 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4477
4478 Check(isa<PointerType>(TargetTy),
4479 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4480 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4481
4482 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4483 Check(!STy->isScalableTy(),
4484 "getelementptr cannot target structure that contains scalable vector"
4485 "type",
4486 &GEP);
4487 }
4488
4489 SmallVector<Value *, 16> Idxs(GEP.indices());
4490 Check(
4491 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4492 "GEP indexes must be integers", &GEP);
4493 Type *ElTy =
4494 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4495 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4496
4497 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4498
4499 Check(PtrTy && GEP.getResultElementType() == ElTy,
4500 "GEP is not of right type for indices!", &GEP, ElTy);
4501
4502 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4503 // Additional checks for vector GEPs.
4504 ElementCount GEPWidth = GEPVTy->getElementCount();
4505 if (GEP.getPointerOperandType()->isVectorTy())
4506 Check(
4507 GEPWidth ==
4508 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4509 "Vector GEP result width doesn't match operand's", &GEP);
4510 for (Value *Idx : Idxs) {
4511 Type *IndexTy = Idx->getType();
4512 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4513 ElementCount IndexWidth = IndexVTy->getElementCount();
4514 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4515 }
4516 Check(IndexTy->isIntOrIntVectorTy(),
4517 "All GEP indices should be of integer type");
4518 }
4519 }
4520
4521 // Check that GEP does not index into a vector with non-byte-addressable
4522 // elements.
4524 GTI != GTE; ++GTI) {
4525 if (GTI.isVector()) {
4526 Type *ElemTy = GTI.getIndexedType();
4527 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4528 "GEP into vector with non-byte-addressable element type", &GEP);
4529 }
4530 }
4531
4532 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4533 "GEP address space doesn't match type", &GEP);
4534
4535 visitInstruction(GEP);
4536}
4537
4538static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4539 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4540}
4541
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata encodes pairs of (low, high) constants forming half-open
/// ranges; pairs must be non-empty, in order, non-overlapping, and
/// non-contiguous (contiguous pairs should have been merged).
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // noalias.addrspace ranges are always i32; other kinds must match the
    // scalar type of the annotated value.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol may cover the full set of values.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges the list may wrap around; compare the last
  // range against the first one as well (with two, the loop already did).
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4606
/// Verify an instruction's !range metadata; delegates to the shared
/// range-like metadata checks with the Range kind.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4612
4613void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
4614 Type *Ty) {
4615 Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
4616 "nofpclass only applies to floating-point typed loads", I);
4617
4618 Check(NoFPClass->getNumOperands() == 1,
4619 "nofpclass must have exactly one entry", NoFPClass);
4620 ConstantInt *MaskVal =
4622 Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
4623 "nofpclass entry must be a constant i32", NoFPClass);
4624 uint32_t Val = MaskVal->getZExtValue();
4625 Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
4626 I);
4627
4628 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
4629 "Invalid value for 'nofpclass' test mask", NoFPClass, I);
4630}
4631
/// Verify an instruction's !noalias.addrspace metadata; delegates to the
/// shared range-like metadata checks with the NoaliasAddrspace kind.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4639
/// Check that an atomic memory access operates on a type whose size is at
/// least one byte and a power of two.
void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
  unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
  Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
  // Size & (Size - 1) is zero exactly when Size is a power of two (or zero,
  // which the previous check already rejected).
  Check(!(Size & (Size - 1)),
        "atomic memory access' operand must have a power-of-two size", Ty, I);
}
4646
4647void Verifier::visitLoadInst(LoadInst &LI) {
4649 Check(PTy, "Load operand must be a pointer.", &LI);
4650 Type *ElTy = LI.getType();
4651 if (MaybeAlign A = LI.getAlign()) {
4652 Check(A->value() <= Value::MaximumAlignment,
4653 "huge alignment values are unsupported", &LI);
4654 }
4655 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4656 if (LI.isAtomic()) {
4657 Check(LI.getOrdering() != AtomicOrdering::Release &&
4658 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4659 "Load cannot have Release ordering", &LI);
4660 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4661 ElTy->getScalarType()->isByteTy() ||
4663 "atomic load operand must have integer, byte, pointer, floating "
4664 "point, or vector type!",
4665 ElTy, &LI);
4666
4667 checkAtomicMemAccessSize(ElTy, &LI);
4668 } else {
4670 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4671 }
4672
4673 visitInstruction(LI);
4674}
4675
4676void Verifier::visitStoreInst(StoreInst &SI) {
4677 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4678 Check(PTy, "Store operand must be a pointer.", &SI);
4679 Type *ElTy = SI.getOperand(0)->getType();
4680 if (MaybeAlign A = SI.getAlign()) {
4681 Check(A->value() <= Value::MaximumAlignment,
4682 "huge alignment values are unsupported", &SI);
4683 }
4684 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4685 if (SI.isAtomic()) {
4686 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4687 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4688 "Store cannot have Acquire ordering", &SI);
4689 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4690 ElTy->getScalarType()->isByteTy() ||
4692 "atomic store operand must have integer, byte, pointer, floating "
4693 "point, or vector type!",
4694 ElTy, &SI);
4695 checkAtomicMemAccessSize(ElTy, &SI);
4696 } else {
4697 Check(SI.getSyncScopeID() == SyncScope::System,
4698 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4699 }
4700 visitInstruction(SI);
4701}
4702
4703/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4704void Verifier::verifySwiftErrorCall(CallBase &Call,
4705 const Value *SwiftErrorVal) {
4706 for (const auto &I : llvm::enumerate(Call.args())) {
4707 if (I.value() == SwiftErrorVal) {
4708 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4709 "swifterror value when used in a callsite should be marked "
4710 "with swifterror attribute",
4711 SwiftErrorVal, Call);
4712 }
4713 }
4714}
4715
4716void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4717 // Check that swifterror value is only used by loads, stores, or as
4718 // a swifterror argument.
4719 for (const User *U : SwiftErrorVal->users()) {
4721 isa<InvokeInst>(U),
4722 "swifterror value can only be loaded and stored from, or "
4723 "as a swifterror argument!",
4724 SwiftErrorVal, U);
4725 // If it is used by a store, check it is the second operand.
4726 if (auto StoreI = dyn_cast<StoreInst>(U))
4727 Check(StoreI->getOperand(1) == SwiftErrorVal,
4728 "swifterror value should be the second operand when used "
4729 "by stores",
4730 SwiftErrorVal, U);
4731 if (auto *Call = dyn_cast<CallBase>(U))
4732 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4733 }
4734}
4735
4736void Verifier::visitAllocaInst(AllocaInst &AI) {
4737 Type *Ty = AI.getAllocatedType();
4738 SmallPtrSet<Type*, 4> Visited;
4739 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4740 // Check if it's a target extension type that disallows being used on the
4741 // stack.
4743 "Alloca has illegal target extension type", &AI);
4745 "Alloca array size must have integer type", &AI);
4746 if (MaybeAlign A = AI.getAlign()) {
4747 Check(A->value() <= Value::MaximumAlignment,
4748 "huge alignment values are unsupported", &AI);
4749 }
4750
4751 if (AI.isSwiftError()) {
4752 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4754 "swifterror alloca must not be array allocation", &AI);
4755 verifySwiftErrorValue(&AI);
4756 }
4757
4758 if (TT.isAMDGPU()) {
4760 "alloca on amdgpu must be in addrspace(5)", &AI);
4761 }
4762
4763 visitInstruction(AI);
4764}
4765
/// Verify a cmpxchg: the compared/stored value must be integer or pointer
/// typed, with a byte-sized power-of-two width.
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
  Type *ElTy = CXI.getOperand(1)->getType();
  Check(ElTy->isIntOrPtrTy(),
        "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
  checkAtomicMemAccessSize(ElTy, &CXI);
  visitInstruction(CXI);
}
4773
4774void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4775 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4776 "atomicrmw instructions cannot be unordered.", &RMWI);
4777 auto Op = RMWI.getOperation();
4778 Type *ElTy = RMWI.getOperand(1)->getType();
4779 if (Op == AtomicRMWInst::Xchg) {
4780 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4781 ElTy->isPointerTy(),
4782 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4783 " operand must have integer or floating point type!",
4784 &RMWI, ElTy);
4785 } else if (AtomicRMWInst::isFPOperation(Op)) {
4787 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4788 " operand must have floating-point or fixed vector of floating-point "
4789 "type!",
4790 &RMWI, ElTy);
4791 } else {
4792 Check(ElTy->isIntegerTy(),
4793 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4794 " operand must have integer type!",
4795 &RMWI, ElTy);
4796 }
4797 checkAtomicMemAccessSize(ElTy, &RMWI);
4799 "Invalid binary operation!", &RMWI);
4800 visitInstruction(RMWI);
4801}
4802
4803void Verifier::visitFenceInst(FenceInst &FI) {
4804 const AtomicOrdering Ordering = FI.getOrdering();
4805 Check(Ordering == AtomicOrdering::Acquire ||
4806 Ordering == AtomicOrdering::Release ||
4807 Ordering == AtomicOrdering::AcquireRelease ||
4808 Ordering == AtomicOrdering::SequentiallyConsistent,
4809 "fence instructions may only have acquire, release, acq_rel, or "
4810 "seq_cst ordering.",
4811 &FI);
4812 visitInstruction(FI);
4813}
4814
4815void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4817 EVI.getIndices()) == EVI.getType(),
4818 "Invalid ExtractValueInst operands!", &EVI);
4819
4820 visitInstruction(EVI);
4821}
4822
4823void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4825 IVI.getIndices()) ==
4826 IVI.getOperand(1)->getType(),
4827 "Invalid InsertValueInst operands!", &IVI);
4828
4829 visitInstruction(IVI);
4830}
4831
4832static Value *getParentPad(Value *EHPad) {
4833 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4834 return FPI->getParentPad();
4835
4836 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4837}
4838
/// Verify that every predecessor edge into an EH pad's block is a legal
/// unwind edge: landing pads are reached only via invoke unwind edges,
/// catchpads only from their catchswitch, and funclet pads only via edges
/// whose from-pad chain legally exits to this pad's parent.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Most intrinsic calls marked nounwind cannot reach here; skip them
      // unless the intrinsic may be lowered to an actual function call.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4921
4922void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4923 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4924 // isn't a cleanup.
4925 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4926 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4927
4928 visitEHPadPredecessors(LPI);
4929
4930 if (!LandingPadResultTy)
4931 LandingPadResultTy = LPI.getType();
4932 else
4933 Check(LandingPadResultTy == LPI.getType(),
4934 "The landingpad instruction should have a consistent result type "
4935 "inside a function.",
4936 &LPI);
4937
4938 Function *F = LPI.getParent()->getParent();
4939 Check(F->hasPersonalityFn(),
4940 "LandingPadInst needs to be in a function with a personality.", &LPI);
4941
4942 // The landingpad instruction must be the first non-PHI instruction in the
4943 // block.
4944 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4945 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4946
4947 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4948 Constant *Clause = LPI.getClause(i);
4949 if (LPI.isCatch(i)) {
4950 Check(isa<PointerType>(Clause->getType()),
4951 "Catch operand does not have pointer type!", &LPI);
4952 } else {
4953 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4955 "Filter operand is not an array of constants!", &LPI);
4956 }
4957 }
4958
4959 visitInstruction(LPI);
4960}
4961
4962void Verifier::visitResumeInst(ResumeInst &RI) {
4964 "ResumeInst needs to be in a function with a personality.", &RI);
4965
4966 if (!LandingPadResultTy)
4967 LandingPadResultTy = RI.getValue()->getType();
4968 else
4969 Check(LandingPadResultTy == RI.getValue()->getType(),
4970 "The resume instruction should have a consistent result type "
4971 "inside a function.",
4972 &RI);
4973
4974 visitTerminator(RI);
4975}
4976
4977void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4978 BasicBlock *BB = CPI.getParent();
4979
4980 Function *F = BB->getParent();
4981 Check(F->hasPersonalityFn(),
4982 "CatchPadInst needs to be in a function with a personality.", &CPI);
4983
4985 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4986 CPI.getParentPad());
4987
4988 // The catchpad instruction must be the first non-PHI instruction in the
4989 // block.
4990 Check(&*BB->getFirstNonPHIIt() == &CPI,
4991 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4992
4994 [](Use &U) {
4995 auto *V = U.get();
4996 return isa<Constant>(V) || isa<AllocaInst>(V);
4997 }),
4998 "Argument operand must be alloca or constant.", &CPI);
4999
5000 visitEHPadPredecessors(CPI);
5001 visitFuncletPadInst(CPI);
5002}
5003
5004void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
5005 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
5006 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
5007 CatchReturn.getOperand(0));
5008
5009 visitTerminator(CatchReturn);
5010}
5011
/// Verify a cleanuppad: requires a personality function, first-non-PHI
/// placement, and a parent pad that is either `none` or another funclet pad.
void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CleanupPadInst needs to be in a function with a personality.", &CPI);

  // The cleanuppad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);

  auto *ParentPad = CPI.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CleanupPadInst has an invalid parent.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
5031
/// Verify a funclet pad (catchpad or cleanuppad): all unwind edges that exit
/// the pad must agree on a single unwind destination, nested pads must not
/// form cycles, and a catchpad's unwind destination must match its parent
/// catchswitch's. Uses a worklist to resolve the unwind destinations of
/// nested cleanuppads, whose destination is only discoverable through their
/// uses.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's unwind destination must agree with its parent
    // catchswitch's unwind destination.
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5191
5192void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5193 BasicBlock *BB = CatchSwitch.getParent();
5194
5195 Function *F = BB->getParent();
5196 Check(F->hasPersonalityFn(),
5197 "CatchSwitchInst needs to be in a function with a personality.",
5198 &CatchSwitch);
5199
5200 // The catchswitch instruction must be the first non-PHI instruction in the
5201 // block.
5202 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5203 "CatchSwitchInst not the first non-PHI instruction in the block.",
5204 &CatchSwitch);
5205
5206 auto *ParentPad = CatchSwitch.getParentPad();
5207 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5208 "CatchSwitchInst has an invalid parent.", ParentPad);
5209
5210 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5211 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5212 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5213 "CatchSwitchInst must unwind to an EH block which is not a "
5214 "landingpad.",
5215 &CatchSwitch);
5216
5217 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5218 if (getParentPad(&*I) == ParentPad)
5219 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5220 }
5221
5222 Check(CatchSwitch.getNumHandlers() != 0,
5223 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5224
5225 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5226 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5227 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5228 }
5229
5230 visitEHPadPredecessors(CatchSwitch);
5231 visitTerminator(CatchSwitch);
5232}
5233
5234void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5236 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5237 CRI.getOperand(0));
5238
5239 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5240 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5241 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5242 "CleanupReturnInst must unwind to an EH block which is not a "
5243 "landingpad.",
5244 &CRI);
5245 }
5246
5247 visitTerminator(CRI);
5248}
5249
5250void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5251 Instruction *Op = cast<Instruction>(I.getOperand(i));
5252 // If the we have an invalid invoke, don't try to compute the dominance.
5253 // We already reject it in the invoke specific checks and the dominance
5254 // computation doesn't handle multiple edges.
5255 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5256 if (II->getNormalDest() == II->getUnwindDest())
5257 return;
5258 }
5259
5260 // Quick check whether the def has already been encountered in the same block.
5261 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5262 // uses are defined to happen on the incoming edge, not at the instruction.
5263 //
5264 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5265 // wrapping an SSA value, assert that we've already encountered it. See
5266 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5267 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5268 return;
5269
5270 const Use &U = I.getOperandUse(i);
5271 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5272}
5273
5274void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5275 Check(I.getType()->isPointerTy(),
5276 "dereferenceable, dereferenceable_or_null "
5277 "apply only to pointer types",
5278 &I);
5280 "dereferenceable, dereferenceable_or_null apply only to load"
5281 " and inttoptr instructions, use attributes for calls or invokes",
5282 &I);
5283 Check(MD->getNumOperands() == 1,
5284 "dereferenceable, dereferenceable_or_null "
5285 "take one operand!",
5286 &I);
5287 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5288 Check(CI && CI->getType()->isIntegerTy(64),
5289 "dereferenceable, "
5290 "dereferenceable_or_null metadata value must be an i64!",
5291 &I);
5292}
5293
5294void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5295 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5296 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5297 &I);
5298 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5299}
5300
5301void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5302 auto GetBranchingTerminatorNumOperands = [&]() {
5303 unsigned ExpectedNumOperands = 0;
5304 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5305 ExpectedNumOperands = BI->getNumSuccessors();
5306 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5307 ExpectedNumOperands = SI->getNumSuccessors();
5308 else if (isa<CallInst>(&I))
5309 ExpectedNumOperands = 1;
5310 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5311 ExpectedNumOperands = IBI->getNumDestinations();
5312 else if (isa<SelectInst>(&I))
5313 ExpectedNumOperands = 2;
5314 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5315 ExpectedNumOperands = CI->getNumSuccessors();
5316 return ExpectedNumOperands;
5317 };
5318 Check(MD->getNumOperands() >= 1,
5319 "!prof annotations should have at least 1 operand", MD);
5320 // Check first operand.
5321 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5323 "expected string with name of the !prof annotation", MD);
5324 MDString *MDS = cast<MDString>(MD->getOperand(0));
5325 StringRef ProfName = MDS->getString();
5326
5328 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5329 "'unknown' !prof should only appear on instructions on which "
5330 "'branch_weights' would",
5331 MD);
5332 verifyUnknownProfileMetadata(MD);
5333 return;
5334 }
5335
5336 Check(MD->getNumOperands() >= 2,
5337 "!prof annotations should have no less than 2 operands", MD);
5338
5339 // Check consistency of !prof branch_weights metadata.
5340 if (ProfName == MDProfLabels::BranchWeights) {
5341 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5342 if (isa<InvokeInst>(&I)) {
5343 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5344 "Wrong number of InvokeInst branch_weights operands", MD);
5345 } else {
5346 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5347 if (ExpectedNumOperands == 0)
5348 CheckFailed("!prof branch_weights are not allowed for this instruction",
5349 MD);
5350
5351 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5352 MD);
5353 }
5354 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5355 ++i) {
5356 auto &MDO = MD->getOperand(i);
5357 Check(MDO, "second operand should not be null", MD);
5359 "!prof brunch_weights operand is not a const int");
5360 }
5361 } else if (ProfName == MDProfLabels::ValueProfile) {
5362 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5363 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5364 Check(KindInt, "VP !prof missing kind argument", MD);
5365
5366 auto Kind = KindInt->getZExtValue();
5367 Check(Kind >= InstrProfValueKind::IPVK_First &&
5368 Kind <= InstrProfValueKind::IPVK_Last,
5369 "Invalid VP !prof kind", MD);
5370 Check(MD->getNumOperands() % 2 == 1,
5371 "VP !prof should have an even number "
5372 "of arguments after 'VP'",
5373 MD);
5374 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5375 Kind == InstrProfValueKind::IPVK_MemOPSize)
5377 "VP !prof indirect call or memop size expected to be applied to "
5378 "CallBase instructions only",
5379 MD);
5380 } else {
5381 CheckFailed("expected either branch_weights or VP profile name", MD);
5382 }
5383}
5384
5385void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5386 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5387 // DIAssignID metadata must be attached to either an alloca or some form of
5388 // store/memory-writing instruction.
5389 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5390 // possible store intrinsics.
5391 bool ExpectedInstTy =
5393 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5394 I, MD);
5395 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5396 // only be found as DbgAssignIntrinsic operands.
5397 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5398 for (auto *User : AsValue->users()) {
5400 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5401 MD, User);
5402 // All of the dbg.assign intrinsics should be in the same function as I.
5403 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5404 CheckDI(DAI->getFunction() == I.getFunction(),
5405 "dbg.assign not in same function as inst", DAI, &I);
5406 }
5407 }
5408 for (DbgVariableRecord *DVR :
5409 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5410 CheckDI(DVR->isDbgAssign(),
5411 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5412 CheckDI(DVR->getFunction() == I.getFunction(),
5413 "DVRAssign not in same function as inst", DVR, &I);
5414 }
5415}
5416
5417void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5419 "!mmra metadata attached to unexpected instruction kind", I, MD);
5420
5421 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5422 // list of tags such as !2 in the following example:
5423 // !0 = !{!"a", !"b"}
5424 // !1 = !{!"c", !"d"}
5425 // !2 = !{!0, !1}
5426 if (MMRAMetadata::isTagMD(MD))
5427 return;
5428
5429 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5430 for (const MDOperand &MDOp : MD->operands())
5431 Check(MMRAMetadata::isTagMD(MDOp.get()),
5432 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5433}
5434
5435void Verifier::visitCallStackMetadata(MDNode *MD) {
5436 // Call stack metadata should consist of a list of at least 1 constant int
5437 // (representing a hash of the location).
5438 Check(MD->getNumOperands() >= 1,
5439 "call stack metadata should have at least 1 operand", MD);
5440
5441 for (const auto &Op : MD->operands())
5443 "call stack metadata operand should be constant integer", Op);
5444}
5445
5446void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5447 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5448 Check(MD->getNumOperands() >= 1,
5449 "!memprof annotations should have at least 1 metadata operand "
5450 "(MemInfoBlock)",
5451 MD);
5452
5453 // Check each MIB
5454 for (auto &MIBOp : MD->operands()) {
5455 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5456 // The first operand of an MIB should be the call stack metadata.
5457 // There rest of the operands should be MDString tags, and there should be
5458 // at least one.
5459 Check(MIB->getNumOperands() >= 2,
5460 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5461
5462 // Check call stack metadata (first operand).
5463 Check(MIB->getOperand(0) != nullptr,
5464 "!memprof MemInfoBlock first operand should not be null", MIB);
5465 Check(isa<MDNode>(MIB->getOperand(0)),
5466 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5467 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5468 visitCallStackMetadata(StackMD);
5469
5470 // The second MIB operand should be MDString.
5472 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5473
5474 // Any remaining should be MDNode that are pairs of integers
5475 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5476 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5477 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5478 MIB);
5479 Check(OpNode->getNumOperands() == 2,
5480 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5481 "operands",
5482 MIB);
5483 // Check that all of Op's operands are ConstantInt.
5484 Check(llvm::all_of(OpNode->operands(),
5485 [](const MDOperand &Op) {
5486 return mdconst::hasa<ConstantInt>(Op);
5487 }),
5488 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5489 "ConstantInt operands",
5490 MIB);
5491 }
5492 }
5493}
5494
5495void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5496 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5497 // Verify the partial callstack annotated from memprof profiles. This callsite
5498 // is a part of a profiled allocation callstack.
5499 visitCallStackMetadata(MD);
5500}
5501
5502static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5503 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5504 return isa<ConstantInt>(VAL->getValue());
5505 return false;
5506}
5507
5508void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5509 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5510 &I);
5511 for (Metadata *Op : MD->operands()) {
5513 "The callee_type metadata must be a list of type metadata nodes", Op);
5514 auto *TypeMD = cast<MDNode>(Op);
5515 Check(TypeMD->getNumOperands() == 2,
5516 "Well-formed generalized type metadata must contain exactly two "
5517 "operands",
5518 Op);
5519 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5520 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5521 "The first operand of type metadata for functions must be zero", Op);
5522 Check(TypeMD->hasGeneralizedMDString(),
5523 "Only generalized type metadata can be part of the callee_type "
5524 "metadata list",
5525 Op);
5526 }
5527}
5528
5529void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5530 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5531 Check(Annotation->getNumOperands() >= 1,
5532 "annotation must have at least one operand");
5533 for (const MDOperand &Op : Annotation->operands()) {
5534 bool TupleOfStrings =
5535 isa<MDTuple>(Op.get()) &&
5536 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5537 return isa<MDString>(Annotation.get());
5538 });
5539 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5540 "operands must be a string or a tuple of strings");
5541 }
5542}
5543
5544void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5545 unsigned NumOps = MD->getNumOperands();
5546 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5547 MD);
5548 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5549 "first scope operand must be self-referential or string", MD);
5550 if (NumOps == 3)
5552 "third scope operand must be string (if used)", MD);
5553
5554 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5555 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5556
5557 unsigned NumDomainOps = Domain->getNumOperands();
5558 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5559 "domain must have one or two operands", Domain);
5560 Check(Domain->getOperand(0).get() == Domain ||
5561 isa<MDString>(Domain->getOperand(0)),
5562 "first domain operand must be self-referential or string", Domain);
5563 if (NumDomainOps == 2)
5564 Check(isa<MDString>(Domain->getOperand(1)),
5565 "second domain operand must be string (if used)", Domain);
5566}
5567
5568void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5569 for (const MDOperand &Op : MD->operands()) {
5570 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5571 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5572 visitAliasScopeMetadata(OpMD);
5573 }
5574}
5575
5576void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5577 auto IsValidAccessScope = [](const MDNode *MD) {
5578 return MD->getNumOperands() == 0 && MD->isDistinct();
5579 };
5580
5581 // It must be either an access scope itself...
5582 if (IsValidAccessScope(MD))
5583 return;
5584
5585 // ...or a list of access scopes.
5586 for (const MDOperand &Op : MD->operands()) {
5587 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5588 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5589 Check(IsValidAccessScope(OpMD),
5590 "Access scope list contains invalid access scope", MD);
5591 }
5592}
5593
5594void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5595 static const char *ValidArgs[] = {"address_is_null", "address",
5596 "read_provenance", "provenance"};
5597
5598 auto *SI = dyn_cast<StoreInst>(&I);
5599 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5600 Check(SI->getValueOperand()->getType()->isPointerTy(),
5601 "!captures metadata can only be applied to store with value operand of "
5602 "pointer type",
5603 &I);
5604 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5605 &I);
5606
5607 for (Metadata *Op : Captures->operands()) {
5608 auto *Str = dyn_cast<MDString>(Op);
5609 Check(Str, "!captures metadata must be a list of strings", &I);
5610 Check(is_contained(ValidArgs, Str->getString()),
5611 "invalid entry in !captures metadata", &I, Str);
5612 }
5613}
5614
5615void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5616 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5617 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5618 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5620 "expected integer constant", MD);
5621}
5622
5623void Verifier::visitInlineHistoryMetadata(Instruction &I, MDNode *MD) {
5624 Check(isa<CallBase>(I), "!inline_history should only exist on calls", &I);
5625 for (Metadata *Op : MD->operands()) {
5626 // Can be null when a function is erased.
5627 if (!Op)
5628 continue;
5631 ->getValue()
5632 ->stripPointerCastsAndAliases()),
5633 "!inline_history operands must be functions or null", MD);
5634 }
5635}
5636
5637/// verifyInstruction - Verify that an instruction is well formed.
5638///
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated only in unreachable code.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Validate every operand: non-null, first-class typed, and each
  // cross-reference (function, block, argument, global, instruction, asm)
  // resolves within the right function/module.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      // NOTE(review): the arguments to isOperandBundleOfType appear truncated
      // here — confirm against upstream before relying on this predicate.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a whitelisted set of intrinsics may be invoked (rather than
      // called); everything else must appear as a direct call/callbr callee.
      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this very call.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
      visitConstantExprsRecursively(C);
    }
  }

  // From here on, validate the well-known metadata attachments, one kind at a
  // time, delegating to the kind-specific helpers above where they exist.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    // NOTE(review): the ConstantFP extraction expression appears truncated
    // here — confirm against upstream.
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
    Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
    visitNoFPClassMetadata(I, MD, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
        "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    // The operand must be an i64 constant power of two within the
    // implementation's maximum alignment.
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_inline_history))
    visitInlineHistoryMetadata(I, MD);

  // The !dbg attachment must be a DILocation, and key-instruction atom groups
  // are only valid when the subprogram has Key Instructions enabled.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Visit every remaining attachment; debug locations are only permitted
  // inside !dbg and !loop nodes.
  // NOTE(review): the declaration of MDs (a SmallVector of (kind, node)
  // pairs) appears to be missing above — confirm against upstream.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Remember this instruction so verifyDominatesUse can take its same-block
  // fast path for later uses.
  InstsInThisBlock.insert(&I);
}
5899
5900/// Allow intrinsics to be verified in different ways.
5901void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5903 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5904 IF);
5905
5906 // Verify that the intrinsic prototype lines up with what the .td files
5907 // describe.
5908 FunctionType *IFTy = IF->getFunctionType();
5909 bool IsVarArg = IFTy->isVarArg();
5910
5914
5915 // Walk the descriptors to extract overloaded types.
5920 "Intrinsic has incorrect return type!", IF);
5922 "Intrinsic has incorrect argument type!", IF);
5923
5924 // Verify if the intrinsic call matches the vararg property.
5925 if (IsVarArg)
5927 "Intrinsic was not defined with variable arguments!", IF);
5928 else
5930 "Callsite was not defined with variable arguments!", IF);
5931
5932 // All descriptors should be absorbed by now.
5933 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5934
5935 // Now that we have the intrinsic ID and the actual argument types (and we
5936 // know they are legal for the intrinsic!) get the intrinsic name through the
5937 // usual means. This allows us to verify the mangling of argument types into
5938 // the name.
5939 const std::string ExpectedName =
5940 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5941 Check(ExpectedName == IF->getName(),
5942 "Intrinsic name not mangled correctly for type arguments! "
5943 "Should be: " +
5944 ExpectedName,
5945 IF);
5946
5947 // If the intrinsic takes MDNode arguments, verify that they are either global
5948 // or are local to *this* function.
5949 for (Value *V : Call.args()) {
5950 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5951 visitMetadataAsValue(*MD, Call.getCaller());
5952 if (auto *Const = dyn_cast<Constant>(V))
5953 Check(!Const->getType()->isX86_AMXTy(),
5954 "const x86_amx is not allowed in argument!");
5955 }
5956
5957 switch (ID) {
5958 default:
5959 break;
5960 case Intrinsic::assume: {
5961 if (Call.hasOperandBundles()) {
5963 Check(Cond && Cond->isOne(),
5964 "assume with operand bundles must have i1 true condition", Call);
5965 }
5966 for (auto &Elem : Call.bundle_op_infos()) {
5967 unsigned ArgCount = Elem.End - Elem.Begin;
5968 // Separate storage assumptions are special insofar as they're the only
5969 // operand bundles allowed on assumes that aren't parameter attributes.
5970 if (Elem.Tag->getKey() == "separate_storage") {
5971 Check(ArgCount == 2,
5972 "separate_storage assumptions should have 2 arguments", Call);
5973 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5974 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5975 "arguments to separate_storage assumptions should be pointers",
5976 Call);
5977 continue;
5978 }
5979 Check(Elem.Tag->getKey() == "ignore" ||
5980 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5981 "tags must be valid attribute names", Call);
5982 Attribute::AttrKind Kind =
5983 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5984 if (Kind == Attribute::Alignment) {
5985 Check(ArgCount <= 3 && ArgCount >= 2,
5986 "alignment assumptions should have 2 or 3 arguments", Call);
5987 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5988 "first argument should be a pointer", Call);
5989 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5990 "second argument should be an integer", Call);
5991 if (ArgCount == 3)
5992 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5993 "third argument should be an integer if present", Call);
5994 continue;
5995 }
5996 if (Kind == Attribute::Dereferenceable) {
5997 Check(ArgCount == 2,
5998 "dereferenceable assumptions should have 2 arguments", Call);
5999 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
6000 "first argument should be a pointer", Call);
6001 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
6002 "second argument should be an integer", Call);
6003 continue;
6004 }
6005 Check(ArgCount <= 2, "too many arguments", Call);
6006 if (Kind == Attribute::None)
6007 break;
6008 if (Attribute::isIntAttrKind(Kind)) {
6009 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
6010 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
6011 "the second argument should be a constant integral value", Call);
6012 } else if (Attribute::canUseAsParamAttr(Kind)) {
6013 Check((ArgCount) == 1, "this attribute should have one argument", Call);
6014 } else if (Attribute::canUseAsFnAttr(Kind)) {
6015 Check((ArgCount) == 0, "this attribute has no argument", Call);
6016 }
6017 }
6018 break;
6019 }
6020 case Intrinsic::ucmp:
6021 case Intrinsic::scmp: {
6022 Type *SrcTy = Call.getOperand(0)->getType();
6023 Type *DestTy = Call.getType();
6024
6025 Check(DestTy->getScalarSizeInBits() >= 2,
6026 "result type must be at least 2 bits wide", Call);
6027
6028 bool IsDestTypeVector = DestTy->isVectorTy();
6029 Check(SrcTy->isVectorTy() == IsDestTypeVector,
6030 "ucmp/scmp argument and result types must both be either vector or "
6031 "scalar types",
6032 Call);
6033 if (IsDestTypeVector) {
6034 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6035 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6036 Check(SrcVecLen == DestVecLen,
6037 "return type and arguments must have the same number of "
6038 "elements",
6039 Call);
6040 }
6041 break;
6042 }
6043 case Intrinsic::coro_id: {
6044 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
6045 if (isa<ConstantPointerNull>(InfoArg))
6046 break;
6047 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6048 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6049 "info argument of llvm.coro.id must refer to an initialized "
6050 "constant");
6051 Constant *Init = GV->getInitializer();
6053 "info argument of llvm.coro.id must refer to either a struct or "
6054 "an array");
6055 break;
6056 }
6057 case Intrinsic::is_fpclass: {
6058 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6059 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6060 "unsupported bits for llvm.is.fpclass test mask");
6061 break;
6062 }
6063 case Intrinsic::fptrunc_round: {
6064 // Check the rounding mode
6065 Metadata *MD = nullptr;
6067 if (MAV)
6068 MD = MAV->getMetadata();
6069
6070 Check(MD != nullptr, "missing rounding mode argument", Call);
6071
6072 Check(isa<MDString>(MD),
6073 ("invalid value for llvm.fptrunc.round metadata operand"
6074 " (the operand should be a string)"),
6075 MD);
6076
6077 std::optional<RoundingMode> RoundMode =
6078 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6079 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6080 "unsupported rounding mode argument", Call);
6081 break;
6082 }
6083 case Intrinsic::convert_to_arbitrary_fp: {
6084 // Check that vector element counts are consistent.
6085 Type *ValueTy = Call.getArgOperand(0)->getType();
6086 Type *IntTy = Call.getType();
6087
6088 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6089 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6090 Check(IntVecTy,
6091 "if floating-point operand is a vector, integer operand must also "
6092 "be a vector",
6093 Call);
6094 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6095 "floating-point and integer vector operands must have the same "
6096 "element count",
6097 Call);
6098 }
6099
6100 // Check interpretation metadata (argoperand 1).
6101 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6102 Check(InterpMAV, "missing interpretation metadata operand", Call);
6103 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6104 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6105 StringRef Interp = InterpStr->getString();
6106
6107 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6108 Call);
6109
6110 // Valid interpretation strings: mini-float format names.
6112 "unsupported interpretation metadata string", Call);
6113
6114 // Check rounding mode metadata (argoperand 2).
6115 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6116 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6117 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6118 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6119
6120 std::optional<RoundingMode> RM =
6121 convertStrToRoundingMode(RoundingStr->getString());
6122 Check(RM && *RM != RoundingMode::Dynamic,
6123 "unsupported rounding mode argument", Call);
6124 break;
6125 }
6126 case Intrinsic::convert_from_arbitrary_fp: {
6127 // Check that vector element counts are consistent.
6128 Type *IntTy = Call.getArgOperand(0)->getType();
6129 Type *ValueTy = Call.getType();
6130
6131 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6132 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6133 Check(IntVecTy,
6134 "if floating-point operand is a vector, integer operand must also "
6135 "be a vector",
6136 Call);
6137 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6138 "floating-point and integer vector operands must have the same "
6139 "element count",
6140 Call);
6141 }
6142
6143 // Check interpretation metadata (argoperand 1).
6144 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6145 Check(InterpMAV, "missing interpretation metadata operand", Call);
6146 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6147 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6148 StringRef Interp = InterpStr->getString();
6149
6150 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6151 Call);
6152
6153 // Valid interpretation strings: mini-float format names.
6155 "unsupported interpretation metadata string", Call);
6156 break;
6157 }
6158#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6159#include "llvm/IR/VPIntrinsics.def"
6160#undef BEGIN_REGISTER_VP_INTRINSIC
6161 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6162 break;
6163#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6164 case Intrinsic::INTRINSIC:
6165#include "llvm/IR/ConstrainedOps.def"
6166#undef INSTRUCTION
6167 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6168 break;
6169 case Intrinsic::dbg_declare: // llvm.dbg.declare
6170 case Intrinsic::dbg_value: // llvm.dbg.value
6171 case Intrinsic::dbg_assign: // llvm.dbg.assign
6172 case Intrinsic::dbg_label: // llvm.dbg.label
 6173 // We no longer interpret debug intrinsics (the old variable-location
 6174 // design). They're meaningless as far as LLVM is concerned; we could make
 6175 // it an error for them to appear, but it's possible we'll have users
 6176 // converting back to intrinsics for the foreseeable future (such as DXIL),
 6177 // so tolerate their existence.
6178 break;
6179 case Intrinsic::memcpy:
6180 case Intrinsic::memcpy_inline:
6181 case Intrinsic::memmove:
6182 case Intrinsic::memset:
6183 case Intrinsic::memset_inline:
6184 break;
6185 case Intrinsic::experimental_memset_pattern: {
6186 const auto Memset = cast<MemSetPatternInst>(&Call);
6187 Check(Memset->getValue()->getType()->isSized(),
6188 "unsized types cannot be used as memset patterns", Call);
6189 break;
6190 }
6191 case Intrinsic::memcpy_element_unordered_atomic:
6192 case Intrinsic::memmove_element_unordered_atomic:
6193 case Intrinsic::memset_element_unordered_atomic: {
6194 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6195
6196 ConstantInt *ElementSizeCI =
6197 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6198 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6199 Check(ElementSizeVal.isPowerOf2(),
6200 "element size of the element-wise atomic memory intrinsic "
6201 "must be a power of 2",
6202 Call);
6203
6204 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6205 return Alignment && ElementSizeVal.ule(Alignment->value());
6206 };
6207 Check(IsValidAlignment(AMI->getDestAlign()),
6208 "incorrect alignment of the destination argument", Call);
6209 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6210 Check(IsValidAlignment(AMT->getSourceAlign()),
6211 "incorrect alignment of the source argument", Call);
6212 }
6213 break;
6214 }
6215 case Intrinsic::call_preallocated_setup: {
6216 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6217 bool FoundCall = false;
6218 for (User *U : Call.users()) {
6219 auto *UseCall = dyn_cast<CallBase>(U);
6220 Check(UseCall != nullptr,
6221 "Uses of llvm.call.preallocated.setup must be calls");
6222 Intrinsic::ID IID = UseCall->getIntrinsicID();
6223 if (IID == Intrinsic::call_preallocated_arg) {
6224 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6225 Check(AllocArgIndex != nullptr,
6226 "llvm.call.preallocated.alloc arg index must be a constant");
6227 auto AllocArgIndexInt = AllocArgIndex->getValue();
6228 Check(AllocArgIndexInt.sge(0) &&
6229 AllocArgIndexInt.slt(NumArgs->getValue()),
6230 "llvm.call.preallocated.alloc arg index must be between 0 and "
6231 "corresponding "
6232 "llvm.call.preallocated.setup's argument count");
6233 } else if (IID == Intrinsic::call_preallocated_teardown) {
6234 // nothing to do
6235 } else {
6236 Check(!FoundCall, "Can have at most one call corresponding to a "
6237 "llvm.call.preallocated.setup");
6238 FoundCall = true;
6239 size_t NumPreallocatedArgs = 0;
6240 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6241 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6242 ++NumPreallocatedArgs;
6243 }
6244 }
6245 Check(NumPreallocatedArgs != 0,
6246 "cannot use preallocated intrinsics on a call without "
6247 "preallocated arguments");
6248 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6249 "llvm.call.preallocated.setup arg size must be equal to number "
6250 "of preallocated arguments "
6251 "at call site",
6252 Call, *UseCall);
6253 // getOperandBundle() cannot be called if more than one of the operand
6254 // bundle exists. There is already a check elsewhere for this, so skip
6255 // here if we see more than one.
6256 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6257 1) {
6258 return;
6259 }
6260 auto PreallocatedBundle =
6261 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6262 Check(PreallocatedBundle,
6263 "Use of llvm.call.preallocated.setup outside intrinsics "
6264 "must be in \"preallocated\" operand bundle");
6265 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6266 "preallocated bundle must have token from corresponding "
6267 "llvm.call.preallocated.setup");
6268 }
6269 }
6270 break;
6271 }
6272 case Intrinsic::call_preallocated_arg: {
6273 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6274 Check(Token &&
6275 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6276 "llvm.call.preallocated.arg token argument must be a "
6277 "llvm.call.preallocated.setup");
6278 Check(Call.hasFnAttr(Attribute::Preallocated),
6279 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6280 "call site attribute");
6281 break;
6282 }
6283 case Intrinsic::call_preallocated_teardown: {
6284 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6285 Check(Token &&
6286 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6287 "llvm.call.preallocated.teardown token argument must be a "
6288 "llvm.call.preallocated.setup");
6289 break;
6290 }
6291 case Intrinsic::gcroot:
6292 case Intrinsic::gcwrite:
6293 case Intrinsic::gcread:
6294 if (ID == Intrinsic::gcroot) {
6295 AllocaInst *AI =
6297 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6299 "llvm.gcroot parameter #2 must be a constant.", Call);
6300 if (!AI->getAllocatedType()->isPointerTy()) {
6302 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6303 "or argument #2 must be a non-null constant.",
6304 Call);
6305 }
6306 }
6307
6308 Check(Call.getParent()->getParent()->hasGC(),
6309 "Enclosing function does not use GC.", Call);
6310 break;
6311 case Intrinsic::init_trampoline:
6313 "llvm.init_trampoline parameter #2 must resolve to a function.",
6314 Call);
6315 break;
6316 case Intrinsic::prefetch:
6317 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6318 "rw argument to llvm.prefetch must be 0-1", Call);
6319 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6320 "locality argument to llvm.prefetch must be 0-3", Call);
6321 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6322 "cache type argument to llvm.prefetch must be 0-1", Call);
6323 break;
6324 case Intrinsic::reloc_none: {
6326 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6327 "llvm.reloc.none argument must be a metadata string", &Call);
6328 break;
6329 }
6330 case Intrinsic::stackprotector:
6332 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6333 break;
6334 case Intrinsic::localescape: {
6335 BasicBlock *BB = Call.getParent();
6336 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6337 Call);
6338 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6339 Call);
6340 for (Value *Arg : Call.args()) {
6341 if (isa<ConstantPointerNull>(Arg))
6342 continue; // Null values are allowed as placeholders.
6343 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6344 Check(AI && AI->isStaticAlloca(),
6345 "llvm.localescape only accepts static allocas", Call);
6346 }
6347 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6348 SawFrameEscape = true;
6349 break;
6350 }
6351 case Intrinsic::localrecover: {
6353 Function *Fn = dyn_cast<Function>(FnArg);
6354 Check(Fn && !Fn->isDeclaration(),
6355 "llvm.localrecover first "
6356 "argument must be function defined in this module",
6357 Call);
6358 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6359 auto &Entry = FrameEscapeInfo[Fn];
6360 Entry.second = unsigned(
6361 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6362 break;
6363 }
6364
6365 case Intrinsic::experimental_gc_statepoint:
6366 if (auto *CI = dyn_cast<CallInst>(&Call))
6367 Check(!CI->isInlineAsm(),
6368 "gc.statepoint support for inline assembly unimplemented", CI);
6369 Check(Call.getParent()->getParent()->hasGC(),
6370 "Enclosing function does not use GC.", Call);
6371
6372 verifyStatepoint(Call);
6373 break;
6374 case Intrinsic::experimental_gc_result: {
6375 Check(Call.getParent()->getParent()->hasGC(),
6376 "Enclosing function does not use GC.", Call);
6377
6378 auto *Statepoint = Call.getArgOperand(0);
6379 if (isa<UndefValue>(Statepoint))
6380 break;
6381
6382 // Are we tied to a statepoint properly?
6383 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6384 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6385 Intrinsic::experimental_gc_statepoint,
6386 "gc.result operand #1 must be from a statepoint", Call,
6387 Call.getArgOperand(0));
6388
6389 // Check that result type matches wrapped callee.
6390 auto *TargetFuncType =
6391 cast<FunctionType>(StatepointCall->getParamElementType(2));
6392 Check(Call.getType() == TargetFuncType->getReturnType(),
6393 "gc.result result type does not match wrapped callee", Call);
6394 break;
6395 }
6396 case Intrinsic::experimental_gc_relocate: {
6397 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6398
6400 "gc.relocate must return a pointer or a vector of pointers", Call);
6401
6402 // Check that this relocate is correctly tied to the statepoint
6403
6404 // This is case for relocate on the unwinding path of an invoke statepoint
6405 if (LandingPadInst *LandingPad =
6407
6408 const BasicBlock *InvokeBB =
6409 LandingPad->getParent()->getUniquePredecessor();
6410
6411 // Landingpad relocates should have only one predecessor with invoke
6412 // statepoint terminator
6413 Check(InvokeBB, "safepoints should have unique landingpads",
6414 LandingPad->getParent());
6415 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6416 InvokeBB);
6418 "gc relocate should be linked to a statepoint", InvokeBB);
6419 } else {
6420 // In all other cases relocate should be tied to the statepoint directly.
6421 // This covers relocates on a normal return path of invoke statepoint and
6422 // relocates of a call statepoint.
6423 auto *Token = Call.getArgOperand(0);
6425 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6426 }
6427
6428 // Verify rest of the relocate arguments.
6429 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6430
6431 // Both the base and derived must be piped through the safepoint.
6434 "gc.relocate operand #2 must be integer offset", Call);
6435
6436 Value *Derived = Call.getArgOperand(2);
6437 Check(isa<ConstantInt>(Derived),
6438 "gc.relocate operand #3 must be integer offset", Call);
6439
6440 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6441 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6442
6443 // Check the bounds
6444 if (isa<UndefValue>(StatepointCall))
6445 break;
6446 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6447 .getOperandBundle(LLVMContext::OB_gc_live)) {
6448 Check(BaseIndex < Opt->Inputs.size(),
6449 "gc.relocate: statepoint base index out of bounds", Call);
6450 Check(DerivedIndex < Opt->Inputs.size(),
6451 "gc.relocate: statepoint derived index out of bounds", Call);
6452 }
6453
6454 // Relocated value must be either a pointer type or vector-of-pointer type,
6455 // but gc_relocate does not need to return the same pointer type as the
6456 // relocated pointer. It can be casted to the correct type later if it's
6457 // desired. However, they must have the same address space and 'vectorness'
6458 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6459 auto *ResultType = Call.getType();
6460 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6461 auto *BaseType = Relocate.getBasePtr()->getType();
6462
6463 Check(BaseType->isPtrOrPtrVectorTy(),
6464 "gc.relocate: relocated value must be a pointer", Call);
6465 Check(DerivedType->isPtrOrPtrVectorTy(),
6466 "gc.relocate: relocated value must be a pointer", Call);
6467
6468 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6469 "gc.relocate: vector relocates to vector and pointer to pointer",
6470 Call);
6471 Check(
6472 ResultType->getPointerAddressSpace() ==
6473 DerivedType->getPointerAddressSpace(),
6474 "gc.relocate: relocating a pointer shouldn't change its address space",
6475 Call);
6476
6477 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6478 Check(GC, "gc.relocate: calling function must have GCStrategy",
6479 Call.getFunction());
6480 if (GC) {
6481 auto isGCPtr = [&GC](Type *PTy) {
6482 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6483 };
6484 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6485 Check(isGCPtr(BaseType),
6486 "gc.relocate: relocated value must be a gc pointer", Call);
6487 Check(isGCPtr(DerivedType),
6488 "gc.relocate: relocated value must be a gc pointer", Call);
6489 }
6490 break;
6491 }
6492 case Intrinsic::experimental_patchpoint: {
6493 if (Call.getCallingConv() == CallingConv::AnyReg) {
6495 "patchpoint: invalid return type used with anyregcc", Call);
6496 }
6497 break;
6498 }
6499 case Intrinsic::eh_exceptioncode:
6500 case Intrinsic::eh_exceptionpointer: {
6502 "eh.exceptionpointer argument must be a catchpad", Call);
6503 break;
6504 }
6505 case Intrinsic::get_active_lane_mask: {
6507 "get_active_lane_mask: must return a "
6508 "vector",
6509 Call);
6510 auto *ElemTy = Call.getType()->getScalarType();
6511 Check(ElemTy->isIntegerTy(1),
6512 "get_active_lane_mask: element type is not "
6513 "i1",
6514 Call);
6515 break;
6516 }
6517 case Intrinsic::experimental_get_vector_length: {
6518 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6519 Check(!VF->isNegative() && !VF->isZero(),
6520 "get_vector_length: VF must be positive", Call);
6521 break;
6522 }
6523 case Intrinsic::masked_load: {
6524 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6525 Call);
6526
6528 Value *PassThru = Call.getArgOperand(2);
6529 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6530 Call);
6531 Check(PassThru->getType() == Call.getType(),
6532 "masked_load: pass through and return type must match", Call);
6533 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6534 cast<VectorType>(Call.getType())->getElementCount(),
6535 "masked_load: vector mask must be same length as return", Call);
6536 break;
6537 }
6538 case Intrinsic::masked_store: {
6539 Value *Val = Call.getArgOperand(0);
6541 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6542 Call);
6543 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6544 cast<VectorType>(Val->getType())->getElementCount(),
6545 "masked_store: vector mask must be same length as value", Call);
6546 break;
6547 }
6548 case Intrinsic::experimental_guard: {
6549 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6551 "experimental_guard must have exactly one "
6552 "\"deopt\" operand bundle");
6553 break;
6554 }
6555
6556 case Intrinsic::experimental_deoptimize: {
6557 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6558 Call);
6560 "experimental_deoptimize must have exactly one "
6561 "\"deopt\" operand bundle");
6563 "experimental_deoptimize return type must match caller return type");
6564
6565 if (isa<CallInst>(Call)) {
6567 Check(RI,
6568 "calls to experimental_deoptimize must be followed by a return");
6569
6570 if (!Call.getType()->isVoidTy() && RI)
6571 Check(RI->getReturnValue() == &Call,
6572 "calls to experimental_deoptimize must be followed by a return "
6573 "of the value computed by experimental_deoptimize");
6574 }
6575
6576 break;
6577 }
6578 case Intrinsic::vastart: {
6580 "va_start called in a non-varargs function");
6581 break;
6582 }
6583 case Intrinsic::get_dynamic_area_offset: {
6584 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6585 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6586 IntTy->getBitWidth(),
6587 "get_dynamic_area_offset result type must be scalar integer matching "
6588 "alloca address space width",
6589 Call);
6590 break;
6591 }
6592 case Intrinsic::masked_udiv:
6593 case Intrinsic::masked_sdiv:
6594 case Intrinsic::masked_urem:
6595 case Intrinsic::masked_srem:
6596 case Intrinsic::vector_reduce_and:
6597 case Intrinsic::vector_reduce_or:
6598 case Intrinsic::vector_reduce_xor:
6599 case Intrinsic::vector_reduce_add:
6600 case Intrinsic::vector_reduce_mul:
6601 case Intrinsic::vector_reduce_smax:
6602 case Intrinsic::vector_reduce_smin:
6603 case Intrinsic::vector_reduce_umax:
6604 case Intrinsic::vector_reduce_umin: {
6605 Type *ArgTy = Call.getArgOperand(0)->getType();
6606 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6607 "Intrinsic has incorrect argument type!");
6608 break;
6609 }
6610 case Intrinsic::vector_reduce_fmax:
6611 case Intrinsic::vector_reduce_fmin: {
6612 Type *ArgTy = Call.getArgOperand(0)->getType();
6613 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6614 "Intrinsic has incorrect argument type!");
6615 break;
6616 }
6617 case Intrinsic::vector_reduce_fadd:
6618 case Intrinsic::vector_reduce_fmul: {
6619 // Unlike the other reductions, the first argument is a start value. The
6620 // second argument is the vector to be reduced.
6621 Type *ArgTy = Call.getArgOperand(1)->getType();
6622 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6623 "Intrinsic has incorrect argument type!");
6624 break;
6625 }
6626 case Intrinsic::smul_fix:
6627 case Intrinsic::smul_fix_sat:
6628 case Intrinsic::umul_fix:
6629 case Intrinsic::umul_fix_sat:
6630 case Intrinsic::sdiv_fix:
6631 case Intrinsic::sdiv_fix_sat:
6632 case Intrinsic::udiv_fix:
6633 case Intrinsic::udiv_fix_sat: {
6634 Value *Op1 = Call.getArgOperand(0);
6635 Value *Op2 = Call.getArgOperand(1);
6637 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6638 "vector of ints");
6640 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6641 "vector of ints");
6642
6643 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6644 Check(Op3->getType()->isIntegerTy(),
6645 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6646 Check(Op3->getBitWidth() <= 32,
6647 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6648
6649 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6650 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6651 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6652 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6653 "the operands");
6654 } else {
6655 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6656 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6657 "to the width of the operands");
6658 }
6659 break;
6660 }
6661 case Intrinsic::lrint:
6662 case Intrinsic::llrint:
6663 case Intrinsic::lround:
6664 case Intrinsic::llround: {
6665 Type *ValTy = Call.getArgOperand(0)->getType();
6666 Type *ResultTy = Call.getType();
6667 auto *VTy = dyn_cast<VectorType>(ValTy);
6668 auto *RTy = dyn_cast<VectorType>(ResultTy);
6669 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6670 ExpectedName + ": argument must be floating-point or vector "
6671 "of floating-points, and result must be integer or "
6672 "vector of integers",
6673 &Call);
6674 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6675 ExpectedName + ": argument and result disagree on vector use", &Call);
6676 if (VTy) {
6677 Check(VTy->getElementCount() == RTy->getElementCount(),
6678 ExpectedName + ": argument must be same length as result", &Call);
6679 }
6680 break;
6681 }
6682 case Intrinsic::bswap: {
6683 Type *Ty = Call.getType();
6684 unsigned Size = Ty->getScalarSizeInBits();
6685 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6686 break;
6687 }
6688 case Intrinsic::invariant_start: {
6689 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6690 Check(InvariantSize &&
6691 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6692 "invariant_start parameter must be -1, 0 or a positive number",
6693 &Call);
6694 break;
6695 }
6696 case Intrinsic::matrix_multiply:
6697 case Intrinsic::matrix_transpose:
6698 case Intrinsic::matrix_column_major_load:
6699 case Intrinsic::matrix_column_major_store: {
6701 ConstantInt *Stride = nullptr;
6702 ConstantInt *NumRows;
6703 ConstantInt *NumColumns;
6704 VectorType *ResultTy;
6705 Type *Op0ElemTy = nullptr;
6706 Type *Op1ElemTy = nullptr;
6707 switch (ID) {
6708 case Intrinsic::matrix_multiply: {
6709 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6710 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6711 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6713 ->getNumElements() ==
6714 NumRows->getZExtValue() * N->getZExtValue(),
6715 "First argument of a matrix operation does not match specified "
6716 "shape!");
6718 ->getNumElements() ==
6719 N->getZExtValue() * NumColumns->getZExtValue(),
6720 "Second argument of a matrix operation does not match specified "
6721 "shape!");
6722
6723 ResultTy = cast<VectorType>(Call.getType());
6724 Op0ElemTy =
6725 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6726 Op1ElemTy =
6727 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6728 break;
6729 }
6730 case Intrinsic::matrix_transpose:
6731 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6732 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6733 ResultTy = cast<VectorType>(Call.getType());
6734 Op0ElemTy =
6735 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6736 break;
6737 case Intrinsic::matrix_column_major_load: {
6739 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6740 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6741 ResultTy = cast<VectorType>(Call.getType());
6742 break;
6743 }
6744 case Intrinsic::matrix_column_major_store: {
6746 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6747 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6748 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6749 Op0ElemTy =
6750 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6751 break;
6752 }
6753 default:
6754 llvm_unreachable("unexpected intrinsic");
6755 }
6756
6757 Check(ResultTy->getElementType()->isIntegerTy() ||
6758 ResultTy->getElementType()->isFloatingPointTy(),
6759 "Result type must be an integer or floating-point type!", IF);
6760
6761 if (Op0ElemTy)
6762 Check(ResultTy->getElementType() == Op0ElemTy,
6763 "Vector element type mismatch of the result and first operand "
6764 "vector!",
6765 IF);
6766
6767 if (Op1ElemTy)
6768 Check(ResultTy->getElementType() == Op1ElemTy,
6769 "Vector element type mismatch of the result and second operand "
6770 "vector!",
6771 IF);
6772
6774 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6775 "Result of a matrix operation does not fit in the returned vector!");
6776
6777 if (Stride) {
6778 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6779 IF);
6780 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6781 "Stride must be greater or equal than the number of rows!", IF);
6782 }
6783
6784 break;
6785 }
6786 case Intrinsic::stepvector: {
6788 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6789 VecTy->getScalarSizeInBits() >= 8,
6790 "stepvector only supported for vectors of integers "
6791 "with a bitwidth of at least 8.",
6792 &Call);
6793 break;
6794 }
6795 case Intrinsic::experimental_vector_match: {
6796 Value *Op1 = Call.getArgOperand(0);
6797 Value *Op2 = Call.getArgOperand(1);
6799
6800 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6801 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6802 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6803
6804 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6806 "Second operand must be a fixed length vector.", &Call);
6807 Check(Op1Ty->getElementType()->isIntegerTy(),
6808 "First operand must be a vector of integers.", &Call);
6809 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6810 "First two operands must have the same element type.", &Call);
6811 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6812 "First operand and mask must have the same number of elements.",
6813 &Call);
6814 Check(MaskTy->getElementType()->isIntegerTy(1),
6815 "Mask must be a vector of i1's.", &Call);
6816 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6817 &Call);
6818 break;
6819 }
6820 case Intrinsic::vector_insert: {
6821 Value *Vec = Call.getArgOperand(0);
6822 Value *SubVec = Call.getArgOperand(1);
6823 Value *Idx = Call.getArgOperand(2);
6824 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6825
6826 VectorType *VecTy = cast<VectorType>(Vec->getType());
6827 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6828
6829 ElementCount VecEC = VecTy->getElementCount();
6830 ElementCount SubVecEC = SubVecTy->getElementCount();
6831 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6832 "vector_insert parameters must have the same element "
6833 "type.",
6834 &Call);
6835 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6836 "vector_insert index must be a constant multiple of "
6837 "the subvector's known minimum vector length.");
6838
6839 // If this insertion is not the 'mixed' case where a fixed vector is
6840 // inserted into a scalable vector, ensure that the insertion of the
6841 // subvector does not overrun the parent vector.
6842 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6843 Check(IdxN < VecEC.getKnownMinValue() &&
6844 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6845 "subvector operand of vector_insert would overrun the "
6846 "vector being inserted into.");
6847 }
6848 break;
6849 }
6850 case Intrinsic::vector_extract: {
6851 Value *Vec = Call.getArgOperand(0);
6852 Value *Idx = Call.getArgOperand(1);
6853 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6854
6855 VectorType *ResultTy = cast<VectorType>(Call.getType());
6856 VectorType *VecTy = cast<VectorType>(Vec->getType());
6857
6858 ElementCount VecEC = VecTy->getElementCount();
6859 ElementCount ResultEC = ResultTy->getElementCount();
6860
6861 Check(ResultTy->getElementType() == VecTy->getElementType(),
6862 "vector_extract result must have the same element "
6863 "type as the input vector.",
6864 &Call);
6865 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6866 "vector_extract index must be a constant multiple of "
6867 "the result type's known minimum vector length.");
6868
6869 // If this extraction is not the 'mixed' case where a fixed vector is
6870 // extracted from a scalable vector, ensure that the extraction does not
6871 // overrun the parent vector.
6872 if (VecEC.isScalable() == ResultEC.isScalable()) {
6873 Check(IdxN < VecEC.getKnownMinValue() &&
6874 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6875 "vector_extract would overrun.");
6876 }
6877 break;
6878 }
6879 case Intrinsic::vector_partial_reduce_fadd:
6880 case Intrinsic::vector_partial_reduce_add: {
6883
6884 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6885 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6886
6887 Check((VecWidth % AccWidth) == 0,
6888 "Invalid vector widths for partial "
6889 "reduction. The width of the input vector "
6890 "must be a positive integer multiple of "
6891 "the width of the accumulator vector.");
6892 break;
6893 }
6894 case Intrinsic::experimental_noalias_scope_decl: {
6895 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6896 break;
6897 }
6898 case Intrinsic::preserve_array_access_index:
6899 case Intrinsic::preserve_struct_access_index:
6900 case Intrinsic::aarch64_ldaxr:
6901 case Intrinsic::aarch64_ldxr:
6902 case Intrinsic::arm_ldaex:
6903 case Intrinsic::arm_ldrex: {
6904 Type *ElemTy = Call.getParamElementType(0);
6905 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6906 &Call);
6907 break;
6908 }
6909 case Intrinsic::aarch64_stlxr:
6910 case Intrinsic::aarch64_stxr:
6911 case Intrinsic::arm_stlex:
6912 case Intrinsic::arm_strex: {
6913 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6914 Check(ElemTy,
6915 "Intrinsic requires elementtype attribute on second argument.",
6916 &Call);
6917 break;
6918 }
6919 case Intrinsic::aarch64_prefetch: {
6920 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6921 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6922 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6923 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6924 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6925 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6926 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6927 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6928 break;
6929 }
6930 case Intrinsic::aarch64_range_prefetch: {
6931 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6932 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6933 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6934 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6935 Call);
6936 break;
6937 }
6938 case Intrinsic::aarch64_stshh_atomic_store: {
6939 uint64_t Order = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6940 Check(Order == static_cast<uint64_t>(AtomicOrderingCABI::relaxed) ||
6941 Order == static_cast<uint64_t>(AtomicOrderingCABI::release) ||
6942 Order == static_cast<uint64_t>(AtomicOrderingCABI::seq_cst),
6943 "order argument to llvm.aarch64.stshh.atomic.store must be 0, 3 or 5",
6944 Call);
6945
6946 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6947 "policy argument to llvm.aarch64.stshh.atomic.store must be 0 or 1",
6948 Call);
6949
6950 uint64_t Size = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6951 Check(Size == 8 || Size == 16 || Size == 32 || Size == 64,
6952 "size argument to llvm.aarch64.stshh.atomic.store must be 8, 16, "
6953 "32 or 64",
6954 Call);
6955 break;
6956 }
6957 case Intrinsic::callbr_landingpad: {
6958 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6959 Check(CBR, "intrinstic requires callbr operand", &Call);
6960 if (!CBR)
6961 break;
6962
6963 const BasicBlock *LandingPadBB = Call.getParent();
6964 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6965 if (!PredBB) {
6966 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6967 break;
6968 }
6969 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6970 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6971 &Call);
6972 break;
6973 }
6974 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6975 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6976 "block in indirect destination list",
6977 &Call);
6978 const Instruction &First = *LandingPadBB->begin();
6979 Check(&First == &Call, "No other instructions may proceed intrinsic",
6980 &Call);
6981 break;
6982 }
6983 case Intrinsic::structured_gep: {
6984 // Parser should refuse those 2 cases.
6985 assert(Call.arg_size() >= 1);
6987
6988 Check(Call.paramHasAttr(0, Attribute::ElementType),
6989 "Intrinsic first parameter is missing an ElementType attribute",
6990 &Call);
6991
6992 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6993 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6995 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6996 Check(Index->getType()->isIntegerTy(),
6997 "Index operand type must be an integer", &Call);
6998
6999 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
7000 T = AT->getElementType();
7001 } else if (StructType *ST = dyn_cast<StructType>(T)) {
7002 Check(CI, "Indexing into a struct requires a constant int", &Call);
7003 Check(CI->getZExtValue() < ST->getNumElements(),
7004 "Indexing in a struct should be inbounds", &Call);
7005 T = ST->getElementType(CI->getZExtValue());
7006 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
7007 T = VT->getElementType();
7008 } else {
7009 CheckFailed("Reached a non-composite type with more indices to process",
7010 &Call);
7011 }
7012 }
7013 break;
7014 }
7015 case Intrinsic::structured_alloca:
7016 Check(Call.hasRetAttr(Attribute::ElementType),
7017 "@llvm.structured.alloca calls require elementtype attribute.",
7018 &Call);
7019 break;
7020 case Intrinsic::amdgcn_cs_chain: {
7021 auto CallerCC = Call.getCaller()->getCallingConv();
7022 switch (CallerCC) {
7023 case CallingConv::AMDGPU_CS:
7024 case CallingConv::AMDGPU_CS_Chain:
7025 case CallingConv::AMDGPU_CS_ChainPreserve:
7026 case CallingConv::AMDGPU_ES:
7027 case CallingConv::AMDGPU_GS:
7028 case CallingConv::AMDGPU_HS:
7029 case CallingConv::AMDGPU_LS:
7030 case CallingConv::AMDGPU_VS:
7031 break;
7032 default:
7033 CheckFailed("Intrinsic cannot be called from functions with this "
7034 "calling convention",
7035 &Call);
7036 break;
7037 }
7038
7039 Check(Call.paramHasAttr(2, Attribute::InReg),
7040 "SGPR arguments must have the `inreg` attribute", &Call);
7041 Check(!Call.paramHasAttr(3, Attribute::InReg),
7042 "VGPR arguments must not have the `inreg` attribute", &Call);
7043
7044 auto *Next = Call.getNextNode();
7045 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7046 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7047 Intrinsic::amdgcn_unreachable;
7048 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7049 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7050 break;
7051 }
7052 case Intrinsic::amdgcn_init_exec_from_input: {
7053 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7054 Check(Arg && Arg->hasInRegAttr(),
7055 "only inreg arguments to the parent function are valid as inputs to "
7056 "this intrinsic",
7057 &Call);
7058 break;
7059 }
7060 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7061 auto CallerCC = Call.getCaller()->getCallingConv();
7062 switch (CallerCC) {
7063 case CallingConv::AMDGPU_CS_Chain:
7064 case CallingConv::AMDGPU_CS_ChainPreserve:
7065 break;
7066 default:
7067 CheckFailed("Intrinsic can only be used from functions with the "
7068 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7069 "calling conventions",
7070 &Call);
7071 break;
7072 }
7073
7074 unsigned InactiveIdx = 1;
7075 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7076 "Value for inactive lanes must not have the `inreg` attribute",
7077 &Call);
7078 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7079 "Value for inactive lanes must be a function argument", &Call);
7080 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7081 "Value for inactive lanes must be a VGPR function argument", &Call);
7082 break;
7083 }
7084 case Intrinsic::amdgcn_call_whole_wave: {
7086 Check(F, "Indirect whole wave calls are not allowed", &Call);
7087
7088 CallingConv::ID CC = F->getCallingConv();
7089 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7090 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7091 &Call);
7092
7093 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7094
7095 Check(Call.arg_size() == F->arg_size(),
7096 "Call argument count must match callee argument count", &Call);
7097
7098 // The first argument of the call is the callee, and the first argument of
7099 // the callee is the active mask. The rest of the arguments must match.
7100 Check(F->arg_begin()->getType()->isIntegerTy(1),
7101 "Callee must have i1 as its first argument", &Call);
7102 for (auto [CallArg, FuncArg] :
7103 drop_begin(zip_equal(Call.args(), F->args()))) {
7104 Check(CallArg->getType() == FuncArg.getType(),
7105 "Argument types must match", &Call);
7106
7107 // Check that inreg attributes match between call site and function
7108 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7109 FuncArg.hasInRegAttr(),
7110 "Argument inreg attributes must match", &Call);
7111 }
7112 break;
7113 }
7114 case Intrinsic::amdgcn_s_prefetch_data: {
7115 Check(
7118 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7119 break;
7120 }
7121 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7122 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7123 Value *Src0 = Call.getArgOperand(0);
7124 Value *Src1 = Call.getArgOperand(1);
7125
7126 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7127 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7128 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7129 Call.getArgOperand(3));
7130 Check(BLGP <= 4, "invalid value for blgp format", Call,
7131 Call.getArgOperand(4));
7132
7133 // AMDGPU::MFMAScaleFormats values
7134 auto getFormatNumRegs = [](unsigned FormatVal) {
7135 switch (FormatVal) {
7136 case 0:
7137 case 1:
7138 return 8u;
7139 case 2:
7140 case 3:
7141 return 6u;
7142 case 4:
7143 return 4u;
7144 default:
7145 llvm_unreachable("invalid format value");
7146 }
7147 };
7148
7149 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7150 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7151 return false;
7152 unsigned NumElts = Ty->getNumElements();
7153 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7154 };
7155
7156 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7157 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7158 Check(isValidSrcASrcBVector(Src0Ty),
7159 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7160 Check(isValidSrcASrcBVector(Src1Ty),
7161 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7162
7163 // Permit excess registers for the format.
7164 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7165 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7166 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7167 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7168 break;
7169 }
7170 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7171 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7172 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7173 Value *Src0 = Call.getArgOperand(1);
7174 Value *Src1 = Call.getArgOperand(3);
7175
7176 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7177 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7178 Check(FmtA <= 4, "invalid value for matrix format", Call,
7179 Call.getArgOperand(0));
7180 Check(FmtB <= 4, "invalid value for matrix format", Call,
7181 Call.getArgOperand(2));
7182
7183 // AMDGPU::MatrixFMT values
7184 auto getFormatNumRegs = [](unsigned FormatVal) {
7185 switch (FormatVal) {
7186 case 0:
7187 case 1:
7188 return 16u;
7189 case 2:
7190 case 3:
7191 return 12u;
7192 case 4:
7193 return 8u;
7194 default:
7195 llvm_unreachable("invalid format value");
7196 }
7197 };
7198
7199 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7200 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7201 return false;
7202 unsigned NumElts = Ty->getNumElements();
7203 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7204 };
7205
7206 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7207 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7208 Check(isValidSrcASrcBVector(Src0Ty),
7209 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7210 Check(isValidSrcASrcBVector(Src1Ty),
7211 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7212
7213 // Permit excess registers for the format.
7214 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7215 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7216 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7217 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7218 break;
7219 }
7220 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7221 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7222 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7223 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7224 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7225 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7226 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7227 Value *PtrArg = Call.getArgOperand(0);
7228 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7230 "cooperative atomic intrinsics require a generic or global pointer",
7231 &Call, PtrArg);
7232
7233 // Last argument must be a MD string
7235 MDNode *MD = cast<MDNode>(Op->getMetadata());
7236 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7237 "cooperative atomic intrinsics require that the last argument is a "
7238 "metadata string",
7239 &Call, Op);
7240 break;
7241 }
7242 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7243 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7244 Value *V = Call.getArgOperand(0);
7245 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7246 Check(RegCount % 8 == 0,
7247 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7248 break;
7249 }
7250 case Intrinsic::experimental_convergence_entry:
7251 case Intrinsic::experimental_convergence_anchor:
7252 break;
7253 case Intrinsic::experimental_convergence_loop:
7254 break;
7255 case Intrinsic::ptrmask: {
7256 Type *Ty0 = Call.getArgOperand(0)->getType();
7257 Type *Ty1 = Call.getArgOperand(1)->getType();
7259 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7260 "of pointers",
7261 &Call);
7262 Check(
7263 Ty0->isVectorTy() == Ty1->isVectorTy(),
7264 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7265 &Call);
7266 if (Ty0->isVectorTy())
7267 Check(cast<VectorType>(Ty0)->getElementCount() ==
7268 cast<VectorType>(Ty1)->getElementCount(),
7269 "llvm.ptrmask intrinsic arguments must have the same number of "
7270 "elements",
7271 &Call);
7272 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7273 "llvm.ptrmask intrinsic second argument bitwidth must match "
7274 "pointer index type size of first argument",
7275 &Call);
7276 break;
7277 }
7278 case Intrinsic::thread_pointer: {
7280 DL.getDefaultGlobalsAddressSpace(),
7281 "llvm.thread.pointer intrinsic return type must be for the globals "
7282 "address space",
7283 &Call);
7284 break;
7285 }
7286 case Intrinsic::threadlocal_address: {
7287 const Value &Arg0 = *Call.getArgOperand(0);
7288 Check(isa<GlobalValue>(Arg0),
7289 "llvm.threadlocal.address first argument must be a GlobalValue");
7290 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7291 "llvm.threadlocal.address operand isThreadLocal() must be true");
7292 break;
7293 }
7294 case Intrinsic::lifetime_start:
7295 case Intrinsic::lifetime_end: {
7296 Value *Ptr = Call.getArgOperand(0);
7297 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7298 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7299 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7300 "llvm.lifetime.start/end can only be used on alloca or poison",
7301 &Call);
7302 break;
7303 }
7304 case Intrinsic::sponentry: {
7305 const unsigned StackAS = DL.getAllocaAddrSpace();
7306 const Type *RetTy = Call.getFunctionType()->getReturnType();
7307 Check(RetTy->getPointerAddressSpace() == StackAS,
7308 "llvm.sponentry must return a pointer to the stack", &Call);
7309 break;
7310 }
7311 };
7312
7313 // Verify that there aren't any unmediated control transfers between funclets.
7315 Function *F = Call.getParent()->getParent();
7316 if (F->hasPersonalityFn() &&
7317 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7318 // Run EH funclet coloring on-demand and cache results for other intrinsic
7319 // calls in this function
7320 if (BlockEHFuncletColors.empty())
7321 BlockEHFuncletColors = colorEHFunclets(*F);
7322
7323 // Check for catch-/cleanup-pad in first funclet block
7324 bool InEHFunclet = false;
7325 BasicBlock *CallBB = Call.getParent();
7326 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7327 assert(CV.size() > 0 && "Uncolored block");
7328 for (BasicBlock *ColorFirstBB : CV)
7329 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7330 It != ColorFirstBB->end())
7332 InEHFunclet = true;
7333
7334 // Check for funclet operand bundle
7335 bool HasToken = false;
7336 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7338 HasToken = true;
7339
7340 // This would cause silent code truncation in WinEHPrepare
7341 if (InEHFunclet)
7342 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7343 }
7344 }
7345}
7346
7347/// Carefully grab the subprogram from a local scope.
7348///
7349/// This carefully grabs the subprogram from a local scope, avoiding the
7350/// built-in assertions that would typically fire.
7352 if (!LocalScope)
7353 return nullptr;
7354
7355 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7356 return SP;
7357
7358 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7359 return getSubprogram(LB->getRawScope());
7360
7361 // Just return null; broken scope chains are checked elsewhere.
7362 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7363 return nullptr;
7364}
7365
7366void Verifier::visit(DbgLabelRecord &DLR) {
7368 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7369
7370 // Ignore broken !dbg attachments; they're checked elsewhere.
7371 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7372 if (!isa<DILocation>(N))
7373 return;
7374
7375 BasicBlock *BB = DLR.getParent();
7376 Function *F = BB ? BB->getParent() : nullptr;
7377
7378 // The scopes for variables and !dbg attachments must agree.
7379 DILabel *Label = DLR.getLabel();
7380 DILocation *Loc = DLR.getDebugLoc();
7381 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7382
7383 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7384 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7385 if (!LabelSP || !LocSP)
7386 return;
7387
7388 CheckDI(LabelSP == LocSP,
7389 "mismatched subprogram between #dbg_label label and !dbg attachment",
7390 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7391 Loc->getScope()->getSubprogram());
7392}
7393
7394void Verifier::visit(DbgVariableRecord &DVR) {
7395 BasicBlock *BB = DVR.getParent();
7396 Function *F = BB->getParent();
7397
7398 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7399 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7400 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7401 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7402 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7403
7404 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7405 // DIArgList, or an empty MDNode (which is a legacy representation for an
7406 // "undef" location).
7407 auto *MD = DVR.getRawLocation();
7408 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7409 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7410 "invalid #dbg record address/value", &DVR, MD, BB, F);
7411 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7412 visitValueAsMetadata(*VAM, F);
7413 if (DVR.isDbgDeclare()) {
7414 // Allow integers here to support inttoptr salvage.
7415 Type *Ty = VAM->getValue()->getType();
7416 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7417 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7418 F);
7419 }
7420 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7421 visitDIArgList(*AL, F);
7422 }
7423
7425 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7426 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7427
7429 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7430 F);
7431 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7432
7433 if (DVR.isDbgAssign()) {
7435 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7436 F);
7437 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7438 AreDebugLocsAllowed::No);
7439
7440 const auto *RawAddr = DVR.getRawAddress();
7441 // Similarly to the location above, the address for an assign
7442 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7443 // represents an undef address.
7444 CheckDI(
7445 isa<ValueAsMetadata>(RawAddr) ||
7446 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7447 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7448 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7449 visitValueAsMetadata(*VAM, F);
7450
7452 "invalid #dbg_assign address expression", &DVR,
7453 DVR.getRawAddressExpression(), BB, F);
7454 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7455
7456 // All of the linked instructions should be in the same function as DVR.
7457 for (Instruction *I : at::getAssignmentInsts(&DVR))
7458 CheckDI(DVR.getFunction() == I->getFunction(),
7459 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7460 }
7461
7462 // This check is redundant with one in visitLocalVariable().
7463 DILocalVariable *Var = DVR.getVariable();
7464 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7465 BB, F);
7466
7467 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7468 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7469 &DVR, DLNode, BB, F);
7470 DILocation *Loc = DVR.getDebugLoc();
7471
7472 // The scopes for variables and !dbg attachments must agree.
7473 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7474 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7475 if (!VarSP || !LocSP)
7476 return; // Broken scope chains are checked elsewhere.
7477
7478 CheckDI(VarSP == LocSP,
7479 "mismatched subprogram between #dbg record variable and DILocation",
7480 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7481 Loc->getScope()->getSubprogram(), BB, F);
7482
7483 verifyFnArgs(DVR);
7484}
7485
7486void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7487 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7488 auto *RetTy = cast<VectorType>(VPCast->getType());
7489 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7490 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7491 "VP cast intrinsic first argument and result vector lengths must be "
7492 "equal",
7493 *VPCast);
7494
7495 switch (VPCast->getIntrinsicID()) {
7496 default:
7497 llvm_unreachable("Unknown VP cast intrinsic");
7498 case Intrinsic::vp_trunc:
7499 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7500 "llvm.vp.trunc intrinsic first argument and result element type "
7501 "must be integer",
7502 *VPCast);
7503 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7504 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7505 "larger than the bit size of the return type",
7506 *VPCast);
7507 break;
7508 case Intrinsic::vp_zext:
7509 case Intrinsic::vp_sext:
7510 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7511 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7512 "element type must be integer",
7513 *VPCast);
7514 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7515 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7516 "argument must be smaller than the bit size of the return type",
7517 *VPCast);
7518 break;
7519 case Intrinsic::vp_fptoui:
7520 case Intrinsic::vp_fptosi:
7521 case Intrinsic::vp_lrint:
7522 case Intrinsic::vp_llrint:
7523 Check(
7524 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7525 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7526 "type must be floating-point and result element type must be integer",
7527 *VPCast);
7528 break;
7529 case Intrinsic::vp_uitofp:
7530 case Intrinsic::vp_sitofp:
7531 Check(
7532 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7533 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7534 "type must be integer and result element type must be floating-point",
7535 *VPCast);
7536 break;
7537 case Intrinsic::vp_fptrunc:
7538 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7539 "llvm.vp.fptrunc intrinsic first argument and result element type "
7540 "must be floating-point",
7541 *VPCast);
7542 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7543 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7544 "larger than the bit size of the return type",
7545 *VPCast);
7546 break;
7547 case Intrinsic::vp_fpext:
7548 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7549 "llvm.vp.fpext intrinsic first argument and result element type "
7550 "must be floating-point",
7551 *VPCast);
7552 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7553 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7554 "smaller than the bit size of the return type",
7555 *VPCast);
7556 break;
7557 case Intrinsic::vp_ptrtoint:
7558 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7559 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7560 "pointer and result element type must be integer",
7561 *VPCast);
7562 break;
7563 case Intrinsic::vp_inttoptr:
7564 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7565 "llvm.vp.inttoptr intrinsic first argument element type must be "
7566 "integer and result element type must be pointer",
7567 *VPCast);
7568 break;
7569 }
7570 }
7571
7572 switch (VPI.getIntrinsicID()) {
7573 case Intrinsic::vp_fcmp: {
7574 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7576 "invalid predicate for VP FP comparison intrinsic", &VPI);
7577 break;
7578 }
7579 case Intrinsic::vp_icmp: {
7580 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7582 "invalid predicate for VP integer comparison intrinsic", &VPI);
7583 break;
7584 }
7585 case Intrinsic::vp_is_fpclass: {
7586 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7587 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7588 "unsupported bits for llvm.vp.is.fpclass test mask");
7589 break;
7590 }
7591 case Intrinsic::experimental_vp_splice: {
7592 VectorType *VecTy = cast<VectorType>(VPI.getType());
7593 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7594 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7595 if (VPI.getParent() && VPI.getParent()->getParent()) {
7596 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7597 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7598 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7599 }
7600 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7601 (Idx >= 0 && Idx < KnownMinNumElements),
7602 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7603 "known minimum number of elements in the vector. For scalable "
7604 "vectors the minimum number of elements is determined from "
7605 "vscale_range.",
7606 &VPI);
7607 break;
7608 }
7609 }
7610}
7611
7612void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7613 unsigned NumOperands = FPI.getNonMetadataArgCount();
7614 bool HasRoundingMD =
7616
7617 // Add the expected number of metadata operands.
7618 NumOperands += (1 + HasRoundingMD);
7619
7620 // Compare intrinsics carry an extra predicate metadata operand.
7622 NumOperands += 1;
7623 Check((FPI.arg_size() == NumOperands),
7624 "invalid arguments for constrained FP intrinsic", &FPI);
7625
7626 switch (FPI.getIntrinsicID()) {
7627 case Intrinsic::experimental_constrained_lrint:
7628 case Intrinsic::experimental_constrained_llrint: {
7629 Type *ValTy = FPI.getArgOperand(0)->getType();
7630 Type *ResultTy = FPI.getType();
7631 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7632 "Intrinsic does not support vectors", &FPI);
7633 break;
7634 }
7635
7636 case Intrinsic::experimental_constrained_lround:
7637 case Intrinsic::experimental_constrained_llround: {
7638 Type *ValTy = FPI.getArgOperand(0)->getType();
7639 Type *ResultTy = FPI.getType();
7640 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7641 "Intrinsic does not support vectors", &FPI);
7642 break;
7643 }
7644
7645 case Intrinsic::experimental_constrained_fcmp:
7646 case Intrinsic::experimental_constrained_fcmps: {
7647 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7649 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7650 break;
7651 }
7652
7653 case Intrinsic::experimental_constrained_fptosi:
7654 case Intrinsic::experimental_constrained_fptoui: {
7655 Value *Operand = FPI.getArgOperand(0);
7656 ElementCount SrcEC;
7657 Check(Operand->getType()->isFPOrFPVectorTy(),
7658 "Intrinsic first argument must be floating point", &FPI);
7659 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7660 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7661 }
7662
7663 Operand = &FPI;
7664 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7665 "Intrinsic first argument and result disagree on vector use", &FPI);
7666 Check(Operand->getType()->isIntOrIntVectorTy(),
7667 "Intrinsic result must be an integer", &FPI);
7668 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7669 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7670 "Intrinsic first argument and result vector lengths must be equal",
7671 &FPI);
7672 }
7673 break;
7674 }
7675
7676 case Intrinsic::experimental_constrained_sitofp:
7677 case Intrinsic::experimental_constrained_uitofp: {
7678 Value *Operand = FPI.getArgOperand(0);
7679 ElementCount SrcEC;
7680 Check(Operand->getType()->isIntOrIntVectorTy(),
7681 "Intrinsic first argument must be integer", &FPI);
7682 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7683 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7684 }
7685
7686 Operand = &FPI;
7687 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7688 "Intrinsic first argument and result disagree on vector use", &FPI);
7689 Check(Operand->getType()->isFPOrFPVectorTy(),
7690 "Intrinsic result must be a floating point", &FPI);
7691 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7692 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7693 "Intrinsic first argument and result vector lengths must be equal",
7694 &FPI);
7695 }
7696 break;
7697 }
7698
7699 case Intrinsic::experimental_constrained_fptrunc:
7700 case Intrinsic::experimental_constrained_fpext: {
7701 Value *Operand = FPI.getArgOperand(0);
7702 Type *OperandTy = Operand->getType();
7703 Value *Result = &FPI;
7704 Type *ResultTy = Result->getType();
7705 Check(OperandTy->isFPOrFPVectorTy(),
7706 "Intrinsic first argument must be FP or FP vector", &FPI);
7707 Check(ResultTy->isFPOrFPVectorTy(),
7708 "Intrinsic result must be FP or FP vector", &FPI);
7709 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7710 "Intrinsic first argument and result disagree on vector use", &FPI);
7711 if (OperandTy->isVectorTy()) {
7712 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7713 cast<VectorType>(ResultTy)->getElementCount(),
7714 "Intrinsic first argument and result vector lengths must be equal",
7715 &FPI);
7716 }
7717 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7718 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7719 "Intrinsic first argument's type must be larger than result type",
7720 &FPI);
7721 } else {
7722 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7723 "Intrinsic first argument's type must be smaller than result type",
7724 &FPI);
7725 }
7726 break;
7727 }
7728
7729 default:
7730 break;
7731 }
7732
7733 // If a non-metadata argument is passed in a metadata slot then the
7734 // error will be caught earlier when the incorrect argument doesn't
7735 // match the specification in the intrinsic call table. Thus, no
7736 // argument type check is needed here.
7737
7738 Check(FPI.getExceptionBehavior().has_value(),
7739 "invalid exception behavior argument", &FPI);
7740 if (HasRoundingMD) {
7741 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7742 &FPI);
7743 }
7744}
7745
7746void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7747 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7748 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7749
7750 // We don't know whether this intrinsic verified correctly.
7751 if (!V || !E || !E->isValid())
7752 return;
7753
7754 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7755 auto Fragment = E->getFragmentInfo();
7756 if (!Fragment)
7757 return;
7758
7759 // The frontend helps out GDB by emitting the members of local anonymous
7760 // unions as artificial local variables with shared storage. When SROA splits
7761 // the storage for artificial local variables that are smaller than the entire
7762 // union, the overhang piece will be outside of the allotted space for the
7763 // variable and this check fails.
7764 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7765 if (V->isArtificial())
7766 return;
7767
7768 verifyFragmentExpression(*V, *Fragment, &DVR);
7769}
7770
7771template <typename ValueOrMetadata>
7772void Verifier::verifyFragmentExpression(const DIVariable &V,
7774 ValueOrMetadata *Desc) {
7775 // If there's no size, the type is broken, but that should be checked
7776 // elsewhere.
7777 auto VarSize = V.getSizeInBits();
7778 if (!VarSize)
7779 return;
7780
7781 unsigned FragSize = Fragment.SizeInBits;
7782 unsigned FragOffset = Fragment.OffsetInBits;
7783 CheckDI(FragSize + FragOffset <= *VarSize,
7784 "fragment is larger than or outside of variable", Desc, &V);
7785 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7786}
7787
7788void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7789 // This function does not take the scope of noninlined function arguments into
7790 // account. Don't run it if current function is nodebug, because it may
7791 // contain inlined debug intrinsics.
7792 if (!HasDebugInfo)
7793 return;
7794
7795 // For performance reasons only check non-inlined ones.
7796 if (DVR.getDebugLoc()->getInlinedAt())
7797 return;
7798
7799 DILocalVariable *Var = DVR.getVariable();
7800 CheckDI(Var, "#dbg record without variable");
7801
7802 unsigned ArgNo = Var->getArg();
7803 if (!ArgNo)
7804 return;
7805
7806 // Verify there are no duplicate function argument debug info entries.
7807 // These will cause hard-to-debug assertions in the DWARF backend.
7808 if (DebugFnArgs.size() < ArgNo)
7809 DebugFnArgs.resize(ArgNo, nullptr);
7810
7811 auto *Prev = DebugFnArgs[ArgNo - 1];
7812 DebugFnArgs[ArgNo - 1] = Var;
7813 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7814 Prev, Var);
7815}
7816
7817void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7818 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7819
7820 // We don't know whether this intrinsic verified correctly.
7821 if (!E || !E->isValid())
7822 return;
7823
7825 Value *VarValue = DVR.getVariableLocationOp(0);
7826 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7827 return;
7828 // We allow EntryValues for swift async arguments, as they have an
7829 // ABI-guarantee to be turned into a specific register.
7830 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7831 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7832 return;
7833 }
7834
7835 CheckDI(!E->isEntryValue(),
7836 "Entry values are only allowed in MIR unless they target a "
7837 "swiftasync Argument",
7838 &DVR);
7839}
7840
7841void Verifier::verifyCompileUnits() {
7842 // When more than one Module is imported into the same context, such as during
7843 // an LTO build before linking the modules, ODR type uniquing may cause types
7844 // to point to a different CU. This check does not make sense in this case.
7845 if (M.getContext().isODRUniquingDebugTypes())
7846 return;
7847 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7848 SmallPtrSet<const Metadata *, 2> Listed;
7849 if (CUs)
7850 Listed.insert_range(CUs->operands());
7851 for (const auto *CU : CUVisited)
7852 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7853 CUVisited.clear();
7854}
7855
7856void Verifier::verifyDeoptimizeCallingConvs() {
7857 if (DeoptimizeDeclarations.empty())
7858 return;
7859
7860 const Function *First = DeoptimizeDeclarations[0];
7861 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7862 Check(First->getCallingConv() == F->getCallingConv(),
7863 "All llvm.experimental.deoptimize declarations must have the same "
7864 "calling convention",
7865 First, F);
7866 }
7867}
7868
7869void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7870 const OperandBundleUse &BU) {
7871 FunctionType *FTy = Call.getFunctionType();
7872
7873 Check((FTy->getReturnType()->isPointerTy() ||
7874 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7875 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7876 "function returning a pointer or a non-returning function that has a "
7877 "void return type",
7878 Call);
7879
7880 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7881 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7882 "an argument",
7883 Call);
7884
7885 auto *Fn = cast<Function>(BU.Inputs.front());
7886 Intrinsic::ID IID = Fn->getIntrinsicID();
7887
7888 if (IID) {
7889 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7890 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7891 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7892 "invalid function argument", Call);
7893 } else {
7894 StringRef FnName = Fn->getName();
7895 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7896 FnName == "objc_claimAutoreleasedReturnValue" ||
7897 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7898 "invalid function argument", Call);
7899 }
7900}
7901
7902void Verifier::verifyNoAliasScopeDecl() {
7903 if (NoAliasScopeDecls.empty())
7904 return;
7905
7906 // only a single scope must be declared at a time.
7907 for (auto *II : NoAliasScopeDecls) {
7908 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7909 "Not a llvm.experimental.noalias.scope.decl ?");
7910 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7912 Check(ScopeListMV != nullptr,
7913 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7914 "argument",
7915 II);
7916
7917 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7918 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7919 Check(ScopeListMD->getNumOperands() == 1,
7920 "!id.scope.list must point to a list with a single scope", II);
7921 visitAliasScopeListMetadata(ScopeListMD);
7922 }
7923
7924 // Only check the domination rule when requested. Once all passes have been
7925 // adapted this option can go away.
7927 return;
7928
7929 // Now sort the intrinsics based on the scope MDNode so that declarations of
7930 // the same scopes are next to each other.
7931 auto GetScope = [](IntrinsicInst *II) {
7932 const auto *ScopeListMV = cast<MetadataAsValue>(
7934 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7935 };
7936
7937 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7938 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7939 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7940 return GetScope(Lhs) < GetScope(Rhs);
7941 };
7942
7943 llvm::sort(NoAliasScopeDecls, Compare);
7944
7945 // Go over the intrinsics and check that for the same scope, they are not
7946 // dominating each other.
7947 auto ItCurrent = NoAliasScopeDecls.begin();
7948 while (ItCurrent != NoAliasScopeDecls.end()) {
7949 auto CurScope = GetScope(*ItCurrent);
7950 auto ItNext = ItCurrent;
7951 do {
7952 ++ItNext;
7953 } while (ItNext != NoAliasScopeDecls.end() &&
7954 GetScope(*ItNext) == CurScope);
7955
7956 // [ItCurrent, ItNext) represents the declarations for the same scope.
7957 // Ensure they are not dominating each other.. but only if it is not too
7958 // expensive.
7959 if (ItNext - ItCurrent < 32)
7960 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7961 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7962 if (I != J)
7963 Check(!DT.dominates(I, J),
7964 "llvm.experimental.noalias.scope.decl dominates another one "
7965 "with the same scope",
7966 I);
7967 ItCurrent = ItNext;
7968 }
7969}
7970
7971//===----------------------------------------------------------------------===//
7972// Implement the public interfaces to this file...
7973//===----------------------------------------------------------------------===//
7974
7976 Function &F = const_cast<Function &>(f);
7977
7978 // Don't use a raw_null_ostream. Printing IR is expensive.
7979 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7980
7981 // Note that this function's return value is inverted from what you would
7982 // expect of a function called "verify".
7983 return !V.verify(F);
7984}
7985
7987 bool *BrokenDebugInfo) {
7988 // Don't use a raw_null_ostream. Printing IR is expensive.
7989 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7990
7991 bool Broken = false;
7992 for (const Function &F : M)
7993 Broken |= !V.verify(F);
7994
7995 Broken |= !V.verify();
7996 if (BrokenDebugInfo)
7997 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7998 // Note that this function's return value is inverted from what you would
7999 // expect of a function called "verify".
8000 return Broken;
8001}
8002
namespace {

/// Legacy pass-manager wrapper around the IR Verifier.
///
/// Function bodies are verified as they flow through the pipeline; function
/// declarations and module-level checks are deferred to doFinalization, once
/// the whole module has been seen.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  std::unique_ptr<Verifier> V;
  // When true, any verification failure aborts compilation.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {}
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID), FatalErrors(FatalErrors) {}

  bool doInitialization(Module &M) override {
    // Broken debug info is handled via hasBrokenDebugInfo() in
    // doFinalization, so it is not treated as a fatal error here.
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = false;
    // Declarations never go through runOnFunction; verify them now.
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
8047
8048/// Helper to issue failure from the TBAA verification
8049template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8050 if (Diagnostic)
8051 return Diagnostic->CheckFailed(Args...);
8052}
8053
// Like the Check/CheckDI macros above: evaluate condition \p C and, on
// failure, report the remaining arguments through CheckFailed and make the
// enclosing TBAA verifier routine return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8061
8062/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8063/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8064/// struct-type node describing an aggregate data structure (like a struct).
8065TBAAVerifier::TBAABaseNodeSummary
8066TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8067 bool IsNewFormat) {
8068 if (BaseNode->getNumOperands() < 2) {
8069 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8070 return {true, ~0u};
8071 }
8072
8073 auto Itr = TBAABaseNodes.find(BaseNode);
8074 if (Itr != TBAABaseNodes.end())
8075 return Itr->second;
8076
8077 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8078 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8079 (void)InsertResult;
8080 assert(InsertResult.second && "We just checked!");
8081 return Result;
8082}
8083
8084TBAAVerifier::TBAABaseNodeSummary
8085TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
8086 const MDNode *BaseNode, bool IsNewFormat) {
8087 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
8088
8089 if (BaseNode->getNumOperands() == 2) {
8090 // Scalar nodes can only be accessed at offset 0.
8091 return isValidScalarTBAANode(BaseNode)
8092 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
8093 : InvalidNode;
8094 }
8095
8096 if (IsNewFormat) {
8097 if (BaseNode->getNumOperands() % 3 != 0) {
8098 CheckFailed("Access tag nodes must have the number of operands that is a "
8099 "multiple of 3!", BaseNode);
8100 return InvalidNode;
8101 }
8102 } else {
8103 if (BaseNode->getNumOperands() % 2 != 1) {
8104 CheckFailed("Struct tag nodes must have an odd number of operands!",
8105 BaseNode);
8106 return InvalidNode;
8107 }
8108 }
8109
8110 // Check the type size field.
8111 if (IsNewFormat) {
8112 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8113 BaseNode->getOperand(1));
8114 if (!TypeSizeNode) {
8115 CheckFailed("Type size nodes must be constants!", I, BaseNode);
8116 return InvalidNode;
8117 }
8118 }
8119
8120 // Check the type name field. In the new format it can be anything.
8121 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
8122 CheckFailed("Struct tag nodes have a string as their first operand",
8123 BaseNode);
8124 return InvalidNode;
8125 }
8126
8127 bool Failed = false;
8128
8129 std::optional<APInt> PrevOffset;
8130 unsigned BitWidth = ~0u;
8131
8132 // We've already checked that BaseNode is not a degenerate root node with one
8133 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8134 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8135 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8136 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8137 Idx += NumOpsPerField) {
8138 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8139 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8140 if (!isa<MDNode>(FieldTy)) {
8141 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8142 Failed = true;
8143 continue;
8144 }
8145
8146 auto *OffsetEntryCI =
8148 if (!OffsetEntryCI) {
8149 CheckFailed("Offset entries must be constants!", I, BaseNode);
8150 Failed = true;
8151 continue;
8152 }
8153
8154 if (BitWidth == ~0u)
8155 BitWidth = OffsetEntryCI->getBitWidth();
8156
8157 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8158 CheckFailed(
8159 "Bitwidth between the offsets and struct type entries must match", I,
8160 BaseNode);
8161 Failed = true;
8162 continue;
8163 }
8164
8165 // NB! As far as I can tell, we generate a non-strictly increasing offset
8166 // sequence only from structs that have zero size bit fields. When
8167 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8168 // pick the field lexically the latest in struct type metadata node. This
8169 // mirrors the actual behavior of the alias analysis implementation.
8170 bool IsAscending =
8171 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8172
8173 if (!IsAscending) {
8174 CheckFailed("Offsets must be increasing!", I, BaseNode);
8175 Failed = true;
8176 }
8177
8178 PrevOffset = OffsetEntryCI->getValue();
8179
8180 if (IsNewFormat) {
8181 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8182 BaseNode->getOperand(Idx + 2));
8183 if (!MemberSizeNode) {
8184 CheckFailed("Member size entries must be constants!", I, BaseNode);
8185 Failed = true;
8186 continue;
8187 }
8188 }
8189 }
8190
8191 return Failed ? InvalidNode
8192 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8193}
8194
8195static bool IsRootTBAANode(const MDNode *MD) {
8196 return MD->getNumOperands() < 2;
8197}
8198
8199static bool IsScalarTBAANodeImpl(const MDNode *MD,
8201 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8202 return false;
8203
8204 if (!isa<MDString>(MD->getOperand(0)))
8205 return false;
8206
8207 if (MD->getNumOperands() == 3) {
8209 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8210 return false;
8211 }
8212
8213 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8214 return Parent && Visited.insert(Parent).second &&
8215 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8216}
8217
8218bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8219 auto ResultIt = TBAAScalarNodes.find(MD);
8220 if (ResultIt != TBAAScalarNodes.end())
8221 return ResultIt->second;
8222
8223 SmallPtrSet<const MDNode *, 4> Visited;
8224 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8225 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8226 (void)InsertResult;
8227 assert(InsertResult.second && "Just checked!");
8228
8229 return Result;
8230}
8231
8232/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8233/// Offset in place to be the offset within the field node returned.
8234///
8235/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Struct-type node: fields start at operand 1 (old format) or 3 (new
  // format) and come in (type, offset[, size]) groups with ascending offsets.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
           Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    // First field whose start offset is strictly past Offset: the containing
    // field (if any) is the previous one.
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Even the first field starts past Offset -- no field contains it.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // Rebase Offset to be relative to the start of the containing field.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset is at or past the start of the lexically last field; that field is
  // the match (mirrors the alias-analysis behavior noted in
  // verifyTBAABaseNodeImpl).
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8275
8277 if (!Type || Type->getNumOperands() < 3)
8278 return false;
8279
8280 // In the new format type nodes shall have a reference to the parent type as
8281 // its first operand.
8282 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8283}
8284
8286 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8287 MD);
8288
8289 if (I)
8293 "This instruction shall not have a TBAA access tag!", I);
8294
8295 bool IsStructPathTBAA =
8296 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8297
8298 CheckTBAA(IsStructPathTBAA,
8299 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8300 I);
8301
8302 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8303 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8304
8305 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8306
8307 if (IsNewFormat) {
8308 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8309 "Access tag metadata must have either 4 or 5 operands", I, MD);
8310 } else {
8311 CheckTBAA(MD->getNumOperands() < 5,
8312 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8313 }
8314
8315 // Check the access size field.
8316 if (IsNewFormat) {
8317 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8318 MD->getOperand(3));
8319 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8320 }
8321
8322 // Check the immutability flag.
8323 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8324 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8325 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8326 MD->getOperand(ImmutabilityFlagOpNo));
8327 CheckTBAA(IsImmutableCI,
8328 "Immutability tag on struct tag metadata must be a constant", I,
8329 MD);
8330 CheckTBAA(
8331 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8332 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8333 MD);
8334 }
8335
8336 CheckTBAA(BaseNode && AccessType,
8337 "Malformed struct tag metadata: base and access-type "
8338 "should be non-null and point to Metadata nodes",
8339 I, MD, BaseNode, AccessType);
8340
8341 if (!IsNewFormat) {
8342 CheckTBAA(isValidScalarTBAANode(AccessType),
8343 "Access type node must be a valid scalar type", I, MD,
8344 AccessType);
8345 }
8346
8348 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8349
8350 APInt Offset = OffsetCI->getValue();
8351 bool SeenAccessTypeInPath = false;
8352
8353 SmallPtrSet<MDNode *, 4> StructPath;
8354
8355 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8356 BaseNode =
8357 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8358 if (!StructPath.insert(BaseNode).second) {
8359 CheckFailed("Cycle detected in struct path", I, MD);
8360 return false;
8361 }
8362
8363 bool Invalid;
8364 unsigned BaseNodeBitWidth;
8365 std::tie(Invalid, BaseNodeBitWidth) =
8366 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8367
8368 // If the base node is invalid in itself, then we've already printed all the
8369 // errors we wanted to print.
8370 if (Invalid)
8371 return false;
8372
8373 SeenAccessTypeInPath |= BaseNode == AccessType;
8374
8375 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8376 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8377 MD, &Offset);
8378
8379 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8380 (BaseNodeBitWidth == 0 && Offset == 0) ||
8381 (IsNewFormat && BaseNodeBitWidth == ~0u),
8382 "Access bit-width not the same as description bit-width", I, MD,
8383 BaseNodeBitWidth, Offset.getBitWidth());
8384
8385 if (IsNewFormat && SeenAccessTypeInPath)
8386 break;
8387 }
8388
8389 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8390 MD);
8391 return true;
8392}
8393
8394char VerifierLegacyPass::ID = 0;
8395INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8396
8398 return new VerifierLegacyPass(FatalErrors);
8399}
8400
8401AnalysisKey VerifierAnalysis::Key;
8408
8413
8415 auto Res = AM.getResult<VerifierAnalysis>(M);
8416 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8417 report_fatal_error("Broken module found, compilation aborted!");
8418
8419 return PreservedAnalyses::all();
8420}
8421
8423 auto res = AM.getResult<VerifierAnalysis>(F);
8424 if (res.IRBroken && FatalErrors)
8425 report_fatal_error("Broken function found, compilation aborted!");
8426
8427 return PreservedAnalyses::all();
8428}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:689
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:730
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1526
bool isNegative() const
Definition APFloat.h:1516
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for types.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:688
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:116
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:569
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:820
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:262
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:263
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &OverloadTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:307
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:300
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:289
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:316
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144