LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
126#include <algorithm>
127#include <cassert>
128#include <cstdint>
129#include <memory>
130#include <optional>
131#include <string>
132#include <utility>
133
134using namespace llvm;
135
137 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
138 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
139 "scopes are not dominating"));
140
// NOTE(review): this is the interior of `struct VerifierSupport`; the struct
// header and several member declarations (presumably `raw_ostream *OS;`,
// `ModuleSlotTracker MST;`, `const DataLayout &DL;`, `LLVMContext &Context;`
// and the `TreatBrokenDebugInfoAsError` flag documented below) appear to have
// been lost in extraction — confirm against upstream llvm/lib/IR/Verifier.cpp.
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
// NOTE(review): the constructor head (original line 156) is missing; the
// initializer list below initializes OS, M, MST, TT, DL and Context from a
// (raw_ostream *, const Module &) pair of parameters.
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
// NOTE(review): the function signature (original line 187) and all `case`
// labels (original lines 189, 192, 195, 198, 201, 204) were lost in
// extraction. Judging by the printed strings, this is presumably a
// `void Write(...LocationType Type)` overload mapping a debug-record location
// kind enum to its textual name — confirm against upstream
// llvm/lib/IR/Verifier.cpp.
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "declare_value";
197 break;
199 *OS << "assign";
200 break;
202 *OS << "end";
203 break;
205 *OS << "any";
206 break;
207 };
208 }
209
210 void Write(const Metadata *MD) {
211 if (!MD)
212 return;
213 MD->print(*OS, MST, &M);
214 *OS << '\n';
215 }
216
217 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
218 Write(MD.get());
219 }
220
221 void Write(const NamedMDNode *NMD) {
222 if (!NMD)
223 return;
224 NMD->print(*OS, MST);
225 *OS << '\n';
226 }
227
228 void Write(Type *T) {
229 if (!T)
230 return;
231 *OS << ' ' << *T;
232 }
233
234 void Write(const Comdat *C) {
235 if (!C)
236 return;
237 *OS << *C;
238 }
239
240 void Write(const APInt *AI) {
241 if (!AI)
242 return;
243 *OS << *AI << '\n';
244 }
245
246 void Write(const unsigned i) { *OS << i << '\n'; }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const Attribute *A) {
250 if (!A)
251 return;
252 *OS << A->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeSet *AS) {
257 if (!AS)
258 return;
259 *OS << AS->getAsString() << '\n';
260 }
261
262 // NOLINTNEXTLINE(readability-identifier-naming)
263 void Write(const AttributeList *AL) {
264 if (!AL)
265 return;
266 AL->print(*OS);
267 }
268
269 void Write(Printable P) { *OS << P << '\n'; }
270
271 template <typename T> void Write(ArrayRef<T> Vs) {
272 for (const T &V : Vs)
273 Write(V);
274 }
275
276 template <typename T1, typename... Ts>
277 void WriteTs(const T1 &V1, const Ts &... Vs) {
278 Write(V1);
279 WriteTs(Vs...);
280 }
281
282 template <typename... Ts> void WriteTs() {}
283
284public:
285 /// A check failed, so printout out the condition and the message.
286 ///
287 /// This provides a nice place to put a breakpoint if you want to see why
288 /// something is not correct.
289 void CheckFailed(const Twine &Message) {
290 if (OS)
291 *OS << Message << '\n';
292 Broken = true;
293 }
294
295 /// A check failed (with values to print).
296 ///
297 /// This calls the Message-only version so that the above is easier to set a
298 /// breakpoint on.
299 template <typename T1, typename... Ts>
300 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
301 CheckFailed(Message);
302 if (OS)
303 WriteTs(V1, Vs...);
304 }
305
306 /// A debug info check failed.
307 void DebugInfoCheckFailed(const Twine &Message) {
308 if (OS)
309 *OS << Message << '\n';
// NOTE(review): upstream also executes
//   Broken |= TreatBrokenDebugInfoAsError;
// here (original line 310); it appears to have been lost in extraction.
// Without it, broken debug info never marks the module Broken even when the
// caller asked for that — confirm against upstream llvm/lib/IR/Verifier.cpp.
311 BrokenDebugInfo = true;
312 }
313
314 /// A debug info check failed (with values to print).
// Mirrors the CheckFailed pattern: report the message first, then dump each
// offending entity via the Write overloads.
315 template <typename T1, typename... Ts>
316 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
317 const Ts &... Vs) {
318 DebugInfoCheckFailed(Message);
319 if (OS)
320 WriteTs(V1, Vs...);
321 }
322};
323
324namespace {
325
// NOTE(review): several member declarations of this class were lost in
// extraction (original lines 338, 341, 344, 358, 362 and 384 — the containers
// backing the doc comments that are followed by a blank slot below, e.g. the
// visited-MDNode set, DISubprogramAttachments, the visited-DICompileUnit set,
// FrameEscapeInfo, SiblingFuncletInfo and DebugFnArgs). Confirm against
// upstream llvm/lib/IR/Verifier.cpp before editing.
326class Verifier : public InstVisitor<Verifier>, VerifierSupport {
327 friend class InstVisitor<Verifier>;
328 DominatorTree DT;
329
330 /// When verifying a basic block, keep track of all of the
331 /// instructions we have seen so far.
332 ///
333 /// This allows us to do efficient dominance checks for the case when an
334 /// instruction has an operand that is an instruction in the same block.
335 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
336
337 /// Keep track of the metadata nodes that have been checked already.
339
340 /// Keep track which DISubprogram is attached to which function.
342
343 /// Track all DICompileUnits visited.
345
346 /// The result type for a landingpad.
347 Type *LandingPadResultTy;
348
349 /// Whether we've seen a call to @llvm.localescape in this function
350 /// already.
351 bool SawFrameEscape;
352
353 /// Whether the current function has a DISubprogram attached to it.
354 bool HasDebugInfo = false;
355
356 /// Stores the count of how many objects were passed to llvm.localescape for a
357 /// given function and the largest index passed to llvm.localrecover.
359
360 // Maps catchswitches and cleanuppads that unwind to siblings to the
361 // terminators that indicate the unwind, used to detect cycles therein.
363
364 /// Cache which blocks are in which funclet, if an EH funclet personality is
365 /// in use. Otherwise empty.
366 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
367
368 /// Cache of constants visited in search of ConstantExprs.
369 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
370
371 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
372 SmallVector<const Function *, 4> DeoptimizeDeclarations;
373
374 /// Cache of attribute lists verified.
375 SmallPtrSet<const void *, 32> AttributeListsVisited;
376
377 // Verify that this GlobalValue is only used in this module.
378 // This map is used to avoid visiting uses twice. We can arrive at a user
379 // twice, if they have multiple operands. In particular for very large
380 // constant expressions, we can arrive at a particular user many times.
381 SmallPtrSet<const Value *, 32> GlobalValueVisited;
382
383 // Keeps track of duplicate function argument debug info.
385
386 TBAAVerifier TBAAVerifyHelper;
387 ConvergenceVerifier ConvergenceVerifyHelper;
388
389 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
390
391 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
392
393public:
// Construct a verifier bound to one specific module; diagnostics go to OS
// (may be null to suppress output).
394 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
395 const Module &M)
396 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
397 SawFrameEscape(false), TBAAVerifyHelper(this) {
398 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
399 }
400
401 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
402
// Verify a single function of the bound module. Returns true iff no check
// failed. Per-function caches are reset at the end so the instance can be
// reused across functions.
403 bool verify(const Function &F) {
404 llvm::TimeTraceScope timeScope("Verifier");
405 assert(F.getParent() == &M &&
406 "An instance of this class only works with a specific module!");
407
408 // First ensure the function is well-enough formed to compute dominance
409 // information, and directly compute a dominance tree. We don't rely on the
410 // pass manager to provide this as it isolates us from a potentially
411 // out-of-date dominator tree and makes it significantly more complex to run
412 // this code outside of a pass manager.
413
414 // First check that every basic block has a terminator, otherwise we can't
415 // even inspect the CFG.
416 for (const BasicBlock &BB : F) {
417 if (!BB.empty() && BB.back().isTerminator())
418 continue;
419
420 if (OS) {
421 *OS << "Basic Block in function '" << F.getName()
422 << "' does not have terminator!\n";
423 BB.printAsOperand(*OS, true, MST);
424 *OS << "\n";
425 }
// A block without a terminator makes the CFG unusable; bail out immediately
// rather than continuing with further checks.
426 return false;
427 }
428
429 // FIXME: It's really gross that we have to cast away constness here.
430 if (!F.empty())
431 DT.recalculate(const_cast<Function &>(F));
432
433 auto FailureCB = [this](const Twine &Message) {
434 this->CheckFailed(Message);
435 };
436 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
437
438 Broken = false;
439 // FIXME: We strip const here because the inst visitor strips const.
440 visit(const_cast<Function &>(F));
441 verifySiblingFuncletUnwinds();
442
// Token-based convergence checks need the dominator tree and only run when
// convergence tokens were actually seen during the visit.
443 if (ConvergenceVerifyHelper.sawTokens())
444 ConvergenceVerifyHelper.verify(DT);
445
// Reset per-function state so a later verify(F) call starts clean.
446 InstsInThisBlock.clear();
447 DebugFnArgs.clear();
448 LandingPadResultTy = nullptr;
449 SawFrameEscape = false;
450 SiblingFuncletInfo.clear();
451 verifyNoAliasScopeDecl();
452 NoAliasScopeDecls.clear();
453
454 return !Broken;
455 }
456
457 /// Verify the module that this instance of \c Verifier was initialized with.
// Returns true iff every module-level check passed. Function bodies are
// verified separately via verify(const Function &).
458 bool verify() {
459 Broken = false;
460
461 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
462 for (const Function &F : M)
463 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
464 DeoptimizeDeclarations.push_back(&F);
465
466 // Now that we've visited every function, verify that we never asked to
467 // recover a frame index that wasn't escaped.
468 verifyFrameRecoverIndices();
469 for (const GlobalVariable &GV : M.globals())
470 visitGlobalVariable(GV);
471
472 for (const GlobalAlias &GA : M.aliases())
473 visitGlobalAlias(GA);
474
475 for (const GlobalIFunc &GI : M.ifuncs())
476 visitGlobalIFunc(GI);
477
478 for (const NamedMDNode &NMD : M.named_metadata())
479 visitNamedMDNode(NMD);
480
481 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
482 visitComdat(SMEC.getValue());
483
484 visitModuleFlags();
485 visitModuleIdents();
486 visitModuleCommandLines();
487 visitModuleErrnoTBAA();
488
489 verifyCompileUnits();
490
491 verifyDeoptimizeCallingConvs();
492 DISubprogramAttachments.clear();
493 return !Broken;
494 }
495
496private:
497 /// Whether a metadata node is allowed to be, or contain, a DILocation.
498 enum class AreDebugLocsAllowed { No, Yes };
499
500 /// Metadata that should be treated as a range, with slightly different
501 /// requirements.
502 enum class RangeLikeMetadataKind {
503 Range, // MD_range
504 AbsoluteSymbol, // MD_absolute_symbol
505 NoaliasAddrspace // MD_noalias_addrspace
506 };
507
508 // Verification methods...
509 void visitGlobalValue(const GlobalValue &GV);
510 void visitGlobalVariable(const GlobalVariable &GV);
511 void visitGlobalAlias(const GlobalAlias &GA);
512 void visitGlobalIFunc(const GlobalIFunc &GI);
513 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
514 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
515 const GlobalAlias &A, const Constant &C);
516 void visitNamedMDNode(const NamedMDNode &NMD);
517 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
518 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
519 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
520 void visitDIArgList(const DIArgList &AL, Function *F);
521 void visitComdat(const Comdat &C);
522 void visitModuleIdents();
523 void visitModuleCommandLines();
524 void visitModuleErrnoTBAA();
525 void visitModuleFlags();
526 void visitModuleFlag(const MDNode *Op,
527 DenseMap<const MDString *, const MDNode *> &SeenIDs,
528 SmallVectorImpl<const MDNode *> &Requirements);
529 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
530 void visitFunction(const Function &F);
531 void visitBasicBlock(BasicBlock &BB);
532 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
533 RangeLikeMetadataKind Kind);
534 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
535 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
536 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
538 void visitNofreeMetadata(Instruction &I, MDNode *MD);
539 void visitProfMetadata(Instruction &I, MDNode *MD);
540 void visitCallStackMetadata(MDNode *MD);
541 void visitMemProfMetadata(Instruction &I, MDNode *MD);
542 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
543 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
544 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
545 void visitMMRAMetadata(Instruction &I, MDNode *MD);
546 void visitAnnotationMetadata(MDNode *Annotation);
547 void visitAliasScopeMetadata(const MDNode *MD);
548 void visitAliasScopeListMetadata(const MDNode *MD);
549 void visitAccessGroupMetadata(const MDNode *MD);
550 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
551 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
552 void visitInlineHistoryMetadata(Instruction &I, MDNode *MD);
553
554 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
555#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
556#include "llvm/IR/Metadata.def"
557 void visitDIType(const DIType &N);
558 void visitDIScope(const DIScope &N);
559 void visitDIVariable(const DIVariable &N);
560 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
561 void visitDITemplateParameter(const DITemplateParameter &N);
562
563 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
564
565 void visit(DbgLabelRecord &DLR);
566 void visit(DbgVariableRecord &DVR);
567 // InstVisitor overrides...
568 using InstVisitor<Verifier>::visit;
569 void visitDbgRecords(Instruction &I);
570 void visit(Instruction &I);
571
572 void visitTruncInst(TruncInst &I);
573 void visitZExtInst(ZExtInst &I);
574 void visitSExtInst(SExtInst &I);
575 void visitFPTruncInst(FPTruncInst &I);
576 void visitFPExtInst(FPExtInst &I);
577 void visitFPToUIInst(FPToUIInst &I);
578 void visitFPToSIInst(FPToSIInst &I);
579 void visitUIToFPInst(UIToFPInst &I);
580 void visitSIToFPInst(SIToFPInst &I);
581 void visitIntToPtrInst(IntToPtrInst &I);
582 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
583 void visitPtrToAddrInst(PtrToAddrInst &I);
584 void visitPtrToIntInst(PtrToIntInst &I);
585 void visitBitCastInst(BitCastInst &I);
586 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
587 void visitPHINode(PHINode &PN);
588 void visitCallBase(CallBase &Call);
589 void visitUnaryOperator(UnaryOperator &U);
590 void visitBinaryOperator(BinaryOperator &B);
591 void visitICmpInst(ICmpInst &IC);
592 void visitFCmpInst(FCmpInst &FC);
593 void visitExtractElementInst(ExtractElementInst &EI);
594 void visitInsertElementInst(InsertElementInst &EI);
595 void visitShuffleVectorInst(ShuffleVectorInst &EI);
596 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
597 void visitCallInst(CallInst &CI);
598 void visitInvokeInst(InvokeInst &II);
599 void visitGetElementPtrInst(GetElementPtrInst &GEP);
600 void visitLoadInst(LoadInst &LI);
601 void visitStoreInst(StoreInst &SI);
602 void verifyDominatesUse(Instruction &I, unsigned i);
603 void visitInstruction(Instruction &I);
604 void visitTerminator(Instruction &I);
605 void visitCondBrInst(CondBrInst &BI);
606 void visitReturnInst(ReturnInst &RI);
607 void visitSwitchInst(SwitchInst &SI);
608 void visitIndirectBrInst(IndirectBrInst &BI);
609 void visitCallBrInst(CallBrInst &CBI);
610 void visitSelectInst(SelectInst &SI);
611 void visitUserOp1(Instruction &I);
612 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
613 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
614 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
615 void visitVPIntrinsic(VPIntrinsic &VPI);
616 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
617 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
618 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
619 void visitFenceInst(FenceInst &FI);
620 void visitAllocaInst(AllocaInst &AI);
621 void visitExtractValueInst(ExtractValueInst &EVI);
622 void visitInsertValueInst(InsertValueInst &IVI);
623 void visitEHPadPredecessors(Instruction &I);
624 void visitLandingPadInst(LandingPadInst &LPI);
625 void visitResumeInst(ResumeInst &RI);
626 void visitCatchPadInst(CatchPadInst &CPI);
627 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
628 void visitCleanupPadInst(CleanupPadInst &CPI);
629 void visitFuncletPadInst(FuncletPadInst &FPI);
630 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
631 void visitCleanupReturnInst(CleanupReturnInst &CRI);
632
633 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
634 void verifySwiftErrorValue(const Value *SwiftErrorVal);
635 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
636 void verifyMustTailCall(CallInst &CI);
637 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
638 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
639 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
640 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
641 const Value *V);
642 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
643 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
644 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
645 void verifyUnknownProfileMetadata(MDNode *MD);
646 void visitConstantExprsRecursively(const Constant *EntryC);
647 void visitConstantExpr(const ConstantExpr *CE);
648 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
649 void verifyInlineAsmCall(const CallBase &Call);
650 void verifyStatepoint(const CallBase &Call);
651 void verifyFrameRecoverIndices();
652 void verifySiblingFuncletUnwinds();
653
654 void verifyFragmentExpression(const DbgVariableRecord &I);
655 template <typename ValueOrMetadata>
656 void verifyFragmentExpression(const DIVariable &V,
658 ValueOrMetadata *Desc);
659 void verifyFnArgs(const DbgVariableRecord &DVR);
660 void verifyNotEntryValue(const DbgVariableRecord &I);
661
662 /// Module-level debug info verification...
663 void verifyCompileUnits();
664
665 /// Module-level verification that all @llvm.experimental.deoptimize
666 /// declarations share the same calling convention.
667 void verifyDeoptimizeCallingConvs();
668
669 void verifyAttachedCallBundle(const CallBase &Call,
670 const OperandBundleUse &BU);
671
672 /// Verify the llvm.experimental.noalias.scope.decl declarations
673 void verifyNoAliasScopeDecl();
674};
675
676} // end anonymous namespace
677
678/// We know that cond should be true, if not print an error message.
// On failure this reports via CheckFailed(...) and then executes `return;`,
// so Check may only be used inside void-returning members of Verifier. The
// do/while(false) wrapper makes the macro a single statement that is safe in
// unbraced if/else bodies.
679#define Check(C, ...) \
680 do { \
681 if (!(C)) { \
682 CheckFailed(__VA_ARGS__); \
683 return; \
684 } \
685 } while (false)
686
687/// We know that a debug info condition should be true, if not print
688/// an error message.
// Same shape as Check, but routes through DebugInfoCheckFailed so the
// failure is recorded as (potentially recoverable) broken debug info rather
// than a hard module break.
689#define CheckDI(C, ...) \
690 do { \
691 if (!(C)) { \
692 DebugInfoCheckFailed(__VA_ARGS__); \
693 return; \
694 } \
695 } while (false)
696
// Verify the debug records attached to an instruction: marker consistency,
// the no-DbgRecords-on-PHIs rule, and each record's own invariants.
697void Verifier::visitDbgRecords(Instruction &I) {
698 if (!I.DebugMarker)
699 return;
700 CheckDI(I.DebugMarker->MarkedInstr == &I,
701 "Instruction has invalid DebugMarker", &I);
702 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
703 "PHI Node must not have any attached DbgRecords", &I);
704 for (DbgRecord &DR : I.getDbgRecordRange()) {
705 CheckDI(DR.getMarker() == I.DebugMarker,
706 "DbgRecord had invalid DebugMarker", &I, &DR);
// NOTE(review): the initializer of `Loc` (original line 708) was lost in
// extraction — presumably a dyn_cast of the record's debug location to a
// DILocation; confirm against upstream llvm/lib/IR/Verifier.cpp.
707 if (auto *Loc =
709 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
710 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
711 visit(*DVR);
712 // These have to appear after `visit` for consistency with existing
713 // intrinsic behaviour.
714 verifyFragmentExpression(*DVR);
715 verifyNotEntryValue(*DVR);
716 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
717 visit(*DLR);
718 }
719 }
720}
721
722void Verifier::visit(Instruction &I) {
723 visitDbgRecords(I);
724 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
725 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
727}
728
729// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
730static void forEachUser(const Value *User,
732 llvm::function_ref<bool(const Value *)> Callback) {
733 if (!Visited.insert(User).second)
734 return;
735
737 while (!WorkList.empty()) {
738 const Value *Cur = WorkList.pop_back_val();
739 if (!Visited.insert(Cur).second)
740 continue;
741 if (Callback(Cur))
742 append_range(WorkList, Cur->materialized_users());
743 }
744}
745
// Checks common to every GlobalValue: linkage/visibility/storage-class
// consistency, !associated / implicit-ref metadata shape, and that all users
// of the global live in this module.
//
// NOTE(review): several `Check(` condition lines were lost in extraction
// (original lines 747, 807, 820, 825, 831-832) — only the message arguments
// remain below; confirm the conditions against upstream
// llvm/lib/IR/Verifier.cpp.
746void Verifier::visitGlobalValue(const GlobalValue &GV) {
748 "Global is external, but doesn't have external or weak linkage!", &GV);
750 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
751 if (const MDNode *Associated =
752 GO->getMetadata(LLVMContext::MD_associated)) {
753 Check(Associated->getNumOperands() == 1,
754 "associated metadata must have one operand", &GV, Associated);
755 const Metadata *Op = Associated->getOperand(0).get();
756 Check(Op, "associated metadata must have a global value", GO, Associated);
758 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
759 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
760 if (VM) {
761 Check(isa<PointerType>(VM->getValue()->getType()),
762 "associated value must be pointer typed", GV, Associated);
764 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
765 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
766 "associated metadata must point to a GlobalObject", GO, Stripped);
767 Check(Stripped != GO,
768 "global values should not associate to themselves", GO,
769 Associated);
770 }
771 }
772
773 // FIXME: Why is getMetadata on GlobalValue protected?
774 if (const MDNode *AbsoluteSymbol =
775 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
776 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
777 DL.getIntPtrType(GO->getType()),
778 RangeLikeMetadataKind::AbsoluteSymbol);
779 }
780
781 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
782 Check(!GO->isDeclaration(),
783 "ref metadata must not be placed on a declaration", GO);
// NOTE(review): the declaration of the `MDs` vector (original line 785,
// presumably a SmallVector<MDNode *>) was lost in extraction.
786 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
787 for (const MDNode *MD : MDs) {
788 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
789 &GV, MD);
790 const Metadata *Op = MD->getOperand(0).get();
791 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
792 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
793 if (VM) {
794 Check(isa<PointerType>(VM->getValue()->getType()),
795 "ref value must be pointer typed", GV, MD);
797 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
798 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
799 "ref metadata must point to a GlobalObject", GO, Stripped);
800 Check(Stripped != GO, "values should not reference themselves", GO,
801 MD);
802 }
803 }
804 }
805 }
806
808 "Only global variables can have appending linkage!", &GV);
810 if (GV.hasAppendingLinkage()) {
811 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
812 Check(GVar && GVar->getValueType()->isArrayTy(),
813 "Only global arrays can have appending linkage!", GVar);
814 }
815
816 if (GV.isDeclarationForLinker())
817 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
818
819 if (GV.hasDLLExportStorageClass()) {
821 "dllexport GlobalValue must have default or protected visibility",
822 &GV);
823 }
824 if (GV.hasDLLImportStorageClass()) {
826 "dllimport GlobalValue must have default visibility", &GV);
827 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
828 &GV);
829
830 Check((GV.isDeclaration() &&
833 "Global is marked as dllimport, but not external", &GV);
834 }
835
836 if (GV.isImplicitDSOLocal())
837 Check(GV.isDSOLocal(),
838 "GlobalValue with local linkage or non-default "
839 "visibility must be dso_local!",
840 &GV);
841
// Walk all (transitive) users and reject any that belong to another module
// or to a parentless instruction.
842 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
843 if (const Instruction *I = dyn_cast<Instruction>(V)) {
844 if (!I->getParent() || !I->getParent()->getParent())
845 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
846 I);
847 else if (I->getParent()->getParent()->getParent() != &M)
848 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
849 I->getParent()->getParent(),
850 I->getParent()->getParent()->getParent());
851 return false;
852 } else if (const Function *F = dyn_cast<Function>(V)) {
853 if (F->getParent() != &M)
854 CheckFailed("Global is used by function in a different module", &GV, &M,
855 F, F->getParent());
856 return false;
857 }
858 return true;
859 });
860}
861
// Checks specific to GlobalVariables: alignment bounds, initializer/type
// agreement, the special llvm.global_ctors/dtors and llvm.used/compiler.used
// shapes, debug-info attachments, scalable-type and address-space-size
// restrictions. Ends by running the generic visitGlobalValue checks.
//
// NOTE(review): several `Check(` condition lines were lost in extraction
// (original lines 875, 881, 891, 893, 917, 919, 932-933, 959) — only their
// message arguments remain; confirm the conditions against upstream
// llvm/lib/IR/Verifier.cpp.
862void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
863 Type *GVType = GV.getValueType();
864
865 if (MaybeAlign A = GV.getAlign()) {
866 Check(A->value() <= Value::MaximumAlignment,
867 "huge alignment values are unsupported", &GV);
868 }
869
870 if (GV.hasInitializer()) {
871 Check(GV.getInitializer()->getType() == GVType,
872 "Global variable initializer type does not match global "
873 "variable type!",
874 &GV);
876 "Global variable initializer must be sized", &GV);
877 visitConstantExprsRecursively(GV.getInitializer());
878 // If the global has common linkage, it must have a zero initializer and
879 // cannot be constant.
880 if (GV.hasCommonLinkage()) {
882 "'common' global must have a zero initializer!", &GV);
883 Check(!GV.isConstant(), "'common' global may not be marked constant!",
884 &GV);
885 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
886 }
887 }
888
// llvm.global_ctors / llvm.global_dtors must be arrays of
// { i32 priority, ptr function, ptr associated-data } triples.
889 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
890 GV.getName() == "llvm.global_dtors")) {
892 "invalid linkage for intrinsic global variable", &GV);
894 "invalid uses of intrinsic global variable", &GV);
895
896 // Don't worry about emitting an error for it not being an array,
897 // visitGlobalValue will complain on appending non-array.
898 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
899 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
900 PointerType *FuncPtrTy =
901 PointerType::get(Context, DL.getProgramAddressSpace());
902 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
903 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
904 STy->getTypeAtIndex(1) == FuncPtrTy,
905 "wrong type for intrinsic global variable", &GV);
906 Check(STy->getNumElements() == 3,
907 "the third field of the element type is mandatory, "
908 "specify ptr null to migrate from the obsoleted 2-field form");
909 Type *ETy = STy->getTypeAtIndex(2);
910 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
911 &GV);
912 }
913 }
914
// llvm.used / llvm.compiler.used must be arrays of named pointers.
915 if (GV.hasName() && (GV.getName() == "llvm.used" ||
916 GV.getName() == "llvm.compiler.used")) {
918 "invalid linkage for intrinsic global variable", &GV);
920 "invalid uses of intrinsic global variable", &GV);
921
922 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
923 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
924 Check(PTy, "wrong type for intrinsic global variable", &GV);
925 if (GV.hasInitializer()) {
926 const Constant *Init = GV.getInitializer();
927 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
928 Check(InitArray, "wrong initializer for intrinsic global variable",
929 Init);
930 for (Value *Op : InitArray->operands()) {
931 Value *V = Op->stripPointerCasts();
934 Twine("invalid ") + GV.getName() + " member", V);
935 Check(V->hasName(),
936 Twine("members of ") + GV.getName() + " must be named", V);
937 }
938 }
939 }
940 }
941
942 // Visit any debug info attachments.
// NOTE(review): the declaration of the `MDs` vector (original line 943,
// presumably a SmallVector<MDNode *>) was lost in extraction.
944 GV.getMetadata(LLVMContext::MD_dbg, MDs);
945 for (auto *MD : MDs) {
946 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
947 visitDIGlobalVariableExpression(*GVE);
948 else
949 CheckDI(false, "!dbg attachment of global variable must be a "
950 "DIGlobalVariableExpression");
951 }
952
953 // Scalable vectors cannot be global variables, since we don't know
954 // the runtime size.
955 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
956
957 // Check if it is or contains a target extension type that disallows being
958 // used as a global.
960 "Global @" + GV.getName() + " has illegal target extension type",
961 GVType);
962
963 // Check that the address space can hold all bits of the type, recognized
964 // by an access in the address space being able to reach all bytes of the
965 // type.
966 Check(!GVType->isSized() ||
967 isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
968 GV.getGlobalSize(DL)),
969 "Global variable is too large to fit into the address space", &GV,
970 GVType);
971
972 if (!GV.hasInitializer()) {
973 visitGlobalValue(GV);
974 return;
975 }
976
977 // Walk any aggregate initializers looking for bitcasts between address spaces
978 visitConstantExprsRecursively(GV.getInitializer());
979
980 visitGlobalValue(GV);
981}
982
983void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
984 SmallPtrSet<const GlobalAlias*, 4> Visited;
985 Visited.insert(&GA);
986 visitAliaseeSubExpr(Visited, GA, C);
987}
988
// Recursively verify the aliasee expression of GA. Visited records every
// GlobalAlias encountered on the current chain so alias cycles are diagnosed
// instead of recursing forever.
// NOTE(review): this chunk appears to be missing a few lines around the
// available_externally checks; the visible tokens are preserved as-is.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias must ultimately resolve to a definition in this module.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    // Aliases of aliases are allowed, but the chain must be acyclic and must
    // not pass through an interposable alias.
    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk all operands: follow aliasee chains through aliases, and verify any
  // other constant subexpressions structurally.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1027
// Verify a global alias: linkage, aliasee presence, type agreement with the
// aliasee, and the whole aliasee expression tree.
// NOTE(review): the opening Check(...) call of the linkage test appears to be
// truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
      "Alias should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, external, or available_externally linkage!",
      &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  // The aliasee is either a global directly, or a constant expression over
  // one.
  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle-checked).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Checks common to all global values.
  visitGlobalValue(GA);
}
1045
// Verify a global ifunc: metadata restrictions (!dbg and !prof are rejected),
// linkage, and that the resolver is a defined Function of the right pointer
// type.
// NOTE(review): a few lines (metadata vector declaration, linkage-check and
// return-type-check openers) appear truncated in this chunk; visible tokens
// are preserved as-is.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver operand itself must be a pointer in the ifunc's own address
  // space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1080
1081void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1082 // There used to be various other llvm.dbg.* nodes, but we don't support
1083 // upgrading them and we want to reserve the namespace for future uses.
1084 if (NMD.getName().starts_with("llvm.dbg."))
1085 CheckDI(NMD.getName() == "llvm.dbg.cu",
1086 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1087 for (const MDNode *MD : NMD.operands()) {
1088 if (NMD.getName() == "llvm.dbg.cu")
1089 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1090
1091 if (!MD)
1092 continue;
1093
1094 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1095 }
1096}
1097
// Verify one MDNode and (recursively) its operands. AllowLocs controls
// whether DILocation operands are legal in this position.
// NOTE(review): two lines of the llvm.loop.estimated_trip_count check (the
// string comparison and the Count extraction) appear truncated in this chunk;
// visible tokens are preserved as-is.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each MDNode subclass; plain
  // tuples need no class-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands: function-local metadata is never legal at global
  // scope, and DILocations are only legal where the caller allows them.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1152
1153void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1154 Check(MD.getValue(), "Expected valid value", &MD);
1155 Check(!MD.getValue()->getType()->isMetadataTy(),
1156 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1157
1158 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1159 if (!L)
1160 return;
1161
1162 Check(F, "function-local metadata used outside a function", L);
1163
1164 // If this was an instruction, bb, or argument, verify that it is in the
1165 // function that we expect.
1166 Function *ActualF = nullptr;
1167 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1168 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1169 ActualF = I->getParent()->getParent();
1170 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1171 ActualF = BB->getParent();
1172 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1173 ActualF = A->getParent();
1174 assert(ActualF && "Unimplemented function local metadata case!");
1175
1176 Check(ActualF == F, "function-local metadata used in wrong function", L);
1177}
1178
1179void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1180 for (const ValueAsMetadata *VAM : AL.getArgs())
1181 visitValueAsMetadata(*VAM, F);
1182}
1183
1184void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1185 Metadata *MD = MDV.getMetadata();
1186 if (auto *N = dyn_cast<MDNode>(MD)) {
1187 visitMDNode(*N, AreDebugLocsAllowed::No);
1188 return;
1189 }
1190
1191 // Only visit each node once. Metadata can be mutually recursive, so this
1192 // avoids infinite recursion here, as well as being an optimization.
1193 if (!MDNodes.insert(MD).second)
1194 return;
1195
1196 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1197 visitValueAsMetadata(*V, F);
1198
1199 if (auto *AL = dyn_cast<DIArgList>(MD))
1200 visitDIArgList(*AL, F);
1201}
1202
// Null-tolerant kind predicates for optional debug-info operands: a missing
// operand (nullptr) is accepted everywhere these helpers are used.
static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1207
1208void Verifier::visitDILocation(const DILocation &N) {
1209 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1210 "location requires a valid scope", &N, N.getRawScope());
1211 if (auto *IA = N.getRawInlinedAt())
1212 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1213 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1214 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1215}
1216
1217void Verifier::visitGenericDINode(const GenericDINode &N) {
1218 CheckDI(N.getTag(), "invalid tag", &N);
1219}
1220
1221void Verifier::visitDIScope(const DIScope &N) {
1222 if (auto *F = N.getRawFile())
1223 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1224}
1225
1226void Verifier::visitDIType(const DIType &N) {
1227 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1228 visitDIScope(N);
1229 CheckDI(N.getRawFile() || N.getLine() == 0, "line specified with no file", &N,
1230 N.getLine());
1231}
1232
// Verify a DISubrangeType. All bound operands are optional, but when present
// must be one of the accepted metadata forms.
// NOTE(review): the opening CheckDI(...) of the SizeInBits test appears
// truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1266
// Verify a DISubrange: count and upperBound are mutually exclusive, and each
// bound operand must be one of the accepted metadata forms.
// NOTE(review): the opening CheckDI(...) of the count-value test appears
// truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
          // Constant counts must be >= -1 (-1 encodes an empty array).
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1294
1295void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1296 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1297 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1298 "GenericSubrange can have any one of count or upperBound", &N);
1299 auto *CBound = N.getRawCountNode();
1300 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1301 "Count must be signed constant or DIVariable or DIExpression", &N);
1302 auto *LBound = N.getRawLowerBound();
1303 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1304 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1305 "LowerBound must be signed constant or DIVariable or DIExpression",
1306 &N);
1307 auto *UBound = N.getRawUpperBound();
1308 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1309 "UpperBound must be signed constant or DIVariable or DIExpression",
1310 &N);
1311 auto *Stride = N.getRawStride();
1312 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1313 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1314 "Stride must be signed constant or DIVariable or DIExpression", &N);
1315}
1316
1317void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1318 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1319}
1320
// Verify a DIBasicType: one of the accepted leaf-type tags, plus the common
// DIType invariants.
// NOTE(review): the opening CheckDI(...) of the SizeInBits test appears
// truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1333
// Verify a DIFixedPointType: base-type invariants plus fixed-point-specific
// encoding, kind, and factor/numerator/denominator consistency.
// NOTE(review): the opening CheckDI(...) calls of the kind and rational
// checks appear truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1352
1353void Verifier::visitDIStringType(const DIStringType &N) {
1354 visitDIType(N);
1355
1356 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1357 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1358 &N);
1359}
1360
// Verify a DIDerivedType: accepted tags, tag-specific extraData forms, set
// base types, address-space applicability, and size encoding.
// NOTE(review): a few lines (the set-base-type dyn_casts and the SizeInBits
// CheckDI opener) appear truncated in this chunk; visible tokens are
// preserved as-is.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // extraData has a tag-dependent meaning; check the accepted form for each.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces only make sense on pointer/reference-like types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1445
1446/// Detect mutually exclusive flags.
1447static bool hasConflictingReferenceFlags(unsigned Flags) {
1448 return ((Flags & DINode::FlagLValueReference) &&
1449 (Flags & DINode::FlagRValueReference)) ||
1450 ((Flags & DINode::FlagTypePassByValue) &&
1451 (Flags & DINode::FlagTypePassByReference));
1452}
1453
1454void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1455 auto *Params = dyn_cast<MDTuple>(&RawParams);
1456 CheckDI(Params, "invalid template params", &N, &RawParams);
1457 for (Metadata *Op : Params->operands()) {
1458 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1459 &N, Params, Op);
1460 }
1461}
1462
// Verify a DICompositeType: accepted tags, element/holder kinds, vector
// shape, template params, and the array-only operands (dataLocation,
// associated, allocated, rank).
// NOTE(review): the reference-flag CheckDI opener and the SizeInBits CheckDI
// opener appear truncated in this chunk; visible tokens are preserved as-is.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // DIBlockByRefStruct is a retired flag bit; reject it explicitly.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // A vector is encoded as exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The Fortran-style array operands are only legal on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1536
// Verify a DISubroutineType: the type array must be a tuple whose entries are
// all types (or null, for e.g. void returns).
// NOTE(review): the reference-flag CheckDI opener appears truncated in this
// chunk; visible tokens are preserved as-is.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  visitDIType(N);
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1549
1550void Verifier::visitDIFile(const DIFile &N) {
1551 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1552 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1553 if (Checksum) {
1554 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1555 "invalid checksum kind", &N);
1556 size_t Size;
1557 switch (Checksum->Kind) {
1558 case DIFile::CSK_MD5:
1559 Size = 32;
1560 break;
1561 case DIFile::CSK_SHA1:
1562 Size = 40;
1563 break;
1564 case DIFile::CSK_SHA256:
1565 Size = 64;
1566 break;
1567 }
1568 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1569 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1570 "invalid checksum", &N);
1571 }
1572}
1573
// Verify a DICompileUnit: distinctness, file, emission kind, and every
// operand list (enums, retained types, globals, imports, macros). The unit is
// recorded in CUVisited so the module-level pass can cross-check llvm.dbg.cu.
// NOTE(review): a few lines (the enum dyn_cast, global-variable check opener,
// imported-entity dyn_cast and local-scope check opener) appear truncated in
// this chunk; visible tokens are preserved as-is.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained types are either types or subprogram declarations (never
      // definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(IE, "invalid imported entity ref", &N, Op);
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1634
// Verify a DISubprogram: scope/file/type operands, retained nodes (locality
// and unique argument numbering), definition-vs-declaration invariants, and
// thrown types.
// NOTE(review): the second line of the retained-node scope extraction and the
// reference-flag CheckDI opener appear truncated in this chunk; visible
// tokens are preserved as-is.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Track argument numbers so two distinct locals cannot claim the same
    // parameter slot.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      // Every retained node must resolve back to this very subprogram.
      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1733
1734void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1735 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1736 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1737 "invalid local scope", &N, N.getRawScope());
1738 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1739 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1740}
1741
1742void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1743 visitDILexicalBlockBase(N);
1744
1745 CheckDI(N.getLine() || !N.getColumn(),
1746 "cannot have column info without line info", &N);
1747}
1748
// A lexical block file shares all structural requirements with other lexical
// block scopes; it has no additional fields to check.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1752
1753void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1754 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1755 if (auto *S = N.getRawScope())
1756 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1757 if (auto *S = N.getRawDecl())
1758 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1759}
1760
1761void Verifier::visitDINamespace(const DINamespace &N) {
1762 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1763 if (auto *S = N.getRawScope())
1764 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1765}
1766
1767void Verifier::visitDIMacro(const DIMacro &N) {
1768 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1769 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1770 "invalid macinfo type", &N);
1771 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1772 if (!N.getValue().empty()) {
1773 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1774 }
1775}
1776
1777void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1778 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1779 "invalid macinfo type", &N);
1780 if (auto *F = N.getRawFile())
1781 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1782
1783 if (auto *Array = N.getRawElements()) {
1784 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1785 for (Metadata *Op : N.getElements()->operands()) {
1786 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1787 }
1788 }
1789}
1790
1791void Verifier::visitDIModule(const DIModule &N) {
1792 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1793 CheckDI(!N.getName().empty(), "anonymous module", &N);
1794}
1795
1796void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1797 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1798}
1799
1800void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1801 visitDITemplateParameter(N);
1802
1803 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1804 &N);
1805}
1806
1807void Verifier::visitDITemplateValueParameter(
1808 const DITemplateValueParameter &N) {
1809 visitDITemplateParameter(N);
1810
1811 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1812 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1813 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1814 "invalid tag", &N);
1815}
1816
1817void Verifier::visitDIVariable(const DIVariable &N) {
1818 if (auto *S = N.getRawScope())
1819 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1820 if (auto *F = N.getRawFile())
1821 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1822}
1823
// Verify a DIGlobalVariable: common variable checks, correct tag, a valid
// type reference, and a resolvable type for definitions (externs may omit it).
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    // NOTE(review): the start of this CheckDI call (the predicate applied to
    // Member) appears to be truncated in this copy of the file — confirm
    // against upstream before building.
        "invalid static data member declaration", &N, Member);
  }
}
1838
1839void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1840 // Checks common to all variables.
1841 visitDIVariable(N);
1842
1843 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1844 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1845 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1846 "local variable requires a valid scope", &N, N.getRawScope());
1847 if (auto Ty = N.getType())
1848 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1849}
1850
1851void Verifier::visitDIAssignID(const DIAssignID &N) {
1852 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1853 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1854}
1855
1856void Verifier::visitDILabel(const DILabel &N) {
1857 if (auto *S = N.getRawScope())
1858 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1859 if (auto *F = N.getRawFile())
1860 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1861
1862 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1863 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1864 "label requires a valid scope", &N, N.getRawScope());
1865}
1866
// Verify a DIExpression; the node validates its own opcode stream via
// DIExpression::isValid().
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1870
1871void Verifier::visitDIGlobalVariableExpression(
1872 const DIGlobalVariableExpression &GVE) {
1873 CheckDI(GVE.getVariable(), "missing variable");
1874 if (auto *Var = GVE.getVariable())
1875 visitDIGlobalVariable(*Var);
1876 if (auto *Expr = GVE.getExpression()) {
1877 visitDIExpression(*Expr);
1878 if (auto Fragment = Expr->getFragmentInfo())
1879 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1880 }
1881}
1882
1883void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1884 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1885 if (auto *T = N.getRawType())
1886 CheckDI(isType(T), "invalid type ref", &N, T);
1887 if (auto *F = N.getRawFile())
1888 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1889}
1890
1891void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1892 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1893 N.getTag() == dwarf::DW_TAG_imported_declaration,
1894 "invalid tag", &N);
1895 if (auto *S = N.getRawScope())
1896 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1897 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1898 N.getRawEntity());
1899}
1900
1901void Verifier::visitComdat(const Comdat &C) {
1902 // In COFF the Module is invalid if the GlobalValue has private linkage.
1903 // Entities with private linkage don't have entries in the symbol table.
1904 if (TT.isOSBinFormatCOFF())
1905 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1906 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1907 GV);
1908}
1909
1910void Verifier::visitModuleIdents() {
1911 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1912 if (!Idents)
1913 return;
1914
1915 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1916 // Scan each llvm.ident entry and make sure that this requirement is met.
1917 for (const MDNode *N : Idents->operands()) {
1918 Check(N->getNumOperands() == 1,
1919 "incorrect number of operands in llvm.ident metadata", N);
1920 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1921 ("invalid value for llvm.ident metadata entry operand"
1922 "(the operand should be a string)"),
1923 N->getOperand(0));
1924 }
1925}
1926
1927void Verifier::visitModuleCommandLines() {
1928 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1929 if (!CommandLines)
1930 return;
1931
1932 // llvm.commandline takes a list of metadata entry. Each entry has only one
1933 // string. Scan each llvm.commandline entry and make sure that this
1934 // requirement is met.
1935 for (const MDNode *N : CommandLines->operands()) {
1936 Check(N->getNumOperands() == 1,
1937 "incorrect number of operands in llvm.commandline metadata", N);
1938 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1939 ("invalid value for llvm.commandline metadata entry operand"
1940 "(the operand should be a string)"),
1941 N->getOperand(0));
1942 }
1943}
1944
1945void Verifier::visitModuleErrnoTBAA() {
1946 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1947 if (!ErrnoTBAA)
1948 return;
1949
1950 Check(ErrnoTBAA->getNumOperands() >= 1,
1951 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1952
1953 for (const MDNode *N : ErrnoTBAA->operands())
1954 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1955}
1956
// Verify the llvm.module.flags named metadata: check each flag individually,
// enforce that the two aarch64 pauthabi flags appear together or not at all,
// and validate all collected 'require' entries against the seen flags.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  // Sentinel uint64_t(-1) means "flag not seen".
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(MDN, SeenIDs, Requirements);
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        // NOTE(review): the initializer of PAP (presumably a constant-int
        // extraction of operand 2) is missing in this copy of the file —
        // confirm against upstream before building.
        if (const auto *PAP =
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        // NOTE(review): the initializer of PAV is likewise missing here.
        if (const auto *PAV =
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // Both pauthabi flags must be present together, or neither.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
2007
// Verify a single llvm.module.flags entry: a (behavior, ID, value) triple.
// Behavior-specific value constraints are enforced, 'require' entries are
// queued into Requirements for later cross-checking, and non-'require' IDs
// must be unique across the module (tracked in SeenIDs).
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // NOTE(review): the opening of the first Check call (the extraction of
    // operand 0 as a constant int) is missing in this copy of the file —
    // confirm against upstream before building.
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
    // NOTE(review): the opening of this Check call (extracting operand 2 as a
    // constant int) is missing in this copy — confirm against upstream.
          "invalid value for 'max' module flag (expected constant integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  if (ID->getString() == "wchar_size") {
    // NOTE(review): the initializer of Value (extracting operand 2 as a
    // constant int) is missing in this copy — confirm against upstream.
    ConstantInt *Value
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    // NOTE(review): the initializer expression for Value is missing in this
    // copy — confirm against upstream.
    ConstantInt *Value =
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
2115
2116void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2117 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2118 if (!FuncMDO)
2119 return;
2120 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2121 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2122 "expected a Function or null", FuncMDO);
2123 };
2124 auto Node = dyn_cast_or_null<MDNode>(MDO);
2125 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2126 CheckFunction(Node->getOperand(0));
2127 CheckFunction(Node->getOperand(1));
2128 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2129 Check(Count && Count->getType()->isIntegerTy(),
2130 "expected an integer constant", Node->getOperand(2));
2131}
2132
// Verify per-attribute type constraints: known string attributes declared as
// STRBOOL must carry a boolean-ish value, and enum attributes must carry an
// argument exactly when their kind is an integer attribute kind.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// The X-macro include below expands one ATTRIBUTE_STRBOOL check per
// string-boolean attribute defined in Attributes.inc.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Enum attributes: the presence of an integer argument must match
    // whether the attribute kind is defined as an int attribute.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2158
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  // Every attribute must either be a string attribute or be usable on a
  // parameter position.
  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' may only be combined with 'range'; subtracting the (0/1)
  // presence of 'range' leaves exactly the 'immarg' attribute itself.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject any non-string attribute that is incompatible with Ty.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
      // NOTE(review): the opening of this Check call (the predicate on
      // ByValTy) is missing in this copy of the file — confirm against
      // upstream before building.
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
    // NOTE(review): the opening of this Check call (the ordered-ranges
    // predicate on Inits) is missing in this copy — confirm against upstream.
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
    // NOTE(review): the opening of this Check call (comparing CR's bit width
    // against Ty) is missing in this copy — confirm against upstream.
          "Range bit width must match type bit width!", V);
  }
}
2325
2326void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2327 const Value *V) {
2328 if (Attrs.hasFnAttr(Attr)) {
2329 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2330 unsigned N;
2331 if (S.getAsInteger(10, N))
2332 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2333 }
2334}
2335
2336// Check parameter attributes against a function type.
2337// The value V is printed in error messages.
2338void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2339 const Value *V, bool IsIntrinsic,
2340 bool IsInlineAsm) {
2341 if (Attrs.isEmpty())
2342 return;
2343
2344 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2345 Check(Attrs.hasParentContext(Context),
2346 "Attribute list does not match Module context!", &Attrs, V);
2347 for (const auto &AttrSet : Attrs) {
2348 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2349 "Attribute set does not match Module context!", &AttrSet, V);
2350 for (const auto &A : AttrSet) {
2351 Check(A.hasParentContext(Context),
2352 "Attribute does not match Module context!", &A, V);
2353 }
2354 }
2355 }
2356
2357 bool SawNest = false;
2358 bool SawReturned = false;
2359 bool SawSRet = false;
2360 bool SawSwiftSelf = false;
2361 bool SawSwiftAsync = false;
2362 bool SawSwiftError = false;
2363
2364 // Verify return value attributes.
2365 AttributeSet RetAttrs = Attrs.getRetAttrs();
2366 for (Attribute RetAttr : RetAttrs)
2367 Check(RetAttr.isStringAttribute() ||
2368 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2369 "Attribute '" + RetAttr.getAsString() +
2370 "' does not apply to function return values",
2371 V);
2372
2373 unsigned MaxParameterWidth = 0;
2374 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2375 if (Ty->isVectorTy()) {
2376 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2377 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2378 if (Size > MaxParameterWidth)
2379 MaxParameterWidth = Size;
2380 }
2381 }
2382 };
2383 GetMaxParameterWidth(FT->getReturnType());
2384 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2385
2386 // Verify parameter attributes.
2387 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2388 Type *Ty = FT->getParamType(i);
2389 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2390
2391 if (!IsIntrinsic) {
2392 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2393 "immarg attribute only applies to intrinsics", V);
2394 if (!IsInlineAsm)
2395 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2396 "Attribute 'elementtype' can only be applied to intrinsics"
2397 " and inline asm.",
2398 V);
2399 }
2400
2401 verifyParameterAttrs(ArgAttrs, Ty, V);
2402 GetMaxParameterWidth(Ty);
2403
2404 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2405 Check(!SawNest, "More than one parameter has attribute nest!", V);
2406 SawNest = true;
2407 }
2408
2409 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2410 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2411 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2412 "Incompatible argument and return types for 'returned' attribute",
2413 V);
2414 SawReturned = true;
2415 }
2416
2417 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2418 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2419 Check(i == 0 || i == 1,
2420 "Attribute 'sret' is not on first or second parameter!", V);
2421 SawSRet = true;
2422 }
2423
2424 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2425 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2426 SawSwiftSelf = true;
2427 }
2428
2429 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2430 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2431 SawSwiftAsync = true;
2432 }
2433
2434 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2435 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2436 SawSwiftError = true;
2437 }
2438
2439 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2440 Check(i == FT->getNumParams() - 1,
2441 "inalloca isn't on the last parameter!", V);
2442 }
2443 }
2444
2445 if (!Attrs.hasFnAttrs())
2446 return;
2447
2448 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2449 for (Attribute FnAttr : Attrs.getFnAttrs())
2450 Check(FnAttr.isStringAttribute() ||
2451 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2452 "Attribute '" + FnAttr.getAsString() +
2453 "' does not apply to functions!",
2454 V);
2455
2456 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2457 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2458 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2459
2460 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2461 Check(Attrs.hasFnAttr(Attribute::NoInline),
2462 "Attribute 'optnone' requires 'noinline'!", V);
2463
2464 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2465 "Attributes 'optsize and optnone' are incompatible!", V);
2466
2467 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2468 "Attributes 'minsize and optnone' are incompatible!", V);
2469
2470 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2471 "Attributes 'optdebug and optnone' are incompatible!", V);
2472 }
2473
2474 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2475 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2476 "Attributes "
2477 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2478 V);
2479
2480 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2481 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2482 "Attributes 'optsize and optdebug' are incompatible!", V);
2483
2484 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2485 "Attributes 'minsize and optdebug' are incompatible!", V);
2486 }
2487
2488 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2489 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2490 "Attribute writable and memory without argmem: write are incompatible!",
2491 V);
2492
2493 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2494 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2495 "Attributes 'aarch64_pstate_sm_enabled and "
2496 "aarch64_pstate_sm_compatible' are incompatible!",
2497 V);
2498 }
2499
2500 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2501 Attrs.hasFnAttr("aarch64_inout_za") +
2502 Attrs.hasFnAttr("aarch64_out_za") +
2503 Attrs.hasFnAttr("aarch64_preserves_za") +
2504 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2505 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2506 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2507 "'aarch64_za_state_agnostic' are mutually exclusive",
2508 V);
2509
2510 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2511 Attrs.hasFnAttr("aarch64_in_zt0") +
2512 Attrs.hasFnAttr("aarch64_inout_zt0") +
2513 Attrs.hasFnAttr("aarch64_out_zt0") +
2514 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2515 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2516 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2517 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2518 "'aarch64_za_state_agnostic' are mutually exclusive",
2519 V);
2520
2521 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2522 const GlobalValue *GV = cast<GlobalValue>(V);
2524 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2525 }
2526
2527 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2528 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2529 if (ParamNo >= FT->getNumParams()) {
2530 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2531 return false;
2532 }
2533
2534 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2535 CheckFailed("'allocsize' " + Name +
2536 " argument must refer to an integer parameter",
2537 V);
2538 return false;
2539 }
2540
2541 return true;
2542 };
2543
2544 if (!CheckParam("element size", Args->first))
2545 return;
2546
2547 if (Args->second && !CheckParam("number of elements", *Args->second))
2548 return;
2549 }
2550
2551 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2552 AllocFnKind K = Attrs.getAllocKind();
2554 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2555 if (!is_contained(
2556 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2557 Type))
2558 CheckFailed(
2559 "'allockind()' requires exactly one of alloc, realloc, and free");
2560 if ((Type == AllocFnKind::Free) &&
2561 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2562 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2563 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2564 "or aligned modifiers.");
2565 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2566 if ((K & ZeroedUninit) == ZeroedUninit)
2567 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2568 }
2569
2570 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2571 StringRef S = A.getValueAsString();
2572 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2573 Function *Variant = M.getFunction(S);
2574 if (Variant) {
2575 Attribute Family = Attrs.getFnAttr("alloc-family");
2576 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2577 if (Family.isValid())
2578 Check(VariantFamily.isValid() &&
2579 VariantFamily.getValueAsString() == Family.getValueAsString(),
2580 "'alloc-variant-zeroed' must name a function belonging to the "
2581 "same 'alloc-family'");
2582
2583 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2584 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2585 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2586 "'alloc-variant-zeroed' must name a function with "
2587 "'allockind(\"zeroed\")'");
2588
2589 Check(FT == Variant->getFunctionType(),
2590 "'alloc-variant-zeroed' must name a function with the same "
2591 "signature");
2592
2593 if (const Function *F = dyn_cast<Function>(V))
2594 Check(F->getCallingConv() == Variant->getCallingConv(),
2595 "'alloc-variant-zeroed' must name a function with the same "
2596 "calling convention");
2597 }
2598 }
2599
2600 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2601 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2602 if (VScaleMin == 0)
2603 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2604 else if (!isPowerOf2_32(VScaleMin))
2605 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2606 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2607 if (VScaleMax && VScaleMin > VScaleMax)
2608 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2609 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2610 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2611 }
2612
2613 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2614 StringRef FP = FPAttr.getValueAsString();
2615 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2616 FP != "non-leaf-no-reserve")
2617 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2618 }
2619
2620 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2621 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2622 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2623 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2624 .getValueAsString()
2625 .empty(),
2626 "\"patchable-function-entry-section\" must not be empty");
2627 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2628
2629 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2630 StringRef S = A.getValueAsString();
2631 if (S != "none" && S != "all" && S != "non-leaf")
2632 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2633 }
2634
2635 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2636 StringRef S = A.getValueAsString();
2637 if (S != "a_key" && S != "b_key")
2638 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2639 V);
2640 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2641 CheckFailed(
2642 "'sign-return-address-key' present without `sign-return-address`");
2643 }
2644 }
2645
2646 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2647 StringRef S = A.getValueAsString();
2648 if (S != "" && S != "true" && S != "false")
2649 CheckFailed(
2650 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2651 }
2652
2653 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2654 StringRef S = A.getValueAsString();
2655 if (S != "" && S != "true" && S != "false")
2656 CheckFailed(
2657 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2658 }
2659
2660 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2661 StringRef S = A.getValueAsString();
2662 if (S != "" && S != "true" && S != "false")
2663 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2664 V);
2665 }
2666
2667 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2668 StringRef S = A.getValueAsString();
2669 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2670 if (!Info)
2671 CheckFailed("invalid name for a VFABI variant: " + S, V);
2672 }
2673
2674 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2675 StringRef S = A.getValueAsString();
2677 S.split(Args, ',');
2678 Check(Args.size() >= 5,
2679 "modular-format attribute requires at least 5 arguments", V);
2680 unsigned FirstArgIdx;
2681 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2682 "modular-format attribute first arg index is not an integer", V);
2683 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2684 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2685 "modular-format attribute first arg index is out of bounds", V);
2686 }
2687
2688 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2689 StringRef S = A.getValueAsString();
2690 if (!S.empty()) {
2691 for (auto FeatureFlag : split(S, ',')) {
2692 if (FeatureFlag.empty())
2693 CheckFailed(
2694 "target-features attribute should not contain an empty string");
2695 else
2696 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2697 "target feature '" + FeatureFlag +
2698 "' must start with a '+' or '-'",
2699 V);
2700 }
2701 }
2702 }
2703}
2704void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2705 Check(MD->getNumOperands() == 2,
2706 "'unknown' !prof should have a single additional operand", MD);
2707 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2708 Check(PassName != nullptr,
2709 "'unknown' !prof should have an additional operand of type "
2710 "string");
2711 Check(!PassName->getString().empty(),
2712 "the 'unknown' !prof operand should not be an empty string");
2713}
2714
// Validate the function-level metadata attachments this verifier knows
// about: !prof (entry counts / "unknown" markers) and !kcfi_type (KCFI
// type hashes). Other attachment kinds are ignored here.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // A !prof node is (name-string, payload, ...), so at least 2 operands.
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the string argument to equalsStr() appears to have been
      // lost from this copy (presumably the "unknown" profile marker);
      // confirm against upstream before building.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
      // NOTE(review): the Check(...) head for the "is an MDString" test
      // appears to be truncated here — only its message line survives.
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      // NOTE(review): the Check(...) head comparing ProfName against the two
      // accepted entry-count names appears to be truncated here as well.
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
      // NOTE(review): truncated Check(...) head (presumably an
      // isa<ConstantAsMetadata> test on operand 1) — confirm upstream.
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      // !kcfi_type holds a single 32-bit integer type hash.
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
      // NOTE(review): truncated Check(...) head (presumably an
      // isa<ConstantAsMetadata> test) — confirm upstream.
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
      // NOTE(review): truncated Check(...) head (presumably a 32-bit-width
      // test on the constant) — confirm upstream.
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2764
2765void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2766 if (EntryC->getNumOperands() == 0)
2767 return;
2768
2769 if (!ConstantExprVisited.insert(EntryC).second)
2770 return;
2771
2773 Stack.push_back(EntryC);
2774
2775 while (!Stack.empty()) {
2776 const Constant *C = Stack.pop_back_val();
2777
2778 // Check this constant expression.
2779 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2780 visitConstantExpr(CE);
2781
2782 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2783 visitConstantPtrAuth(CPA);
2784
2785 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2786 // Global Values get visited separately, but we do need to make sure
2787 // that the global value is in the correct module
2788 Check(GV->getParent() == &M, "Referencing global in another module!",
2789 EntryC, &M, GV, GV->getParent());
2790 continue;
2791 }
2792
2793 // Visit all sub-expressions.
2794 for (const Use &U : C->operands()) {
2795 const auto *OpC = dyn_cast<Constant>(U);
2796 if (!OpC)
2797 continue;
2798 if (!ConstantExprVisited.insert(OpC).second)
2799 continue;
2800 Stack.push_back(OpC);
2801 }
2802 }
2803}
2804
2805void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2806 if (CE->getOpcode() == Instruction::BitCast)
2807 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2808 CE->getType()),
2809 "Invalid bitcast", CE);
2810 else if (CE->getOpcode() == Instruction::PtrToAddr)
2811 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2812}
2813
// Structural checks for a ptrauth signed-pointer constant: pointer-typed
// base, i32 key, i64 discriminator, and (per the surviving messages below)
// pointer-typed address discriminator and deactivation symbol.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

  // NOTE(review): the Check(...) head for the address-discriminator test
  // appears to be missing from this copy — only its message survives.
  // Confirm against upstream.
        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

  // NOTE(review): likewise, the Check(...) heads for the two
  // deactivation-symbol tests below appear to be truncated.
        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2838
2839bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2840 // There shouldn't be more attribute sets than there are parameters plus the
2841 // function and return value.
2842 return Attrs.getNumAttrSets() <= Params + 2;
2843}
2844
2845void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2846 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2847 unsigned ArgNo = 0;
2848 unsigned LabelNo = 0;
2849 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2850 if (CI.Type == InlineAsm::isLabel) {
2851 ++LabelNo;
2852 continue;
2853 }
2854
2855 // Only deal with constraints that correspond to call arguments.
2856 if (!CI.hasArg())
2857 continue;
2858
2859 if (CI.isIndirect) {
2860 const Value *Arg = Call.getArgOperand(ArgNo);
2861 Check(Arg->getType()->isPointerTy(),
2862 "Operand for indirect constraint must have pointer type", &Call);
2863
2865 "Operand for indirect constraint must have elementtype attribute",
2866 &Call);
2867 } else {
2868 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2869 "Elementtype attribute can only be applied for indirect "
2870 "constraints",
2871 &Call);
2872 }
2873
2874 ArgNo++;
2875 }
2876
2877 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2878 Check(LabelNo == CallBr->getNumIndirectDests(),
2879 "Number of label constraints does not match number of callbr dests",
2880 &Call);
2881 } else {
2882 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2883 &Call);
2884 }
2885}
2886
2887/// Verify that statepoint intrinsic is well formed.
2888void Verifier::verifyStatepoint(const CallBase &Call) {
2889 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2890
2893 "gc.statepoint must read and write all memory to preserve "
2894 "reordering restrictions required by safepoint semantics",
2895 Call);
2896
2897 const int64_t NumPatchBytes =
2898 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2899 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2900 Check(NumPatchBytes >= 0,
2901 "gc.statepoint number of patchable bytes must be "
2902 "positive",
2903 Call);
2904
2905 Type *TargetElemType = Call.getParamElementType(2);
2906 Check(TargetElemType,
2907 "gc.statepoint callee argument must have elementtype attribute", Call);
2908 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2909 Check(TargetFuncType,
2910 "gc.statepoint callee elementtype must be function type", Call);
2911
2912 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2913 Check(NumCallArgs >= 0,
2914 "gc.statepoint number of arguments to underlying call "
2915 "must be positive",
2916 Call);
2917 const int NumParams = (int)TargetFuncType->getNumParams();
2918 if (TargetFuncType->isVarArg()) {
2919 Check(NumCallArgs >= NumParams,
2920 "gc.statepoint mismatch in number of vararg call args", Call);
2921
2922 // TODO: Remove this limitation
2923 Check(TargetFuncType->getReturnType()->isVoidTy(),
2924 "gc.statepoint doesn't support wrapping non-void "
2925 "vararg functions yet",
2926 Call);
2927 } else
2928 Check(NumCallArgs == NumParams,
2929 "gc.statepoint mismatch in number of call args", Call);
2930
2931 const uint64_t Flags
2932 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2933 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2934 "unknown flag used in gc.statepoint flags argument", Call);
2935
2936 // Verify that the types of the call parameter arguments match
2937 // the type of the wrapped callee.
2938 AttributeList Attrs = Call.getAttributes();
2939 for (int i = 0; i < NumParams; i++) {
2940 Type *ParamType = TargetFuncType->getParamType(i);
2941 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2942 Check(ArgType == ParamType,
2943 "gc.statepoint call argument does not match wrapped "
2944 "function type",
2945 Call);
2946
2947 if (TargetFuncType->isVarArg()) {
2948 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2949 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2950 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2951 }
2952 }
2953
2954 const int EndCallArgsInx = 4 + NumCallArgs;
2955
2956 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2957 Check(isa<ConstantInt>(NumTransitionArgsV),
2958 "gc.statepoint number of transition arguments "
2959 "must be constant integer",
2960 Call);
2961 const int NumTransitionArgs =
2962 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2963 Check(NumTransitionArgs == 0,
2964 "gc.statepoint w/inline transition bundle is deprecated", Call);
2965 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2966
2967 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2968 Check(isa<ConstantInt>(NumDeoptArgsV),
2969 "gc.statepoint number of deoptimization arguments "
2970 "must be constant integer",
2971 Call);
2972 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2973 Check(NumDeoptArgs == 0,
2974 "gc.statepoint w/inline deopt operands is deprecated", Call);
2975
2976 const int ExpectedNumArgs = 7 + NumCallArgs;
2977 Check(ExpectedNumArgs == (int)Call.arg_size(),
2978 "gc.statepoint too many arguments", Call);
2979
2980 // Check that the only uses of this gc.statepoint are gc.result or
2981 // gc.relocate calls which are tied to this statepoint and thus part
2982 // of the same statepoint sequence
2983 for (const User *U : Call.users()) {
2984 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2985 Check(UserCall, "illegal use of statepoint token", Call, U);
2986 if (!UserCall)
2987 continue;
2988 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2989 "gc.result or gc.relocate are the only value uses "
2990 "of a gc.statepoint",
2991 Call, U);
2992 if (isa<GCResultInst>(UserCall)) {
2993 Check(UserCall->getArgOperand(0) == &Call,
2994 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2995 } else if (isa<GCRelocateInst>(Call)) {
2996 Check(UserCall->getArgOperand(0) == &Call,
2997 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2998 }
2999 }
3000
3001 // Note: It is legal for a single derived pointer to be listed multiple
3002 // times. It's non-optimal, but it is legal. It can also happen after
3003 // insertion if we strip a bitcast away.
3004 // Note: It is really tempting to check that each base is relocated and
3005 // that a derived pointer is never reused as a base pointer. This turns
3006 // out to be problematic since optimizations run after safepoint insertion
3007 // can recognize equality properties that the insertion logic doesn't know
3008 // about. See example statepoint.ll in the verifier subdirectory
3009}
3010
3011void Verifier::verifyFrameRecoverIndices() {
3012 for (auto &Counts : FrameEscapeInfo) {
3013 Function *F = Counts.first;
3014 unsigned EscapedObjectCount = Counts.second.first;
3015 unsigned MaxRecoveredIndex = Counts.second.second;
3016 Check(MaxRecoveredIndex <= EscapedObjectCount,
3017 "all indices passed to llvm.localrecover must be less than the "
3018 "number of arguments passed to llvm.localescape in the parent "
3019 "function",
3020 F);
3021 }
3022}
3023
3024static Instruction *getSuccPad(Instruction *Terminator) {
3025 BasicBlock *UnwindDest;
3026 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3027 UnwindDest = II->getUnwindDest();
3028 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3029 UnwindDest = CSI->getUnwindDest();
3030 else
3031 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3032 return &*UnwindDest->getFirstNonPHIIt();
3033}
3034
// Walk the funclet-pad -> unwind-successor relation recorded in
// SiblingFuncletInfo and reject any cycle: sibling EH funclets may not
// unwind to one another. 'Active' holds the pads on the current walk;
// 'Visited' holds pads whose chains were fully checked on earlier walks.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Rebuild the cycle's node list for the diagnostic, starting from
        // the pad we just re-reached.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        // Check(false, ...) reports the failure and returns from this
        // function.
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3077
// visitFunction - Verify that a function is ok.
//
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Only intrinsics may traffic in metadata/token/x86_amx-typed values.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): the declaration of 'MDs' (a SmallVector of
  // {kind, MDNode*} pairs filled by getAllMetadata) appears to have been
  // lost from this copy — confirm against upstream before building.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3412
3413// verifyBasicBlock - Verify that a basic block is well formed...
3414//
3415void Verifier::visitBasicBlock(BasicBlock &BB) {
3416 InstsInThisBlock.clear();
3417 ConvergenceVerifyHelper.visit(BB);
3418
3419 // Ensure that basic blocks have terminators!
3420 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3421
3422 // Check constraints that this basic block imposes on all of the PHI nodes in
3423 // it.
3424 if (isa<PHINode>(BB.front())) {
3425 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3427 llvm::sort(Preds);
3428 for (const PHINode &PN : BB.phis()) {
3429 Check(PN.getNumIncomingValues() == Preds.size(),
3430 "PHINode should have one entry for each predecessor of its "
3431 "parent basic block!",
3432 &PN);
3433
3434 // Get and sort all incoming values in the PHI node...
3435 Values.clear();
3436 Values.reserve(PN.getNumIncomingValues());
3437 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3438 Values.push_back(
3439 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3440 llvm::sort(Values);
3441
3442 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3443 // Check to make sure that if there is more than one entry for a
3444 // particular basic block in this PHI node, that the incoming values are
3445 // all identical.
3446 //
3447 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3448 Values[i].second == Values[i - 1].second,
3449 "PHI node has multiple entries for the same basic block with "
3450 "different incoming values!",
3451 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3452
3453 // Check to make sure that the predecessors and PHI node entries are
3454 // matched up.
3455 Check(Values[i].first == Preds[i],
3456 "PHI node entries do not match predecessors!", &PN,
3457 Values[i].first, Preds[i]);
3458 }
3459 }
3460 }
3461
3462 // Check that all instructions have their parent pointers set up correctly.
3463 for (auto &I : BB)
3464 {
3465 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3466 }
3467
3468 // Confirm that no issues arise from the debug program.
3469 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3470 &BB);
3471}
3472
3473void Verifier::visitTerminator(Instruction &I) {
3474 // Ensure that terminators only exist at the end of the basic block.
3475 Check(&I == I.getParent()->getTerminator(),
3476 "Terminator found in the middle of a basic block!", I.getParent());
3477 visitInstruction(I);
3478}
3479
3480void Verifier::visitCondBrInst(CondBrInst &BI) {
3482 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3483 visitTerminator(BI);
3484}
3485
3486void Verifier::visitReturnInst(ReturnInst &RI) {
3487 Function *F = RI.getParent()->getParent();
3488 unsigned N = RI.getNumOperands();
3489 if (F->getReturnType()->isVoidTy())
3490 Check(N == 0,
3491 "Found return instr that returns non-void in Function of void "
3492 "return type!",
3493 &RI, F->getReturnType());
3494 else
3495 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3496 "Function return type does not match operand "
3497 "type of return inst!",
3498 &RI, F->getReturnType());
3499
3500 // Check to make sure that the return value has necessary properties for
3501 // terminators...
3502 visitTerminator(RI);
3503}
3504
3505void Verifier::visitSwitchInst(SwitchInst &SI) {
3506 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3507 // Check to make sure that all of the constants in the switch instruction
3508 // have the same type as the switched-on value.
3509 Type *SwitchTy = SI.getCondition()->getType();
3510 SmallPtrSet<ConstantInt*, 32> Constants;
3511 for (auto &Case : SI.cases()) {
3512 Check(isa<ConstantInt>(Case.getCaseValue()),
3513 "Case value is not a constant integer.", &SI);
3514 Check(Case.getCaseValue()->getType() == SwitchTy,
3515 "Switch constants must all be same type as switch value!", &SI);
3516 Check(Constants.insert(Case.getCaseValue()).second,
3517 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3518 }
3519
3520 visitTerminator(SI);
3521}
3522
// Verify an indirectbr: the address operand and every listed destination
// must have pointer type, then the generic terminator rules apply.
// NOTE(review): the Check(...) opening lines for both diagnostics appear to
// be missing from this excerpt; only their message arguments are visible.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
        "Indirectbr operand must have pointer type!", &BI);
  // Each potential destination is checked in turn.
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3532
// Verify a callbr. Only inline-asm "asm goto" and a small allowlist of
// intrinsics (currently amdgcn_kill) may use the callbr instruction.
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  if (!CBI.isInlineAsm()) {
    // NOTE(review): a Check(...) opening line appears to be missing here;
    // only its message argument is visible.
          "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The single indirect destination must start with an UnreachableInst
      // or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      // Any other intrinsic (or non-intrinsic callee) is rejected outright.
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm callbr ("asm goto"): the asm statement may not unwind.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3564
3565void Verifier::visitSelectInst(SelectInst &SI) {
3566 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3567 SI.getOperand(2)),
3568 "Invalid operands for select instruction!", &SI);
3569
3570 Check(SI.getTrueValue()->getType() == SI.getType(),
3571 "Select values must have same type as select instruction!", &SI);
3572 visitInstruction(SI);
3573}
3574
3575/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3576/// a pass, if any exist, it's an error.
3577///
3578void Verifier::visitUserOp1(Instruction &I) {
3579 Check(false, "User-defined operators should not live outside of a pass!", &I);
3580}
3581
3582void Verifier::visitTruncInst(TruncInst &I) {
3583 // Get the source and destination types
3584 Type *SrcTy = I.getOperand(0)->getType();
3585 Type *DestTy = I.getType();
3586
3587 // Get the size of the types in bits, we'll need this later
3588 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3589 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3590
3591 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3592 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3593 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3594 "trunc source and destination must both be a vector or neither", &I);
3595 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3596
3597 visitInstruction(I);
3598}
3599
3600void Verifier::visitZExtInst(ZExtInst &I) {
3601 // Get the source and destination types
3602 Type *SrcTy = I.getOperand(0)->getType();
3603 Type *DestTy = I.getType();
3604
3605 // Get the size of the types in bits, we'll need this later
3606 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3607 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3608 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3609 "zext source and destination must both be a vector or neither", &I);
3610 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3611 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3612
3613 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3614
3615 visitInstruction(I);
3616}
3617
3618void Verifier::visitSExtInst(SExtInst &I) {
3619 // Get the source and destination types
3620 Type *SrcTy = I.getOperand(0)->getType();
3621 Type *DestTy = I.getType();
3622
3623 // Get the size of the types in bits, we'll need this later
3624 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3625 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3626
3627 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3628 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3629 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3630 "sext source and destination must both be a vector or neither", &I);
3631 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3632
3633 visitInstruction(I);
3634}
3635
3636void Verifier::visitFPTruncInst(FPTruncInst &I) {
3637 // Get the source and destination types
3638 Type *SrcTy = I.getOperand(0)->getType();
3639 Type *DestTy = I.getType();
3640 // Get the size of the types in bits, we'll need this later
3641 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3642 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3643
3644 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3645 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3646 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3647 "fptrunc source and destination must both be a vector or neither", &I);
3648 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3649
3650 visitInstruction(I);
3651}
3652
3653void Verifier::visitFPExtInst(FPExtInst &I) {
3654 // Get the source and destination types
3655 Type *SrcTy = I.getOperand(0)->getType();
3656 Type *DestTy = I.getType();
3657
3658 // Get the size of the types in bits, we'll need this later
3659 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3660 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3661
3662 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3663 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3664 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3665 "fpext source and destination must both be a vector or neither", &I);
3666 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3667
3668 visitInstruction(I);
3669}
3670
3671void Verifier::visitUIToFPInst(UIToFPInst &I) {
3672 // Get the source and destination types
3673 Type *SrcTy = I.getOperand(0)->getType();
3674 Type *DestTy = I.getType();
3675
3676 bool SrcVec = SrcTy->isVectorTy();
3677 bool DstVec = DestTy->isVectorTy();
3678
3679 Check(SrcVec == DstVec,
3680 "UIToFP source and dest must both be vector or scalar", &I);
3681 Check(SrcTy->isIntOrIntVectorTy(),
3682 "UIToFP source must be integer or integer vector", &I);
3683 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3684 &I);
3685
3686 if (SrcVec && DstVec)
3687 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3688 cast<VectorType>(DestTy)->getElementCount(),
3689 "UIToFP source and dest vector length mismatch", &I);
3690
3691 visitInstruction(I);
3692}
3693
3694void Verifier::visitSIToFPInst(SIToFPInst &I) {
3695 // Get the source and destination types
3696 Type *SrcTy = I.getOperand(0)->getType();
3697 Type *DestTy = I.getType();
3698
3699 bool SrcVec = SrcTy->isVectorTy();
3700 bool DstVec = DestTy->isVectorTy();
3701
3702 Check(SrcVec == DstVec,
3703 "SIToFP source and dest must both be vector or scalar", &I);
3704 Check(SrcTy->isIntOrIntVectorTy(),
3705 "SIToFP source must be integer or integer vector", &I);
3706 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3707 &I);
3708
3709 if (SrcVec && DstVec)
3710 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3711 cast<VectorType>(DestTy)->getElementCount(),
3712 "SIToFP source and dest vector length mismatch", &I);
3713
3714 visitInstruction(I);
3715}
3716
3717void Verifier::visitFPToUIInst(FPToUIInst &I) {
3718 // Get the source and destination types
3719 Type *SrcTy = I.getOperand(0)->getType();
3720 Type *DestTy = I.getType();
3721
3722 bool SrcVec = SrcTy->isVectorTy();
3723 bool DstVec = DestTy->isVectorTy();
3724
3725 Check(SrcVec == DstVec,
3726 "FPToUI source and dest must both be vector or scalar", &I);
3727 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3728 Check(DestTy->isIntOrIntVectorTy(),
3729 "FPToUI result must be integer or integer vector", &I);
3730
3731 if (SrcVec && DstVec)
3732 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3733 cast<VectorType>(DestTy)->getElementCount(),
3734 "FPToUI source and dest vector length mismatch", &I);
3735
3736 visitInstruction(I);
3737}
3738
3739void Verifier::visitFPToSIInst(FPToSIInst &I) {
3740 // Get the source and destination types
3741 Type *SrcTy = I.getOperand(0)->getType();
3742 Type *DestTy = I.getType();
3743
3744 bool SrcVec = SrcTy->isVectorTy();
3745 bool DstVec = DestTy->isVectorTy();
3746
3747 Check(SrcVec == DstVec,
3748 "FPToSI source and dest must both be vector or scalar", &I);
3749 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3750 Check(DestTy->isIntOrIntVectorTy(),
3751 "FPToSI result must be integer or integer vector", &I);
3752
3753 if (SrcVec && DstVec)
3754 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3755 cast<VectorType>(DestTy)->getElementCount(),
3756 "FPToSI source and dest vector length mismatch", &I);
3757
3758 visitInstruction(I);
3759}
3760
3761void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3762 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3763 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3764 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3765 V);
3766
3767 if (SrcTy->isVectorTy()) {
3768 auto *VSrc = cast<VectorType>(SrcTy);
3769 auto *VDest = cast<VectorType>(DestTy);
3770 Check(VSrc->getElementCount() == VDest->getElementCount(),
3771 "PtrToAddr vector length mismatch", V);
3772 }
3773
3774 Type *AddrTy = DL.getAddressType(SrcTy);
3775 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3776}
3777
3778void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3779 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3780 visitInstruction(I);
3781}
3782
3783void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3784 // Get the source and destination types
3785 Type *SrcTy = I.getOperand(0)->getType();
3786 Type *DestTy = I.getType();
3787
3788 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3789
3790 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3791 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3792 &I);
3793
3794 if (SrcTy->isVectorTy()) {
3795 auto *VSrc = cast<VectorType>(SrcTy);
3796 auto *VDest = cast<VectorType>(DestTy);
3797 Check(VSrc->getElementCount() == VDest->getElementCount(),
3798 "PtrToInt Vector length mismatch", &I);
3799 }
3800
3801 visitInstruction(I);
3802}
3803
3804void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3805 // Get the source and destination types
3806 Type *SrcTy = I.getOperand(0)->getType();
3807 Type *DestTy = I.getType();
3808
3809 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3810 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3811
3812 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3813 &I);
3814 if (SrcTy->isVectorTy()) {
3815 auto *VSrc = cast<VectorType>(SrcTy);
3816 auto *VDest = cast<VectorType>(DestTy);
3817 Check(VSrc->getElementCount() == VDest->getElementCount(),
3818 "IntToPtr Vector length mismatch", &I);
3819 }
3820 visitInstruction(I);
3821}
3822
3823void Verifier::visitBitCastInst(BitCastInst &I) {
3824 Check(
3825 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3826 "Invalid bitcast", &I);
3827 visitInstruction(I);
3828}
3829
// Verify an addrspacecast: both operands must be pointers (or pointer
// vectors) and, for vectors, the element counts must match.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
  // NOTE(review): the Check(...) opening line comparing the source and
  // destination address spaces appears to be missing from this excerpt;
  // only its message arguments are visible.
        "AddrSpaceCast must be between different address spaces", &I);
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3846
/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  // NOTE(review): the right-hand operand of this '||' (presumably a test
  // that the preceding instruction is also a PHI) appears to be missing
  // from this excerpt.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3872
// Shared verification for every call-like instruction (call, invoke,
// callbr): callee and argument typing, attribute legality, musttail/
// preallocated interactions, and operand-bundle uniqueness rules.
void Verifier::visitCallBase(CallBase &Call) {
  // NOTE(review): the Check(...) opening line (testing that the called
  // operand is a pointer) appears to be missing from this excerpt.
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // NOTE(review): the initializer of Callee (presumably derived from the
  // called operand) is missing from this excerpt.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  // Direct intrinsic calls must use the intrinsic's exact signature.
  if (IsIntrinsic)
    Check(Callee->getFunctionType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
  // NOTE(review): the Check(...) opening line is missing from this excerpt;
  // only its message arguments are visible.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
        "denormal_fpenv attribute may not apply to call sites", Call);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      // immarg operands must be compile-time immediates.
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      // NOTE(review): the initializer of hasOB (presumably counting
      // "preallocated" operand bundles) is missing from this excerpt.
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    // Record whether the fixed parameters already used nest/returned so the
    // varargs portion can be checked for duplicates.
    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

  // NOTE(review): the guard declaring 'ID' (presumably testing
  // Call.getIntrinsicID()) is missing from this excerpt; 'ID' is otherwise
  // undeclared here.
    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      // NOTE(review): the 'else if' line dispatching the branch below
      // (presumably on the clang.arc.attachedcall bundle tag) appears to be
      // missing from this excerpt.
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
  // NOTE(review): the CheckDI/Check opening lines and the condition for this
  // diagnostic are missing from this excerpt; only the message is visible.
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4181
4182void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4183 StringRef Context) {
4184 Check(!Attrs.contains(Attribute::InAlloca),
4185 Twine("inalloca attribute not allowed in ") + Context);
4186 Check(!Attrs.contains(Attribute::InReg),
4187 Twine("inreg attribute not allowed in ") + Context);
4188 Check(!Attrs.contains(Attribute::SwiftError),
4189 Twine("swifterror attribute not allowed in ") + Context);
4190 Check(!Attrs.contains(Attribute::Preallocated),
4191 Twine("preallocated attribute not allowed in ") + Context);
4192 Check(!Attrs.contains(Attribute::ByRef),
4193 Twine("byref attribute not allowed in ") + Context);
4194}
4195
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  // Identical types are trivially congruent.
  if (L == R)
    return true;
  // NOTE(review): the declarations of PL and PR (presumably
  // dyn_cast<PointerType> of L and R) are missing from this excerpt.
  if (!PL || !PR)
    return false;
  // Both are pointers: congruent iff they share an address space.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4207
4208static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4209 static const Attribute::AttrKind ABIAttrs[] = {
4210 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4211 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4212 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4213 Attribute::ByRef};
4214 AttrBuilder Copy(C);
4215 for (auto AK : ABIAttrs) {
4216 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4217 if (Attr.isValid())
4218 Copy.addAttribute(Attr);
4219 }
4220
4221 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4222 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4223 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4224 Attrs.hasParamAttr(I, Attribute::ByRef)))
4225 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4226 return Copy;
4227}
4228
// Enforce the rules for 'musttail' call sites: matching varargs-ness,
// congruent return types, matching calling conventions, the required
// call -> (optional bitcast) -> ret sequence, and matching ABI-impacting
// parameter attributes between caller and callee.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;
  // NOTE(review): the declaration/initializer of 'Next' (presumably the
  // instruction following the call) is missing from this excerpt.

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  // NOTE(review): the final disjunct of this condition appears to be
  // missing from this excerpt.
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4315
4316void Verifier::visitCallInst(CallInst &CI) {
4317 visitCallBase(CI);
4318
4319 if (CI.isMustTailCall())
4320 verifyMustTailCall(CI);
4321}
4322
4323void Verifier::visitInvokeInst(InvokeInst &II) {
4324 visitCallBase(II);
4325
4326 // Verify that the first non-PHI instruction of the unwind destination is an
4327 // exception handling instruction.
4328 Check(
4329 II.getUnwindDest()->isEHPad(),
4330 "The unwind destination does not have an exception handling instruction!",
4331 &II);
4332
4333 visitTerminator(II);
4334}
4335
4336/// visitUnaryOperator - Check the argument to the unary operator.
4337///
4338void Verifier::visitUnaryOperator(UnaryOperator &U) {
4339 Check(U.getType() == U.getOperand(0)->getType(),
4340 "Unary operators must have same type for"
4341 "operands and result!",
4342 &U);
4343
4344 switch (U.getOpcode()) {
4345 // Check that floating-point arithmetic operators are only used with
4346 // floating-point operands.
4347 case Instruction::FNeg:
4348 Check(U.getType()->isFPOrFPVectorTy(),
4349 "FNeg operator only works with float types!", &U);
4350 break;
4351 default:
4352 llvm_unreachable("Unknown UnaryOperator opcode!");
4353 }
4354
4355 visitInstruction(U);
4356}
4357
4358/// visitBinaryOperator - Check that both arguments to the binary operator are
4359/// of the same type!
4360///
4361void Verifier::visitBinaryOperator(BinaryOperator &B) {
4362 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4363 "Both operands to a binary operator are not of the same type!", &B);
4364
4365 switch (B.getOpcode()) {
4366 // Check that integer arithmetic operators are only used with
4367 // integral operands.
4368 case Instruction::Add:
4369 case Instruction::Sub:
4370 case Instruction::Mul:
4371 case Instruction::SDiv:
4372 case Instruction::UDiv:
4373 case Instruction::SRem:
4374 case Instruction::URem:
4375 Check(B.getType()->isIntOrIntVectorTy(),
4376 "Integer arithmetic operators only work with integral types!", &B);
4377 Check(B.getType() == B.getOperand(0)->getType(),
4378 "Integer arithmetic operators must have same type "
4379 "for operands and result!",
4380 &B);
4381 break;
4382 // Check that floating-point arithmetic operators are only used with
4383 // floating-point operands.
4384 case Instruction::FAdd:
4385 case Instruction::FSub:
4386 case Instruction::FMul:
4387 case Instruction::FDiv:
4388 case Instruction::FRem:
4389 Check(B.getType()->isFPOrFPVectorTy(),
4390 "Floating-point arithmetic operators only work with "
4391 "floating-point types!",
4392 &B);
4393 Check(B.getType() == B.getOperand(0)->getType(),
4394 "Floating-point arithmetic operators must have same type "
4395 "for operands and result!",
4396 &B);
4397 break;
4398 // Check that logical operators are only used with integral operands.
4399 case Instruction::And:
4400 case Instruction::Or:
4401 case Instruction::Xor:
4402 Check(B.getType()->isIntOrIntVectorTy(),
4403 "Logical operators only work with integral types!", &B);
4404 Check(B.getType() == B.getOperand(0)->getType(),
4405 "Logical operators must have same type for operands and result!", &B);
4406 break;
4407 case Instruction::Shl:
4408 case Instruction::LShr:
4409 case Instruction::AShr:
4410 Check(B.getType()->isIntOrIntVectorTy(),
4411 "Shifts only work with integral types!", &B);
4412 Check(B.getType() == B.getOperand(0)->getType(),
4413 "Shift return type must be same as operands!", &B);
4414 break;
4415 default:
4416 llvm_unreachable("Unknown BinaryOperator opcode!");
4417 }
4418
4419 visitInstruction(B);
4420}
4421
4422void Verifier::visitICmpInst(ICmpInst &IC) {
4423 // Check that the operands are the same type
4424 Type *Op0Ty = IC.getOperand(0)->getType();
4425 Type *Op1Ty = IC.getOperand(1)->getType();
4426 Check(Op0Ty == Op1Ty,
4427 "Both operands to ICmp instruction are not of the same type!", &IC);
4428 // Check that the operands are the right type
4429 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4430 "Invalid operand types for ICmp instruction", &IC);
4431 // Check that the predicate is valid.
4432 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4433
4434 visitInstruction(IC);
4435}
4436
4437void Verifier::visitFCmpInst(FCmpInst &FC) {
4438 // Check that the operands are the same type
4439 Type *Op0Ty = FC.getOperand(0)->getType();
4440 Type *Op1Ty = FC.getOperand(1)->getType();
4441 Check(Op0Ty == Op1Ty,
4442 "Both operands to FCmp instruction are not of the same type!", &FC);
4443 // Check that the operands are the right type
4444 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4445 &FC);
4446 // Check that the predicate is valid.
4447 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4448
4449 visitInstruction(FC);
4450}
4451
/// Verify an extractelement instruction via the operand-validity helper.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
  // NOTE(review): the opening line of this Check — presumably
  // Check(ExtractElementInst::isValidOperands(...) over the vector and index
  // operands — is missing from this copy of the file; confirm against
  // upstream before editing.
        "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
4457
4458void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4459 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4460 IE.getOperand(2)),
4461 "Invalid insertelement operands!", &IE);
4462 visitInstruction(IE);
4463}
4464
/// Verify a shufflevector instruction via the operand-validity helper.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
  // NOTE(review): the opening line of this Check — presumably
  // Check(ShuffleVectorInst::isValidOperands(...) over the two vector
  // operands — is missing from this copy of the file; confirm against
  // upstream before editing.
                                      SV.getShuffleMask()),
        "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
4471
/// Verify a getelementptr instruction: pointer-typed base, sized source
/// element type, integer indices that are valid for the source type, a result
/// type consistent with the indexed type, vector-width agreement for vector
/// GEPs, and matching address spaces.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // Scalarize the base operand's type so a vector-of-pointers base is handled
  // the same way as a plain pointer.
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Check(isa<PointerType>(TargetTy),
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  // Structures containing scalable vectors have no fixed layout to index.
  // NOTE(review): the two adjacent literals below concatenate without a space
  // ("...vectortype"); confirm against upstream before changing the message.
  if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
    Check(!STy->isScalableTy(),
          "getelementptr cannot target structure that contains scalable vector"
          "type",
          &GEP);
  }

  SmallVector<Value *, 16> Idxs(GEP.indices());
  Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
  // Compute the type the index list actually addresses; null means the
  // indices are invalid for the source element type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());

  Check(PtrTy && GEP.getResultElementType() == ElTy,
        "GEP is not of right type for indices!", &GEP, ElTy);

  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
    // Additional checks for vector GEPs: every vector operand (base and any
    // vector index) must have the same element count as the result.
    ElementCount GEPWidth = GEPVTy->getElementCount();
    if (GEP.getPointerOperandType()->isVectorTy())
      Check(
          GEPWidth ==
              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
          "Vector GEP result width doesn't match operand's", &GEP);
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
        ElementCount IndexWidth = IndexVTy->getElementCount();
        Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Check(IndexTy->isIntOrIntVectorTy(),
            "All GEP indices should be of integer type");
    }
  }

  // Check that GEP does not index into a vector with non-byte-addressable
  // elements.
  // NOTE(review): the loop header (a gep_type_iterator over the GEP's indexed
  // types, declaring GTI/GTE) is missing from this copy of the file; confirm
  // against upstream before editing.
       GTI != GTE; ++GTI) {
    if (GTI.isVector()) {
      Type *ElemTy = GTI.getIndexedType();
      Check(DL.typeSizeEqualsStoreSize(ElemTy),
            "GEP into vector with non-byte-addressable element type", &GEP);
    }
  }

  Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
        "GEP address space doesn't match type", &GEP);

  visitInstruction(GEP);
}
4534
4535static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4536 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4537}
4538
4539/// Verify !range and !absolute_symbol metadata. These have the same
4540/// restrictions, except !absolute_symbol allows the full set.
4541void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4542 Type *Ty, RangeLikeMetadataKind Kind) {
4543 unsigned NumOperands = Range->getNumOperands();
4544 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4545 unsigned NumRanges = NumOperands / 2;
4546 Check(NumRanges >= 1, "It should have at least one range!", Range);
4547
4548 ConstantRange LastRange(1, true); // Dummy initial value
4549 for (unsigned i = 0; i < NumRanges; ++i) {
4550 ConstantInt *Low =
4551 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4552 Check(Low, "The lower limit must be an integer!", Low);
4553 ConstantInt *High =
4554 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4555 Check(High, "The upper limit must be an integer!", High);
4556
4557 Check(High->getType() == Low->getType(), "Range pair types must match!",
4558 &I);
4559
4560 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4561 Check(High->getType()->isIntegerTy(32),
4562 "noalias.addrspace type must be i32!", &I);
4563 } else {
4564 Check(High->getType() == Ty->getScalarType(),
4565 "Range types must match instruction type!", &I);
4566 }
4567
4568 APInt HighV = High->getValue();
4569 APInt LowV = Low->getValue();
4570
4571 // ConstantRange asserts if the ranges are the same except for the min/max
4572 // value. Leave the cases it tolerates for the empty range error below.
4573 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4574 "The upper and lower limits cannot be the same value", &I);
4575
4576 ConstantRange CurRange(LowV, HighV);
4577 Check(!CurRange.isEmptySet() &&
4578 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4579 !CurRange.isFullSet()),
4580 "Range must not be empty!", Range);
4581 if (i != 0) {
4582 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4583 "Intervals are overlapping", Range);
4584 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4585 Range);
4586 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4587 Range);
4588 }
4589 LastRange = ConstantRange(LowV, HighV);
4590 }
4591 if (NumRanges > 2) {
4592 APInt FirstLow =
4593 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4594 APInt FirstHigh =
4595 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4596 ConstantRange FirstRange(FirstLow, FirstHigh);
4597 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4598 "Intervals are overlapping", Range);
4599 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4600 Range);
4601 }
4602}
4603
4604void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4605 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4606 "precondition violation");
4607 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4608}
4609
/// Verify !nofpclass metadata: the annotated value must have an FP-compatible
/// type and the single metadata operand must be a non-zero i32 mask using
/// only valid FPClassTest bits.
void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
                                      Type *Ty) {
  Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
        "nofpclass only applies to floating-point typed loads", I);

  Check(NoFPClass->getNumOperands() == 1,
        "nofpclass must have exactly one entry", NoFPClass);
  // NOTE(review): the initializer of MaskVal — presumably
  // mdconst::dyn_extract<ConstantInt>(NoFPClass->getOperand(0)) — is missing
  // from this copy of the file; confirm against upstream before editing.
  ConstantInt *MaskVal =
  Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
        "nofpclass entry must be a constant i32", NoFPClass);
  uint32_t Val = MaskVal->getZExtValue();
  // A mask of zero would assert nothing about the value.
  Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
        I);

  // Only bits covered by fcAllFlags are meaningful.
  Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
        "Invalid value for 'nofpclass' test mask", NoFPClass, I);
}
4628
4629void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4630 Type *Ty) {
4631 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4632 "precondition violation");
4633 verifyRangeLikeMetadata(I, Range, Ty,
4634 RangeLikeMetadataKind::NoaliasAddrspace);
4635}
4636
4637void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4638 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4639 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4640 Check(!(Size & (Size - 1)),
4641 "atomic memory access' operand must have a power-of-two size", Ty, I);
4642}
4643
/// Verify a load: pointer operand, supported alignment, sized result type,
/// and — for atomic loads — legal ordering, element type, and access size.
void Verifier::visitLoadInst(LoadInst &LI) {
  // NOTE(review): the declaration/initialization of PTy — presumably a
  // dyn_cast<PointerType> of the pointer operand's type — is missing from
  // this copy of the file; confirm against upstream before editing.
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Loads may not have release semantics.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    // NOTE(review): a disjunct of this condition (presumably the
    // floating-point scalar-type test) is missing from this copy of the
    // file; confirm against upstream before editing.
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isByteTy() ||
          "atomic load operand must have integer, byte, pointer, floating "
          "point, or vector type!",
          ElTy, &LI);

    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
    // NOTE(review): the Check(...) opener (presumably testing that the sync
    // scope is SyncScope::System) is missing from this copy of the file;
    // confirm against upstream before editing.
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4672
/// Verify a store: pointer destination, supported alignment, sized stored
/// type, and — for atomic stores — legal ordering, element type, and access
/// size.
void Verifier::visitStoreInst(StoreInst &SI) {
  // Operand 1 is the destination pointer; operand 0 is the stored value.
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Check(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = SI.getOperand(0)->getType();
  if (MaybeAlign A = SI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
  }
  Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Stores may not have acquire semantics.
    Check(SI.getOrdering() != AtomicOrdering::Acquire &&
              SI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Store cannot have Acquire ordering", &SI);
    // NOTE(review): a disjunct of this condition (presumably the
    // floating-point scalar-type test) is missing from this copy of the
    // file; confirm against upstream before editing.
    Check(ElTy->getScalarType()->isIntOrPtrTy() ||
              ElTy->getScalarType()->isByteTy() ||
          "atomic store operand must have integer, byte, pointer, floating "
          "point, or vector type!",
          ElTy, &SI);
    checkAtomicMemAccessSize(ElTy, &SI);
  } else {
    Check(SI.getSyncScopeID() == SyncScope::System,
          "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
4699
4700/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4701void Verifier::verifySwiftErrorCall(CallBase &Call,
4702 const Value *SwiftErrorVal) {
4703 for (const auto &I : llvm::enumerate(Call.args())) {
4704 if (I.value() == SwiftErrorVal) {
4705 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4706 "swifterror value when used in a callsite should be marked "
4707 "with swifterror attribute",
4708 SwiftErrorVal, Call);
4709 }
4710 }
4711}
4712
/// Verify all uses of a swifterror value (an alloca or argument): it may
/// only be loaded, stored (as the pointer operand), or passed as a
/// swifterror call argument.
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
    // NOTE(review): the Check(...) opener listing the allowed user kinds
    // (presumably isa<LoadInst> || isa<StoreInst> || isa<CallInst> || ...) is
    // missing from this copy of the file; confirm against upstream before
    // editing.
          isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites must mark the matching parameter with the swifterror
    // attribute; delegated to verifySwiftErrorCall.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4732
/// Verify an alloca: sized allocated type, integer array size, supported
/// alignment, swifterror constraints, and target-specific address-space
/// rules.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  // Visited set guards against infinite recursion through recursive types.
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
  // NOTE(review): the Check(...) opener for the target-extension-type test is
  // missing from this copy of the file; confirm against upstream.
        "Alloca has illegal target extension type", &AI);
  // NOTE(review): the Check(...) opener testing that the array-size operand
  // has integer type is missing from this copy of the file; confirm against
  // upstream.
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // A swifterror slot always holds a pointer and is a single element.
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
    // NOTE(review): the Check(...) opener (presumably !AI.isArrayAllocation())
    // is missing from this copy of the file; confirm against upstream.
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
    // NOTE(review): the Check(...) opener comparing the alloca's address
    // space against the target's alloca address space is missing from this
    // copy of the file; confirm against upstream.
          "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4762
4763void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4764 Type *ElTy = CXI.getOperand(1)->getType();
4765 Check(ElTy->isIntOrPtrTy(),
4766 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4767 checkAtomicMemAccessSize(ElTy, &CXI);
4768 visitInstruction(CXI);
4769}
4770
/// Verify an atomicrmw: ordering must not be unordered, and the operand type
/// must suit the operation (int/FP/pointer for xchg, FP for FP ops, integer
/// otherwise) and satisfy the common atomic size constraints.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  // Operand 1 is the value operand; its type defines the access type.
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
    // NOTE(review): the Check(...) opener for the FP-operation type test
    // (presumably an FP-or-FP-vector condition) is missing from this copy of
    // the file; confirm against upstream before editing.
        "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
            " operand must have floating-point or fixed vector of floating-point "
            "type!",
        &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
  // NOTE(review): the Check(...) opener validating that Op lies within the
  // legal binop range is missing from this copy of the file; confirm against
  // upstream before editing.
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4799
4800void Verifier::visitFenceInst(FenceInst &FI) {
4801 const AtomicOrdering Ordering = FI.getOrdering();
4802 Check(Ordering == AtomicOrdering::Acquire ||
4803 Ordering == AtomicOrdering::Release ||
4804 Ordering == AtomicOrdering::AcquireRelease ||
4805 Ordering == AtomicOrdering::SequentiallyConsistent,
4806 "fence instructions may only have acquire, release, acq_rel, or "
4807 "seq_cst ordering.",
4808 &FI);
4809 visitInstruction(FI);
4810}
4811
/// Verify an extractvalue: the type indexed by its index list must equal the
/// instruction's result type.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
  // NOTE(review): the opening line of this Check — presumably
  // Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
  // — is missing from this copy of the file; confirm against upstream.
                                       EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4819
/// Verify an insertvalue: the type indexed by its index list must equal the
/// type of the value being inserted (operand 1).
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
  // NOTE(review): the opening line of this Check — presumably
  // Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
  // — is missing from this copy of the file; confirm against upstream.
                                       IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4828
4829static Value *getParentPad(Value *EHPad) {
4830 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4831 return FPI->getParentPad();
4832
4833 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4834}
4835
/// Verify that every predecessor of an EH pad's block reaches it through a
/// legal unwind edge: landing pads only via invoke unwind edges, catchpads
/// only from their catchswitch, and other pads via unwind edges whose
/// source funclet legally nests/exits relative to this pad.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // NOTE(review): "containg" in the message below is a typo for
    // "containing"; it is a runtime diagnostic string, so left untouched here.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    // FromPad is the pad (or none-token) the unwind edge originates in.
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Nounwind intrinsics that never lower to real calls cannot actually
      // unwind here, so their edges are exempt from the funclet rules.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4918
/// Verify a landingpad: must have clauses or be a cleanup, share one result
/// type per function, require a personality, be the first non-PHI in its
/// block, and have correctly typed catch/filter clauses.
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads in a function must produce the same result type; the
  // first one seen fixes LandingPadResultTy.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      // Catch clauses name a typeinfo object by pointer.
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
      // NOTE(review): the Check(...) opener testing that the filter clause is
      // an array (presumably isa<ArrayType>(Clause->getType())) is missing
      // from this copy of the file; confirm against upstream.
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4958
/// Verify a resume: requires a personality function and a value whose type
/// agrees with the function's landingpad result type.
void Verifier::visitResumeInst(ResumeInst &RI) {
  // NOTE(review): the Check(...) opener (presumably testing that the
  // enclosing function has a personality) is missing from this copy of the
  // file; confirm against upstream before editing.
        "ResumeInst needs to be in a function with a personality.", &RI);

  // The resumed value's type must match every landingpad in the function;
  // the first landingpad/resume seen fixes LandingPadResultTy.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4973
/// Verify a catchpad: requires a personality, must be nested directly in a
/// catchswitch, must be the first non-PHI in its block, and its argument
/// operands must be allocas or constants.
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

  // NOTE(review): the Check(...) opener (presumably
  // isa<CatchSwitchInst>(CPI.getParentPad())) is missing from this copy of
  // the file; confirm against upstream before editing.
        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  // NOTE(review): the opening of this Check — presumably
  // Check(llvm::all_of(CPI.arg_operands(), ... — is missing from this copy of
  // the file; confirm against upstream before editing.
        [](Use &U) {
          auto *V = U.get();
          return isa<Constant>(V) || isa<AllocaInst>(V);
        }),
        "Argument operand must be alloca or constant.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
5000
5001void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
5002 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
5003 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
5004 CatchReturn.getOperand(0));
5005
5006 visitTerminator(CatchReturn);
5007}
5008
5009void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
5010 BasicBlock *BB = CPI.getParent();
5011
5012 Function *F = BB->getParent();
5013 Check(F->hasPersonalityFn(),
5014 "CleanupPadInst needs to be in a function with a personality.", &CPI);
5015
5016 // The cleanuppad instruction must be the first non-PHI instruction in the
5017 // block.
5018 Check(&*BB->getFirstNonPHIIt() == &CPI,
5019 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5020
5021 auto *ParentPad = CPI.getParentPad();
5022 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5023 "CleanupPadInst has an invalid parent.", &CPI);
5024
5025 visitEHPadPredecessors(CPI);
5026 visitFuncletPadInst(CPI);
5027}
5028
/// Verify the unwind-destination consistency rules for a funclet pad
/// (catchpad or cleanuppad): every unwind edge that exits the pad must agree
/// on a single unwind destination, and for a catchpad that destination must
/// also match the parent catchswitch's. Nested cleanup pads are explored via
/// a worklist, since a cleanup's unwind destination is only discoverable by
/// scanning its users.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found to exit FPI; later edges must agree with it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  // Guards against pads nested within themselves (a cycle).
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's unwind destination must agree with its parent catchswitch.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5188
5189void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5190 BasicBlock *BB = CatchSwitch.getParent();
5191
5192 Function *F = BB->getParent();
5193 Check(F->hasPersonalityFn(),
5194 "CatchSwitchInst needs to be in a function with a personality.",
5195 &CatchSwitch);
5196
5197 // The catchswitch instruction must be the first non-PHI instruction in the
5198 // block.
5199 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5200 "CatchSwitchInst not the first non-PHI instruction in the block.",
5201 &CatchSwitch);
5202
5203 auto *ParentPad = CatchSwitch.getParentPad();
5204 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5205 "CatchSwitchInst has an invalid parent.", ParentPad);
5206
5207 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5208 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5209 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5210 "CatchSwitchInst must unwind to an EH block which is not a "
5211 "landingpad.",
5212 &CatchSwitch);
5213
5214 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5215 if (getParentPad(&*I) == ParentPad)
5216 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5217 }
5218
5219 Check(CatchSwitch.getNumHandlers() != 0,
5220 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5221
5222 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5223 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5224 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5225 }
5226
5227 visitEHPadPredecessors(CatchSwitch);
5228 visitTerminator(CatchSwitch);
5229}
5230
5231void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5233 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5234 CRI.getOperand(0));
5235
5236 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5237 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5238 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5239 "CleanupReturnInst must unwind to an EH block which is not a "
5240 "landingpad.",
5241 &CRI);
5242 }
5243
5244 visitTerminator(CRI);
5245}
5246
5247void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5248 Instruction *Op = cast<Instruction>(I.getOperand(i));
5249 // If the we have an invalid invoke, don't try to compute the dominance.
5250 // We already reject it in the invoke specific checks and the dominance
5251 // computation doesn't handle multiple edges.
5252 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5253 if (II->getNormalDest() == II->getUnwindDest())
5254 return;
5255 }
5256
5257 // Quick check whether the def has already been encountered in the same block.
5258 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5259 // uses are defined to happen on the incoming edge, not at the instruction.
5260 //
5261 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5262 // wrapping an SSA value, assert that we've already encountered it. See
5263 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5264 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5265 return;
5266
5267 const Use &U = I.getOperandUse(i);
5268 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5269}
5270
5271void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5272 Check(I.getType()->isPointerTy(),
5273 "dereferenceable, dereferenceable_or_null "
5274 "apply only to pointer types",
5275 &I);
5277 "dereferenceable, dereferenceable_or_null apply only to load"
5278 " and inttoptr instructions, use attributes for calls or invokes",
5279 &I);
5280 Check(MD->getNumOperands() == 1,
5281 "dereferenceable, dereferenceable_or_null "
5282 "take one operand!",
5283 &I);
5284 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5285 Check(CI && CI->getType()->isIntegerTy(64),
5286 "dereferenceable, "
5287 "dereferenceable_or_null metadata value must be an i64!",
5288 &I);
5289}
5290
5291void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5292 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5293 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5294 &I);
5295 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5296}
5297
5298void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5299 auto GetBranchingTerminatorNumOperands = [&]() {
5300 unsigned ExpectedNumOperands = 0;
5301 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5302 ExpectedNumOperands = BI->getNumSuccessors();
5303 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5304 ExpectedNumOperands = SI->getNumSuccessors();
5305 else if (isa<CallInst>(&I))
5306 ExpectedNumOperands = 1;
5307 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5308 ExpectedNumOperands = IBI->getNumDestinations();
5309 else if (isa<SelectInst>(&I))
5310 ExpectedNumOperands = 2;
5311 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5312 ExpectedNumOperands = CI->getNumSuccessors();
5313 return ExpectedNumOperands;
5314 };
5315 Check(MD->getNumOperands() >= 1,
5316 "!prof annotations should have at least 1 operand", MD);
5317 // Check first operand.
5318 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5320 "expected string with name of the !prof annotation", MD);
5321 MDString *MDS = cast<MDString>(MD->getOperand(0));
5322 StringRef ProfName = MDS->getString();
5323
5325 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5326 "'unknown' !prof should only appear on instructions on which "
5327 "'branch_weights' would",
5328 MD);
5329 verifyUnknownProfileMetadata(MD);
5330 return;
5331 }
5332
5333 Check(MD->getNumOperands() >= 2,
5334 "!prof annotations should have no less than 2 operands", MD);
5335
5336 // Check consistency of !prof branch_weights metadata.
5337 if (ProfName == MDProfLabels::BranchWeights) {
5338 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5339 if (isa<InvokeInst>(&I)) {
5340 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5341 "Wrong number of InvokeInst branch_weights operands", MD);
5342 } else {
5343 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5344 if (ExpectedNumOperands == 0)
5345 CheckFailed("!prof branch_weights are not allowed for this instruction",
5346 MD);
5347
5348 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5349 MD);
5350 }
5351 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5352 ++i) {
5353 auto &MDO = MD->getOperand(i);
5354 Check(MDO, "second operand should not be null", MD);
5356 "!prof brunch_weights operand is not a const int");
5357 }
5358 } else if (ProfName == MDProfLabels::ValueProfile) {
5359 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5360 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5361 Check(KindInt, "VP !prof missing kind argument", MD);
5362
5363 auto Kind = KindInt->getZExtValue();
5364 Check(Kind >= InstrProfValueKind::IPVK_First &&
5365 Kind <= InstrProfValueKind::IPVK_Last,
5366 "Invalid VP !prof kind", MD);
5367 Check(MD->getNumOperands() % 2 == 1,
5368 "VP !prof should have an even number "
5369 "of arguments after 'VP'",
5370 MD);
5371 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5372 Kind == InstrProfValueKind::IPVK_MemOPSize)
5374 "VP !prof indirect call or memop size expected to be applied to "
5375 "CallBase instructions only",
5376 MD);
5377 } else {
5378 CheckFailed("expected either branch_weights or VP profile name", MD);
5379 }
5380}
5381
5382void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5383 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5384 // DIAssignID metadata must be attached to either an alloca or some form of
5385 // store/memory-writing instruction.
5386 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5387 // possible store intrinsics.
5388 bool ExpectedInstTy =
5390 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5391 I, MD);
5392 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5393 // only be found as DbgAssignIntrinsic operands.
5394 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5395 for (auto *User : AsValue->users()) {
5397 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5398 MD, User);
5399 // All of the dbg.assign intrinsics should be in the same function as I.
5400 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5401 CheckDI(DAI->getFunction() == I.getFunction(),
5402 "dbg.assign not in same function as inst", DAI, &I);
5403 }
5404 }
5405 for (DbgVariableRecord *DVR :
5406 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5407 CheckDI(DVR->isDbgAssign(),
5408 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5409 CheckDI(DVR->getFunction() == I.getFunction(),
5410 "DVRAssign not in same function as inst", DVR, &I);
5411 }
5412}
5413
5414void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5416 "!mmra metadata attached to unexpected instruction kind", I, MD);
5417
5418 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5419 // list of tags such as !2 in the following example:
5420 // !0 = !{!"a", !"b"}
5421 // !1 = !{!"c", !"d"}
5422 // !2 = !{!0, !1}
5423 if (MMRAMetadata::isTagMD(MD))
5424 return;
5425
5426 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5427 for (const MDOperand &MDOp : MD->operands())
5428 Check(MMRAMetadata::isTagMD(MDOp.get()),
5429 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5430}
5431
5432void Verifier::visitCallStackMetadata(MDNode *MD) {
5433 // Call stack metadata should consist of a list of at least 1 constant int
5434 // (representing a hash of the location).
5435 Check(MD->getNumOperands() >= 1,
5436 "call stack metadata should have at least 1 operand", MD);
5437
5438 for (const auto &Op : MD->operands())
5440 "call stack metadata operand should be constant integer", Op);
5441}
5442
5443void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5444 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5445 Check(MD->getNumOperands() >= 1,
5446 "!memprof annotations should have at least 1 metadata operand "
5447 "(MemInfoBlock)",
5448 MD);
5449
5450 // Check each MIB
5451 for (auto &MIBOp : MD->operands()) {
5452 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5453 // The first operand of an MIB should be the call stack metadata.
5454 // There rest of the operands should be MDString tags, and there should be
5455 // at least one.
5456 Check(MIB->getNumOperands() >= 2,
5457 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5458
5459 // Check call stack metadata (first operand).
5460 Check(MIB->getOperand(0) != nullptr,
5461 "!memprof MemInfoBlock first operand should not be null", MIB);
5462 Check(isa<MDNode>(MIB->getOperand(0)),
5463 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5464 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5465 visitCallStackMetadata(StackMD);
5466
5467 // The second MIB operand should be MDString.
5469 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5470
5471 // Any remaining should be MDNode that are pairs of integers
5472 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5473 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5474 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5475 MIB);
5476 Check(OpNode->getNumOperands() == 2,
5477 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5478 "operands",
5479 MIB);
5480 // Check that all of Op's operands are ConstantInt.
5481 Check(llvm::all_of(OpNode->operands(),
5482 [](const MDOperand &Op) {
5483 return mdconst::hasa<ConstantInt>(Op);
5484 }),
5485 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5486 "ConstantInt operands",
5487 MIB);
5488 }
5489 }
5490}
5491
5492void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5493 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5494 // Verify the partial callstack annotated from memprof profiles. This callsite
5495 // is a part of a profiled allocation callstack.
5496 visitCallStackMetadata(MD);
5497}
5498
5499static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5500 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5501 return isa<ConstantInt>(VAL->getValue());
5502 return false;
5503}
5504
5505void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5506 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5507 &I);
5508 for (Metadata *Op : MD->operands()) {
5510 "The callee_type metadata must be a list of type metadata nodes", Op);
5511 auto *TypeMD = cast<MDNode>(Op);
5512 Check(TypeMD->getNumOperands() == 2,
5513 "Well-formed generalized type metadata must contain exactly two "
5514 "operands",
5515 Op);
5516 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5517 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5518 "The first operand of type metadata for functions must be zero", Op);
5519 Check(TypeMD->hasGeneralizedMDString(),
5520 "Only generalized type metadata can be part of the callee_type "
5521 "metadata list",
5522 Op);
5523 }
5524}
5525
5526void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5527 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5528 Check(Annotation->getNumOperands() >= 1,
5529 "annotation must have at least one operand");
5530 for (const MDOperand &Op : Annotation->operands()) {
5531 bool TupleOfStrings =
5532 isa<MDTuple>(Op.get()) &&
5533 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5534 return isa<MDString>(Annotation.get());
5535 });
5536 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5537 "operands must be a string or a tuple of strings");
5538 }
5539}
5540
5541void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5542 unsigned NumOps = MD->getNumOperands();
5543 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5544 MD);
5545 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5546 "first scope operand must be self-referential or string", MD);
5547 if (NumOps == 3)
5549 "third scope operand must be string (if used)", MD);
5550
5551 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5552 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5553
5554 unsigned NumDomainOps = Domain->getNumOperands();
5555 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5556 "domain must have one or two operands", Domain);
5557 Check(Domain->getOperand(0).get() == Domain ||
5558 isa<MDString>(Domain->getOperand(0)),
5559 "first domain operand must be self-referential or string", Domain);
5560 if (NumDomainOps == 2)
5561 Check(isa<MDString>(Domain->getOperand(1)),
5562 "second domain operand must be string (if used)", Domain);
5563}
5564
5565void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5566 for (const MDOperand &Op : MD->operands()) {
5567 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5568 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5569 visitAliasScopeMetadata(OpMD);
5570 }
5571}
5572
5573void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5574 auto IsValidAccessScope = [](const MDNode *MD) {
5575 return MD->getNumOperands() == 0 && MD->isDistinct();
5576 };
5577
5578 // It must be either an access scope itself...
5579 if (IsValidAccessScope(MD))
5580 return;
5581
5582 // ...or a list of access scopes.
5583 for (const MDOperand &Op : MD->operands()) {
5584 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5585 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5586 Check(IsValidAccessScope(OpMD),
5587 "Access scope list contains invalid access scope", MD);
5588 }
5589}
5590
5591void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5592 static const char *ValidArgs[] = {"address_is_null", "address",
5593 "read_provenance", "provenance"};
5594
5595 auto *SI = dyn_cast<StoreInst>(&I);
5596 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5597 Check(SI->getValueOperand()->getType()->isPointerTy(),
5598 "!captures metadata can only be applied to store with value operand of "
5599 "pointer type",
5600 &I);
5601 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5602 &I);
5603
5604 for (Metadata *Op : Captures->operands()) {
5605 auto *Str = dyn_cast<MDString>(Op);
5606 Check(Str, "!captures metadata must be a list of strings", &I);
5607 Check(is_contained(ValidArgs, Str->getString()),
5608 "invalid entry in !captures metadata", &I, Str);
5609 }
5610}
5611
5612void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5613 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5614 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5615 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5617 "expected integer constant", MD);
5618}
5619
5620void Verifier::visitInlineHistoryMetadata(Instruction &I, MDNode *MD) {
5621 Check(isa<CallBase>(I), "!inline_history should only exist on calls", &I);
5622 for (Metadata *Op : MD->operands()) {
5623 // Can be null when a function is erased.
5624 if (!Op)
5625 continue;
5628 ->getValue()
5629 ->stripPointerCastsAndAliases()),
5630 "!inline_history operands must be functions or null", MD);
5631 }
5632}
5633
5634/// verifyInstruction - Verify that an instruction is well formed.
5635///
5636void Verifier::visitInstruction(Instruction &I) {
5637 BasicBlock *BB = I.getParent();
5638 Check(BB, "Instruction not embedded in basic block!", &I);
5639
5640 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5641 for (User *U : I.users()) {
5642 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5643 "Only PHI nodes may reference their own value!", &I);
5644 }
5645 }
5646
5647 // Check that void typed values don't have names
5648 Check(!I.getType()->isVoidTy() || !I.hasName(),
5649 "Instruction has a name, but provides a void value!", &I);
5650
5651 // Check that the return value of the instruction is either void or a legal
5652 // value type.
5653 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5654 "Instruction returns a non-scalar type!", &I);
5655
5656 // Check that the instruction doesn't produce metadata. Calls are already
5657 // checked against the callee type.
5658 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5659 "Invalid use of metadata!", &I);
5660
5661 // Check that all uses of the instruction, if they are instructions
5662 // themselves, actually have parent basic blocks. If the use is not an
5663 // instruction, it is an error!
5664 for (Use &U : I.uses()) {
5665 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5666 Check(Used->getParent() != nullptr,
5667 "Instruction referencing"
5668 " instruction not embedded in a basic block!",
5669 &I, Used);
5670 else {
5671 CheckFailed("Use of instruction is not an instruction!", U);
5672 return;
5673 }
5674 }
5675
5676 // Get a pointer to the call base of the instruction if it is some form of
5677 // call.
5678 const CallBase *CBI = dyn_cast<CallBase>(&I);
5679
5680 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5681 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5682
5683 // Check to make sure that only first-class-values are operands to
5684 // instructions.
5685 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5686 Check(false, "Instruction operands must be first-class values!", &I);
5687 }
5688
5689 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5690 // This code checks whether the function is used as the operand of a
5691 // clang_arc_attachedcall operand bundle.
5692 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5693 int Idx) {
5694 return CBI && CBI->isOperandBundleOfType(
5696 };
5697
5698 // Check to make sure that the "address of" an intrinsic function is never
5699 // taken. Ignore cases where the address of the intrinsic function is used
5700 // as the argument of operand bundle "clang.arc.attachedcall" as those
5701 // cases are handled in verifyAttachedCallBundle.
5702 Check((!F->isIntrinsic() ||
5703 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5704 IsAttachedCallOperand(F, CBI, i)),
5705 "Cannot take the address of an intrinsic!", &I);
5706 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5707 F->getIntrinsicID() == Intrinsic::donothing ||
5708 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5709 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5710 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5711 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5712 F->getIntrinsicID() == Intrinsic::coro_resume ||
5713 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5714 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5715 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5716 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5717 F->getIntrinsicID() ==
5718 Intrinsic::experimental_patchpoint_void ||
5719 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5720 F->getIntrinsicID() == Intrinsic::fake_use ||
5721 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5722 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5723 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5724 IsAttachedCallOperand(F, CBI, i),
5725 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5726 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5727 "wasm.(re)throw",
5728 &I);
5729 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5730 &M, F, F->getParent());
5731 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5732 Check(OpBB->getParent() == BB->getParent(),
5733 "Referring to a basic block in another function!", &I);
5734 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5735 Check(OpArg->getParent() == BB->getParent(),
5736 "Referring to an argument in another function!", &I);
5737 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5738 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5739 &M, GV, GV->getParent());
5740 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5741 Check(OpInst->getFunction() == BB->getParent(),
5742 "Referring to an instruction in another function!", &I);
5743 verifyDominatesUse(I, i);
5744 } else if (isa<InlineAsm>(I.getOperand(i))) {
5745 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5746 "Cannot take the address of an inline asm!", &I);
5747 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5748 visitConstantExprsRecursively(C);
5749 }
5750 }
5751
5752 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5753 Check(I.getType()->isFPOrFPVectorTy(),
5754 "fpmath requires a floating point result!", &I);
5755 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5756 if (ConstantFP *CFP0 =
5758 const APFloat &Accuracy = CFP0->getValueAPF();
5759 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5760 "fpmath accuracy must have float type", &I);
5761 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5762 "fpmath accuracy not a positive number!", &I);
5763 } else {
5764 Check(false, "invalid fpmath accuracy!", &I);
5765 }
5766 }
5767
5768 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5770 "Ranges are only for loads, calls and invokes!", &I);
5771 visitRangeMetadata(I, Range, I.getType());
5772 }
5773
5774 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
5775 Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
5776 visitNoFPClassMetadata(I, MD, I.getType());
5777 }
5778
5779 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5782 "noalias.addrspace are only for memory operations!", &I);
5783 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5784 }
5785
5786 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5788 "invariant.group metadata is only for loads and stores", &I);
5789 }
5790
5791 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5792 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5793 &I);
5795 "nonnull applies only to load instructions, use attributes"
5796 " for calls or invokes",
5797 &I);
5798 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5799 }
5800
5801 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5802 visitDereferenceableMetadata(I, MD);
5803
5804 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5805 visitDereferenceableMetadata(I, MD);
5806
5807 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5808 visitNofreeMetadata(I, MD);
5809
5810 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5811 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5812
5813 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5814 visitAliasScopeListMetadata(MD);
5815 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5816 visitAliasScopeListMetadata(MD);
5817
5818 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5819 visitAccessGroupMetadata(MD);
5820
5821 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5822 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5823 &I);
5825 "align applies only to load instructions, "
5826 "use attributes for calls or invokes",
5827 &I);
5828 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5829 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5830 Check(CI && CI->getType()->isIntegerTy(64),
5831 "align metadata value must be an i64!", &I);
5832 uint64_t Align = CI->getZExtValue();
5833 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5834 &I);
5835 Check(Align <= Value::MaximumAlignment,
5836 "alignment is larger that implementation defined limit", &I);
5837 }
5838
5839 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5840 visitProfMetadata(I, MD);
5841
5842 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5843 visitMemProfMetadata(I, MD);
5844
5845 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5846 visitCallsiteMetadata(I, MD);
5847
5848 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5849 visitCalleeTypeMetadata(I, MD);
5850
5851 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5852 visitDIAssignIDMetadata(I, MD);
5853
5854 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5855 visitMMRAMetadata(I, MMRA);
5856
5857 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5858 visitAnnotationMetadata(Annotation);
5859
5860 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5861 visitCapturesMetadata(I, Captures);
5862
5863 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5864 visitAllocTokenMetadata(I, MD);
5865
5866 if (MDNode *MD = I.getMetadata(LLVMContext::MD_inline_history))
5867 visitInlineHistoryMetadata(I, MD);
5868
5869 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5870 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5871 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5872
5873 if (auto *DL = dyn_cast<DILocation>(N)) {
5874 if (DL->getAtomGroup()) {
5875 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5876 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5877 "Instructions enabled",
5878 DL, DL->getScope()->getSubprogram());
5879 }
5880 }
5881 }
5882
5884 I.getAllMetadata(MDs);
5885 for (auto Attachment : MDs) {
5886 unsigned Kind = Attachment.first;
5887 auto AllowLocs =
5888 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5889 ? AreDebugLocsAllowed::Yes
5890 : AreDebugLocsAllowed::No;
5891 visitMDNode(*Attachment.second, AllowLocs);
5892 }
5893
5894 InstsInThisBlock.insert(&I);
5895}
5896
5897/// Allow intrinsics to be verified in different ways.
5898void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5900 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5901 IF);
5902
5903 // Verify that the intrinsic prototype lines up with what the .td files
5904 // describe.
5905 FunctionType *IFTy = IF->getFunctionType();
5906 bool IsVarArg = IFTy->isVarArg();
5907
5911
5912 // Walk the descriptors to extract overloaded types.
5917 "Intrinsic has incorrect return type!", IF);
5919 "Intrinsic has incorrect argument type!", IF);
5920
5921 // Verify if the intrinsic call matches the vararg property.
5922 if (IsVarArg)
5924 "Intrinsic was not defined with variable arguments!", IF);
5925 else
5927 "Callsite was not defined with variable arguments!", IF);
5928
5929 // All descriptors should be absorbed by now.
5930 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5931
5932 // Now that we have the intrinsic ID and the actual argument types (and we
5933 // know they are legal for the intrinsic!) get the intrinsic name through the
5934 // usual means. This allows us to verify the mangling of argument types into
5935 // the name.
5936 const std::string ExpectedName =
5937 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5938 Check(ExpectedName == IF->getName(),
5939 "Intrinsic name not mangled correctly for type arguments! "
5940 "Should be: " +
5941 ExpectedName,
5942 IF);
5943
5944 // If the intrinsic takes MDNode arguments, verify that they are either global
5945 // or are local to *this* function.
5946 for (Value *V : Call.args()) {
5947 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5948 visitMetadataAsValue(*MD, Call.getCaller());
5949 if (auto *Const = dyn_cast<Constant>(V))
5950 Check(!Const->getType()->isX86_AMXTy(),
5951 "const x86_amx is not allowed in argument!");
5952 }
5953
5954 switch (ID) {
5955 default:
5956 break;
5957 case Intrinsic::assume: {
5958 if (Call.hasOperandBundles()) {
5960 Check(Cond && Cond->isOne(),
5961 "assume with operand bundles must have i1 true condition", Call);
5962 }
5963 for (auto &Elem : Call.bundle_op_infos()) {
5964 unsigned ArgCount = Elem.End - Elem.Begin;
5965 // Separate storage assumptions are special insofar as they're the only
5966 // operand bundles allowed on assumes that aren't parameter attributes.
5967 if (Elem.Tag->getKey() == "separate_storage") {
5968 Check(ArgCount == 2,
5969 "separate_storage assumptions should have 2 arguments", Call);
5970 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5971 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5972 "arguments to separate_storage assumptions should be pointers",
5973 Call);
5974 continue;
5975 }
5976 Check(Elem.Tag->getKey() == "ignore" ||
5977 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5978 "tags must be valid attribute names", Call);
5979 Attribute::AttrKind Kind =
5980 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5981 if (Kind == Attribute::Alignment) {
5982 Check(ArgCount <= 3 && ArgCount >= 2,
5983 "alignment assumptions should have 2 or 3 arguments", Call);
5984 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5985 "first argument should be a pointer", Call);
5986 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5987 "second argument should be an integer", Call);
5988 if (ArgCount == 3)
5989 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5990 "third argument should be an integer if present", Call);
5991 continue;
5992 }
5993 if (Kind == Attribute::Dereferenceable) {
5994 Check(ArgCount == 2,
5995 "dereferenceable assumptions should have 2 arguments", Call);
5996 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5997 "first argument should be a pointer", Call);
5998 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5999 "second argument should be an integer", Call);
6000 continue;
6001 }
6002 Check(ArgCount <= 2, "too many arguments", Call);
6003 if (Kind == Attribute::None)
6004 break;
6005 if (Attribute::isIntAttrKind(Kind)) {
6006 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
6007 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
6008 "the second argument should be a constant integral value", Call);
6009 } else if (Attribute::canUseAsParamAttr(Kind)) {
6010 Check((ArgCount) == 1, "this attribute should have one argument", Call);
6011 } else if (Attribute::canUseAsFnAttr(Kind)) {
6012 Check((ArgCount) == 0, "this attribute has no argument", Call);
6013 }
6014 }
6015 break;
6016 }
6017 case Intrinsic::ucmp:
6018 case Intrinsic::scmp: {
6019 Type *SrcTy = Call.getOperand(0)->getType();
6020 Type *DestTy = Call.getType();
6021
6022 Check(DestTy->getScalarSizeInBits() >= 2,
6023 "result type must be at least 2 bits wide", Call);
6024
6025 bool IsDestTypeVector = DestTy->isVectorTy();
6026 Check(SrcTy->isVectorTy() == IsDestTypeVector,
6027 "ucmp/scmp argument and result types must both be either vector or "
6028 "scalar types",
6029 Call);
6030 if (IsDestTypeVector) {
6031 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6032 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6033 Check(SrcVecLen == DestVecLen,
6034 "return type and arguments must have the same number of "
6035 "elements",
6036 Call);
6037 }
6038 break;
6039 }
6040 case Intrinsic::coro_id: {
6041 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
6042 if (isa<ConstantPointerNull>(InfoArg))
6043 break;
6044 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6045 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6046 "info argument of llvm.coro.id must refer to an initialized "
6047 "constant");
6048 Constant *Init = GV->getInitializer();
6050 "info argument of llvm.coro.id must refer to either a struct or "
6051 "an array");
6052 break;
6053 }
6054 case Intrinsic::is_fpclass: {
6055 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6056 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6057 "unsupported bits for llvm.is.fpclass test mask");
6058 break;
6059 }
6060 case Intrinsic::fptrunc_round: {
6061 // Check the rounding mode
6062 Metadata *MD = nullptr;
6064 if (MAV)
6065 MD = MAV->getMetadata();
6066
6067 Check(MD != nullptr, "missing rounding mode argument", Call);
6068
6069 Check(isa<MDString>(MD),
6070 ("invalid value for llvm.fptrunc.round metadata operand"
6071 " (the operand should be a string)"),
6072 MD);
6073
6074 std::optional<RoundingMode> RoundMode =
6075 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6076 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6077 "unsupported rounding mode argument", Call);
6078 break;
6079 }
6080 case Intrinsic::convert_to_arbitrary_fp: {
6081 // Check that vector element counts are consistent.
6082 Type *ValueTy = Call.getArgOperand(0)->getType();
6083 Type *IntTy = Call.getType();
6084
6085 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6086 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6087 Check(IntVecTy,
6088 "if floating-point operand is a vector, integer operand must also "
6089 "be a vector",
6090 Call);
6091 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6092 "floating-point and integer vector operands must have the same "
6093 "element count",
6094 Call);
6095 }
6096
6097 // Check interpretation metadata (argoperand 1).
6098 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6099 Check(InterpMAV, "missing interpretation metadata operand", Call);
6100 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6101 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6102 StringRef Interp = InterpStr->getString();
6103
6104 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6105 Call);
6106
6107 // Valid interpretation strings: mini-float format names.
6109 "unsupported interpretation metadata string", Call);
6110
6111 // Check rounding mode metadata (argoperand 2).
6112 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6113 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6114 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6115 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6116
6117 std::optional<RoundingMode> RM =
6118 convertStrToRoundingMode(RoundingStr->getString());
6119 Check(RM && *RM != RoundingMode::Dynamic,
6120 "unsupported rounding mode argument", Call);
6121 break;
6122 }
6123 case Intrinsic::convert_from_arbitrary_fp: {
6124 // Check that vector element counts are consistent.
6125 Type *IntTy = Call.getArgOperand(0)->getType();
6126 Type *ValueTy = Call.getType();
6127
6128 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6129 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6130 Check(IntVecTy,
6131 "if floating-point operand is a vector, integer operand must also "
6132 "be a vector",
6133 Call);
6134 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6135 "floating-point and integer vector operands must have the same "
6136 "element count",
6137 Call);
6138 }
6139
6140 // Check interpretation metadata (argoperand 1).
6141 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6142 Check(InterpMAV, "missing interpretation metadata operand", Call);
6143 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6144 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6145 StringRef Interp = InterpStr->getString();
6146
6147 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6148 Call);
6149
6150 // Valid interpretation strings: mini-float format names.
6152 "unsupported interpretation metadata string", Call);
6153 break;
6154 }
6155#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6156#include "llvm/IR/VPIntrinsics.def"
6157#undef BEGIN_REGISTER_VP_INTRINSIC
6158 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6159 break;
6160#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6161 case Intrinsic::INTRINSIC:
6162#include "llvm/IR/ConstrainedOps.def"
6163#undef INSTRUCTION
6164 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6165 break;
6166 case Intrinsic::dbg_declare: // llvm.dbg.declare
6167 case Intrinsic::dbg_value: // llvm.dbg.value
6168 case Intrinsic::dbg_assign: // llvm.dbg.assign
6169 case Intrinsic::dbg_label: // llvm.dbg.label
6170    // We no longer interpret debug intrinsics (the old variable-location
6171    // design). They're meaningless as far as LLVM is concerned; we could make
6172    // it an error for them to appear, but it's possible we'll have users
6173    // converting back to intrinsics for the foreseeable future (such as DXIL),
6174    // so tolerate their existence.
6175 break;
6176 case Intrinsic::memcpy:
6177 case Intrinsic::memcpy_inline:
6178 case Intrinsic::memmove:
6179 case Intrinsic::memset:
6180 case Intrinsic::memset_inline:
6181 break;
6182 case Intrinsic::experimental_memset_pattern: {
6183 const auto Memset = cast<MemSetPatternInst>(&Call);
6184 Check(Memset->getValue()->getType()->isSized(),
6185 "unsized types cannot be used as memset patterns", Call);
6186 break;
6187 }
6188 case Intrinsic::memcpy_element_unordered_atomic:
6189 case Intrinsic::memmove_element_unordered_atomic:
6190 case Intrinsic::memset_element_unordered_atomic: {
6191 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6192
6193 ConstantInt *ElementSizeCI =
6194 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6195 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6196 Check(ElementSizeVal.isPowerOf2(),
6197 "element size of the element-wise atomic memory intrinsic "
6198 "must be a power of 2",
6199 Call);
6200
6201 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6202 return Alignment && ElementSizeVal.ule(Alignment->value());
6203 };
6204 Check(IsValidAlignment(AMI->getDestAlign()),
6205 "incorrect alignment of the destination argument", Call);
6206 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6207 Check(IsValidAlignment(AMT->getSourceAlign()),
6208 "incorrect alignment of the source argument", Call);
6209 }
6210 break;
6211 }
6212 case Intrinsic::call_preallocated_setup: {
6213 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6214 bool FoundCall = false;
6215 for (User *U : Call.users()) {
6216 auto *UseCall = dyn_cast<CallBase>(U);
6217 Check(UseCall != nullptr,
6218 "Uses of llvm.call.preallocated.setup must be calls");
6219 Intrinsic::ID IID = UseCall->getIntrinsicID();
6220 if (IID == Intrinsic::call_preallocated_arg) {
6221 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6222 Check(AllocArgIndex != nullptr,
6223 "llvm.call.preallocated.alloc arg index must be a constant");
6224 auto AllocArgIndexInt = AllocArgIndex->getValue();
6225 Check(AllocArgIndexInt.sge(0) &&
6226 AllocArgIndexInt.slt(NumArgs->getValue()),
6227 "llvm.call.preallocated.alloc arg index must be between 0 and "
6228 "corresponding "
6229 "llvm.call.preallocated.setup's argument count");
6230 } else if (IID == Intrinsic::call_preallocated_teardown) {
6231 // nothing to do
6232 } else {
6233 Check(!FoundCall, "Can have at most one call corresponding to a "
6234 "llvm.call.preallocated.setup");
6235 FoundCall = true;
6236 size_t NumPreallocatedArgs = 0;
6237 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6238 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6239 ++NumPreallocatedArgs;
6240 }
6241 }
6242 Check(NumPreallocatedArgs != 0,
6243 "cannot use preallocated intrinsics on a call without "
6244 "preallocated arguments");
6245 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6246 "llvm.call.preallocated.setup arg size must be equal to number "
6247 "of preallocated arguments "
6248 "at call site",
6249 Call, *UseCall);
6250 // getOperandBundle() cannot be called if more than one of the operand
6251 // bundle exists. There is already a check elsewhere for this, so skip
6252 // here if we see more than one.
6253 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6254 1) {
6255 return;
6256 }
6257 auto PreallocatedBundle =
6258 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6259 Check(PreallocatedBundle,
6260 "Use of llvm.call.preallocated.setup outside intrinsics "
6261 "must be in \"preallocated\" operand bundle");
6262 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6263 "preallocated bundle must have token from corresponding "
6264 "llvm.call.preallocated.setup");
6265 }
6266 }
6267 break;
6268 }
6269 case Intrinsic::call_preallocated_arg: {
6270 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6271 Check(Token &&
6272 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6273 "llvm.call.preallocated.arg token argument must be a "
6274 "llvm.call.preallocated.setup");
6275 Check(Call.hasFnAttr(Attribute::Preallocated),
6276 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6277 "call site attribute");
6278 break;
6279 }
6280 case Intrinsic::call_preallocated_teardown: {
6281 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6282 Check(Token &&
6283 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6284 "llvm.call.preallocated.teardown token argument must be a "
6285 "llvm.call.preallocated.setup");
6286 break;
6287 }
6288 case Intrinsic::gcroot:
6289 case Intrinsic::gcwrite:
6290 case Intrinsic::gcread:
6291 if (ID == Intrinsic::gcroot) {
6292 AllocaInst *AI =
6294 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6296 "llvm.gcroot parameter #2 must be a constant.", Call);
6297 if (!AI->getAllocatedType()->isPointerTy()) {
6299 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6300 "or argument #2 must be a non-null constant.",
6301 Call);
6302 }
6303 }
6304
6305 Check(Call.getParent()->getParent()->hasGC(),
6306 "Enclosing function does not use GC.", Call);
6307 break;
6308 case Intrinsic::init_trampoline:
6310 "llvm.init_trampoline parameter #2 must resolve to a function.",
6311 Call);
6312 break;
6313 case Intrinsic::prefetch:
6314 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6315 "rw argument to llvm.prefetch must be 0-1", Call);
6316 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6317 "locality argument to llvm.prefetch must be 0-3", Call);
6318 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6319 "cache type argument to llvm.prefetch must be 0-1", Call);
6320 break;
6321 case Intrinsic::reloc_none: {
6323 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6324 "llvm.reloc.none argument must be a metadata string", &Call);
6325 break;
6326 }
6327 case Intrinsic::stackprotector:
6329 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6330 break;
6331 case Intrinsic::localescape: {
6332 BasicBlock *BB = Call.getParent();
6333 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6334 Call);
6335 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6336 Call);
6337 for (Value *Arg : Call.args()) {
6338 if (isa<ConstantPointerNull>(Arg))
6339 continue; // Null values are allowed as placeholders.
6340 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6341 Check(AI && AI->isStaticAlloca(),
6342 "llvm.localescape only accepts static allocas", Call);
6343 }
6344 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6345 SawFrameEscape = true;
6346 break;
6347 }
6348 case Intrinsic::localrecover: {
6350 Function *Fn = dyn_cast<Function>(FnArg);
6351 Check(Fn && !Fn->isDeclaration(),
6352 "llvm.localrecover first "
6353 "argument must be function defined in this module",
6354 Call);
6355 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6356 auto &Entry = FrameEscapeInfo[Fn];
6357 Entry.second = unsigned(
6358 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6359 break;
6360 }
6361
6362 case Intrinsic::experimental_gc_statepoint:
6363 if (auto *CI = dyn_cast<CallInst>(&Call))
6364 Check(!CI->isInlineAsm(),
6365 "gc.statepoint support for inline assembly unimplemented", CI);
6366 Check(Call.getParent()->getParent()->hasGC(),
6367 "Enclosing function does not use GC.", Call);
6368
6369 verifyStatepoint(Call);
6370 break;
6371 case Intrinsic::experimental_gc_result: {
6372 Check(Call.getParent()->getParent()->hasGC(),
6373 "Enclosing function does not use GC.", Call);
6374
6375 auto *Statepoint = Call.getArgOperand(0);
6376 if (isa<UndefValue>(Statepoint))
6377 break;
6378
6379 // Are we tied to a statepoint properly?
6380 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6381 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6382 Intrinsic::experimental_gc_statepoint,
6383 "gc.result operand #1 must be from a statepoint", Call,
6384 Call.getArgOperand(0));
6385
6386 // Check that result type matches wrapped callee.
6387 auto *TargetFuncType =
6388 cast<FunctionType>(StatepointCall->getParamElementType(2));
6389 Check(Call.getType() == TargetFuncType->getReturnType(),
6390 "gc.result result type does not match wrapped callee", Call);
6391 break;
6392 }
6393 case Intrinsic::experimental_gc_relocate: {
6394 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6395
6397 "gc.relocate must return a pointer or a vector of pointers", Call);
6398
6399 // Check that this relocate is correctly tied to the statepoint
6400
6401 // This is case for relocate on the unwinding path of an invoke statepoint
6402 if (LandingPadInst *LandingPad =
6404
6405 const BasicBlock *InvokeBB =
6406 LandingPad->getParent()->getUniquePredecessor();
6407
6408 // Landingpad relocates should have only one predecessor with invoke
6409 // statepoint terminator
6410 Check(InvokeBB, "safepoints should have unique landingpads",
6411 LandingPad->getParent());
6412 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6413 InvokeBB);
6415 "gc relocate should be linked to a statepoint", InvokeBB);
6416 } else {
6417 // In all other cases relocate should be tied to the statepoint directly.
6418 // This covers relocates on a normal return path of invoke statepoint and
6419 // relocates of a call statepoint.
6420 auto *Token = Call.getArgOperand(0);
6422 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6423 }
6424
6425 // Verify rest of the relocate arguments.
6426 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6427
6428 // Both the base and derived must be piped through the safepoint.
6431 "gc.relocate operand #2 must be integer offset", Call);
6432
6433 Value *Derived = Call.getArgOperand(2);
6434 Check(isa<ConstantInt>(Derived),
6435 "gc.relocate operand #3 must be integer offset", Call);
6436
6437 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6438 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6439
6440 // Check the bounds
6441 if (isa<UndefValue>(StatepointCall))
6442 break;
6443 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6444 .getOperandBundle(LLVMContext::OB_gc_live)) {
6445 Check(BaseIndex < Opt->Inputs.size(),
6446 "gc.relocate: statepoint base index out of bounds", Call);
6447 Check(DerivedIndex < Opt->Inputs.size(),
6448 "gc.relocate: statepoint derived index out of bounds", Call);
6449 }
6450
6451 // Relocated value must be either a pointer type or vector-of-pointer type,
6452 // but gc_relocate does not need to return the same pointer type as the
6453 // relocated pointer. It can be casted to the correct type later if it's
6454 // desired. However, they must have the same address space and 'vectorness'
6455 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6456 auto *ResultType = Call.getType();
6457 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6458 auto *BaseType = Relocate.getBasePtr()->getType();
6459
6460 Check(BaseType->isPtrOrPtrVectorTy(),
6461 "gc.relocate: relocated value must be a pointer", Call);
6462 Check(DerivedType->isPtrOrPtrVectorTy(),
6463 "gc.relocate: relocated value must be a pointer", Call);
6464
6465 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6466 "gc.relocate: vector relocates to vector and pointer to pointer",
6467 Call);
6468 Check(
6469 ResultType->getPointerAddressSpace() ==
6470 DerivedType->getPointerAddressSpace(),
6471 "gc.relocate: relocating a pointer shouldn't change its address space",
6472 Call);
6473
6474 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6475 Check(GC, "gc.relocate: calling function must have GCStrategy",
6476 Call.getFunction());
6477 if (GC) {
6478 auto isGCPtr = [&GC](Type *PTy) {
6479 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6480 };
6481 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6482 Check(isGCPtr(BaseType),
6483 "gc.relocate: relocated value must be a gc pointer", Call);
6484 Check(isGCPtr(DerivedType),
6485 "gc.relocate: relocated value must be a gc pointer", Call);
6486 }
6487 break;
6488 }
6489 case Intrinsic::experimental_patchpoint: {
6490 if (Call.getCallingConv() == CallingConv::AnyReg) {
6492 "patchpoint: invalid return type used with anyregcc", Call);
6493 }
6494 break;
6495 }
6496 case Intrinsic::eh_exceptioncode:
6497 case Intrinsic::eh_exceptionpointer: {
6499 "eh.exceptionpointer argument must be a catchpad", Call);
6500 break;
6501 }
6502 case Intrinsic::get_active_lane_mask: {
6504 "get_active_lane_mask: must return a "
6505 "vector",
6506 Call);
6507 auto *ElemTy = Call.getType()->getScalarType();
6508 Check(ElemTy->isIntegerTy(1),
6509 "get_active_lane_mask: element type is not "
6510 "i1",
6511 Call);
6512 break;
6513 }
6514 case Intrinsic::experimental_get_vector_length: {
6515 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6516 Check(!VF->isNegative() && !VF->isZero(),
6517 "get_vector_length: VF must be positive", Call);
6518 break;
6519 }
6520 case Intrinsic::masked_load: {
6521 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6522 Call);
6523
6525 Value *PassThru = Call.getArgOperand(2);
6526 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6527 Call);
6528 Check(PassThru->getType() == Call.getType(),
6529 "masked_load: pass through and return type must match", Call);
6530 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6531 cast<VectorType>(Call.getType())->getElementCount(),
6532 "masked_load: vector mask must be same length as return", Call);
6533 break;
6534 }
6535 case Intrinsic::masked_store: {
6536 Value *Val = Call.getArgOperand(0);
6538 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6539 Call);
6540 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6541 cast<VectorType>(Val->getType())->getElementCount(),
6542 "masked_store: vector mask must be same length as value", Call);
6543 break;
6544 }
6545 case Intrinsic::experimental_guard: {
6546 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6548 "experimental_guard must have exactly one "
6549 "\"deopt\" operand bundle");
6550 break;
6551 }
6552
6553 case Intrinsic::experimental_deoptimize: {
6554 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6555 Call);
6557 "experimental_deoptimize must have exactly one "
6558 "\"deopt\" operand bundle");
6560 "experimental_deoptimize return type must match caller return type");
6561
6562 if (isa<CallInst>(Call)) {
6564 Check(RI,
6565 "calls to experimental_deoptimize must be followed by a return");
6566
6567 if (!Call.getType()->isVoidTy() && RI)
6568 Check(RI->getReturnValue() == &Call,
6569 "calls to experimental_deoptimize must be followed by a return "
6570 "of the value computed by experimental_deoptimize");
6571 }
6572
6573 break;
6574 }
6575 case Intrinsic::vastart: {
6577 "va_start called in a non-varargs function");
6578 break;
6579 }
6580 case Intrinsic::get_dynamic_area_offset: {
6581 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6582 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6583 IntTy->getBitWidth(),
6584 "get_dynamic_area_offset result type must be scalar integer matching "
6585 "alloca address space width",
6586 Call);
6587 break;
6588 }
6589 case Intrinsic::masked_udiv:
6590 case Intrinsic::masked_sdiv:
6591 case Intrinsic::masked_urem:
6592 case Intrinsic::masked_srem:
6593 case Intrinsic::vector_reduce_and:
6594 case Intrinsic::vector_reduce_or:
6595 case Intrinsic::vector_reduce_xor:
6596 case Intrinsic::vector_reduce_add:
6597 case Intrinsic::vector_reduce_mul:
6598 case Intrinsic::vector_reduce_smax:
6599 case Intrinsic::vector_reduce_smin:
6600 case Intrinsic::vector_reduce_umax:
6601 case Intrinsic::vector_reduce_umin: {
6602 Type *ArgTy = Call.getArgOperand(0)->getType();
6603 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6604 "Intrinsic has incorrect argument type!");
6605 break;
6606 }
6607 case Intrinsic::vector_reduce_fmax:
6608 case Intrinsic::vector_reduce_fmin: {
6609 Type *ArgTy = Call.getArgOperand(0)->getType();
6610 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6611 "Intrinsic has incorrect argument type!");
6612 break;
6613 }
6614 case Intrinsic::vector_reduce_fadd:
6615 case Intrinsic::vector_reduce_fmul: {
6616 // Unlike the other reductions, the first argument is a start value. The
6617 // second argument is the vector to be reduced.
6618 Type *ArgTy = Call.getArgOperand(1)->getType();
6619 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6620 "Intrinsic has incorrect argument type!");
6621 break;
6622 }
6623 case Intrinsic::smul_fix:
6624 case Intrinsic::smul_fix_sat:
6625 case Intrinsic::umul_fix:
6626 case Intrinsic::umul_fix_sat:
6627 case Intrinsic::sdiv_fix:
6628 case Intrinsic::sdiv_fix_sat:
6629 case Intrinsic::udiv_fix:
6630 case Intrinsic::udiv_fix_sat: {
6631 Value *Op1 = Call.getArgOperand(0);
6632 Value *Op2 = Call.getArgOperand(1);
6634 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6635 "vector of ints");
6637 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6638 "vector of ints");
6639
6640 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6641 Check(Op3->getType()->isIntegerTy(),
6642 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6643 Check(Op3->getBitWidth() <= 32,
6644 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6645
6646 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6647 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6648 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6649 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6650 "the operands");
6651 } else {
6652 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6653 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6654 "to the width of the operands");
6655 }
6656 break;
6657 }
6658 case Intrinsic::lrint:
6659 case Intrinsic::llrint:
6660 case Intrinsic::lround:
6661 case Intrinsic::llround: {
6662 Type *ValTy = Call.getArgOperand(0)->getType();
6663 Type *ResultTy = Call.getType();
6664 auto *VTy = dyn_cast<VectorType>(ValTy);
6665 auto *RTy = dyn_cast<VectorType>(ResultTy);
6666 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6667 ExpectedName + ": argument must be floating-point or vector "
6668 "of floating-points, and result must be integer or "
6669 "vector of integers",
6670 &Call);
6671 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6672 ExpectedName + ": argument and result disagree on vector use", &Call);
6673 if (VTy) {
6674 Check(VTy->getElementCount() == RTy->getElementCount(),
6675 ExpectedName + ": argument must be same length as result", &Call);
6676 }
6677 break;
6678 }
6679 case Intrinsic::bswap: {
6680 Type *Ty = Call.getType();
6681 unsigned Size = Ty->getScalarSizeInBits();
6682 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6683 break;
6684 }
6685 case Intrinsic::invariant_start: {
6686 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6687 Check(InvariantSize &&
6688 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6689 "invariant_start parameter must be -1, 0 or a positive number",
6690 &Call);
6691 break;
6692 }
6693 case Intrinsic::matrix_multiply:
6694 case Intrinsic::matrix_transpose:
6695 case Intrinsic::matrix_column_major_load:
6696 case Intrinsic::matrix_column_major_store: {
6698 ConstantInt *Stride = nullptr;
6699 ConstantInt *NumRows;
6700 ConstantInt *NumColumns;
6701 VectorType *ResultTy;
6702 Type *Op0ElemTy = nullptr;
6703 Type *Op1ElemTy = nullptr;
6704 switch (ID) {
6705 case Intrinsic::matrix_multiply: {
6706 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6707 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6708 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6710 ->getNumElements() ==
6711 NumRows->getZExtValue() * N->getZExtValue(),
6712 "First argument of a matrix operation does not match specified "
6713 "shape!");
6715 ->getNumElements() ==
6716 N->getZExtValue() * NumColumns->getZExtValue(),
6717 "Second argument of a matrix operation does not match specified "
6718 "shape!");
6719
6720 ResultTy = cast<VectorType>(Call.getType());
6721 Op0ElemTy =
6722 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6723 Op1ElemTy =
6724 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6725 break;
6726 }
6727 case Intrinsic::matrix_transpose:
6728 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6729 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6730 ResultTy = cast<VectorType>(Call.getType());
6731 Op0ElemTy =
6732 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6733 break;
6734 case Intrinsic::matrix_column_major_load: {
6736 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6737 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6738 ResultTy = cast<VectorType>(Call.getType());
6739 break;
6740 }
6741 case Intrinsic::matrix_column_major_store: {
6743 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6744 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6745 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6746 Op0ElemTy =
6747 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6748 break;
6749 }
6750 default:
6751 llvm_unreachable("unexpected intrinsic");
6752 }
6753
6754 Check(ResultTy->getElementType()->isIntegerTy() ||
6755 ResultTy->getElementType()->isFloatingPointTy(),
6756 "Result type must be an integer or floating-point type!", IF);
6757
6758 if (Op0ElemTy)
6759 Check(ResultTy->getElementType() == Op0ElemTy,
6760 "Vector element type mismatch of the result and first operand "
6761 "vector!",
6762 IF);
6763
6764 if (Op1ElemTy)
6765 Check(ResultTy->getElementType() == Op1ElemTy,
6766 "Vector element type mismatch of the result and second operand "
6767 "vector!",
6768 IF);
6769
6771 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6772 "Result of a matrix operation does not fit in the returned vector!");
6773
6774 if (Stride) {
6775 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6776 IF);
6777 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6778 "Stride must be greater or equal than the number of rows!", IF);
6779 }
6780
6781 break;
6782 }
6783 case Intrinsic::stepvector: {
6785 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6786 VecTy->getScalarSizeInBits() >= 8,
6787 "stepvector only supported for vectors of integers "
6788 "with a bitwidth of at least 8.",
6789 &Call);
6790 break;
6791 }
6792 case Intrinsic::experimental_vector_match: {
6793 Value *Op1 = Call.getArgOperand(0);
6794 Value *Op2 = Call.getArgOperand(1);
6796
6797 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6798 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6799 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6800
6801 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6803 "Second operand must be a fixed length vector.", &Call);
6804 Check(Op1Ty->getElementType()->isIntegerTy(),
6805 "First operand must be a vector of integers.", &Call);
6806 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6807 "First two operands must have the same element type.", &Call);
6808 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6809 "First operand and mask must have the same number of elements.",
6810 &Call);
6811 Check(MaskTy->getElementType()->isIntegerTy(1),
6812 "Mask must be a vector of i1's.", &Call);
6813 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6814 &Call);
6815 break;
6816 }
6817 case Intrinsic::vector_insert: {
6818 Value *Vec = Call.getArgOperand(0);
6819 Value *SubVec = Call.getArgOperand(1);
6820 Value *Idx = Call.getArgOperand(2);
6821 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6822
6823 VectorType *VecTy = cast<VectorType>(Vec->getType());
6824 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6825
6826 ElementCount VecEC = VecTy->getElementCount();
6827 ElementCount SubVecEC = SubVecTy->getElementCount();
6828 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6829 "vector_insert parameters must have the same element "
6830 "type.",
6831 &Call);
6832 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6833 "vector_insert index must be a constant multiple of "
6834 "the subvector's known minimum vector length.");
6835
6836 // If this insertion is not the 'mixed' case where a fixed vector is
6837 // inserted into a scalable vector, ensure that the insertion of the
6838 // subvector does not overrun the parent vector.
6839 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6840 Check(IdxN < VecEC.getKnownMinValue() &&
6841 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6842 "subvector operand of vector_insert would overrun the "
6843 "vector being inserted into.");
6844 }
6845 break;
6846 }
6847 case Intrinsic::vector_extract: {
6848 Value *Vec = Call.getArgOperand(0);
6849 Value *Idx = Call.getArgOperand(1);
6850 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6851
6852 VectorType *ResultTy = cast<VectorType>(Call.getType());
6853 VectorType *VecTy = cast<VectorType>(Vec->getType());
6854
6855 ElementCount VecEC = VecTy->getElementCount();
6856 ElementCount ResultEC = ResultTy->getElementCount();
6857
6858 Check(ResultTy->getElementType() == VecTy->getElementType(),
6859 "vector_extract result must have the same element "
6860 "type as the input vector.",
6861 &Call);
6862 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6863 "vector_extract index must be a constant multiple of "
6864 "the result type's known minimum vector length.");
6865
6866 // If this extraction is not the 'mixed' case where a fixed vector is
6867 // extracted from a scalable vector, ensure that the extraction does not
6868 // overrun the parent vector.
6869 if (VecEC.isScalable() == ResultEC.isScalable()) {
6870 Check(IdxN < VecEC.getKnownMinValue() &&
6871 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6872 "vector_extract would overrun.");
6873 }
6874 break;
6875 }
6876 case Intrinsic::vector_partial_reduce_fadd:
6877 case Intrinsic::vector_partial_reduce_add: {
6880
6881 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6882 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6883
6884 Check((VecWidth % AccWidth) == 0,
6885 "Invalid vector widths for partial "
6886 "reduction. The width of the input vector "
6887 "must be a positive integer multiple of "
6888 "the width of the accumulator vector.");
6889 break;
6890 }
6891 case Intrinsic::experimental_noalias_scope_decl: {
6892 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6893 break;
6894 }
6895 case Intrinsic::preserve_array_access_index:
6896 case Intrinsic::preserve_struct_access_index:
6897 case Intrinsic::aarch64_ldaxr:
6898 case Intrinsic::aarch64_ldxr:
6899 case Intrinsic::arm_ldaex:
6900 case Intrinsic::arm_ldrex: {
6901 Type *ElemTy = Call.getParamElementType(0);
6902 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6903 &Call);
6904 break;
6905 }
6906 case Intrinsic::aarch64_stlxr:
6907 case Intrinsic::aarch64_stxr:
6908 case Intrinsic::arm_stlex:
6909 case Intrinsic::arm_strex: {
6910 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6911 Check(ElemTy,
6912 "Intrinsic requires elementtype attribute on second argument.",
6913 &Call);
6914 break;
6915 }
6916 case Intrinsic::aarch64_prefetch: {
6917 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6918 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6919 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6920 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6921 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6922 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6923 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6924 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6925 break;
6926 }
6927 case Intrinsic::aarch64_range_prefetch: {
6928 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6929 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6930 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6931 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6932 Call);
6933 break;
6934 }
6935 case Intrinsic::aarch64_stshh_atomic_store: {
6936 uint64_t Order = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6937 Check(Order == static_cast<uint64_t>(AtomicOrderingCABI::relaxed) ||
6938 Order == static_cast<uint64_t>(AtomicOrderingCABI::release) ||
6939 Order == static_cast<uint64_t>(AtomicOrderingCABI::seq_cst),
6940 "order argument to llvm.aarch64.stshh.atomic.store must be 0, 3 or 5",
6941 Call);
6942
6943 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6944 "policy argument to llvm.aarch64.stshh.atomic.store must be 0 or 1",
6945 Call);
6946
6947 uint64_t Size = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6948 Check(Size == 8 || Size == 16 || Size == 32 || Size == 64,
6949 "size argument to llvm.aarch64.stshh.atomic.store must be 8, 16, "
6950 "32 or 64",
6951 Call);
6952 break;
6953 }
6954 case Intrinsic::callbr_landingpad: {
6955 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6956 Check(CBR, "intrinstic requires callbr operand", &Call);
6957 if (!CBR)
6958 break;
6959
6960 const BasicBlock *LandingPadBB = Call.getParent();
6961 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6962 if (!PredBB) {
6963 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6964 break;
6965 }
6966 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6967 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6968 &Call);
6969 break;
6970 }
6971 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6972 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6973 "block in indirect destination list",
6974 &Call);
6975 const Instruction &First = *LandingPadBB->begin();
6976 Check(&First == &Call, "No other instructions may proceed intrinsic",
6977 &Call);
6978 break;
6979 }
6980 case Intrinsic::structured_gep: {
6981 // Parser should refuse those 2 cases.
6982 assert(Call.arg_size() >= 1);
6984
6985 Check(Call.paramHasAttr(0, Attribute::ElementType),
6986 "Intrinsic first parameter is missing an ElementType attribute",
6987 &Call);
6988
6989 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6990 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6992 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6993 Check(Index->getType()->isIntegerTy(),
6994 "Index operand type must be an integer", &Call);
6995
6996 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
6997 T = AT->getElementType();
6998 } else if (StructType *ST = dyn_cast<StructType>(T)) {
6999 Check(CI, "Indexing into a struct requires a constant int", &Call);
7000 Check(CI->getZExtValue() < ST->getNumElements(),
7001 "Indexing in a struct should be inbounds", &Call);
7002 T = ST->getElementType(CI->getZExtValue());
7003 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
7004 T = VT->getElementType();
7005 } else {
7006 CheckFailed("Reached a non-composite type with more indices to process",
7007 &Call);
7008 }
7009 }
7010 break;
7011 }
7012 case Intrinsic::structured_alloca:
7013 Check(Call.hasRetAttr(Attribute::ElementType),
7014 "@llvm.structured.alloca calls require elementtype attribute.",
7015 &Call);
7016 break;
7017 case Intrinsic::amdgcn_cs_chain: {
7018 auto CallerCC = Call.getCaller()->getCallingConv();
7019 switch (CallerCC) {
7020 case CallingConv::AMDGPU_CS:
7021 case CallingConv::AMDGPU_CS_Chain:
7022 case CallingConv::AMDGPU_CS_ChainPreserve:
7023 case CallingConv::AMDGPU_ES:
7024 case CallingConv::AMDGPU_GS:
7025 case CallingConv::AMDGPU_HS:
7026 case CallingConv::AMDGPU_LS:
7027 case CallingConv::AMDGPU_VS:
7028 break;
7029 default:
7030 CheckFailed("Intrinsic cannot be called from functions with this "
7031 "calling convention",
7032 &Call);
7033 break;
7034 }
7035
7036 Check(Call.paramHasAttr(2, Attribute::InReg),
7037 "SGPR arguments must have the `inreg` attribute", &Call);
7038 Check(!Call.paramHasAttr(3, Attribute::InReg),
7039 "VGPR arguments must not have the `inreg` attribute", &Call);
7040
7041 auto *Next = Call.getNextNode();
7042 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7043 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7044 Intrinsic::amdgcn_unreachable;
7045 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7046 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7047 break;
7048 }
7049 case Intrinsic::amdgcn_init_exec_from_input: {
7050 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7051 Check(Arg && Arg->hasInRegAttr(),
7052 "only inreg arguments to the parent function are valid as inputs to "
7053 "this intrinsic",
7054 &Call);
7055 break;
7056 }
7057 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7058 auto CallerCC = Call.getCaller()->getCallingConv();
7059 switch (CallerCC) {
7060 case CallingConv::AMDGPU_CS_Chain:
7061 case CallingConv::AMDGPU_CS_ChainPreserve:
7062 break;
7063 default:
7064 CheckFailed("Intrinsic can only be used from functions with the "
7065 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7066 "calling conventions",
7067 &Call);
7068 break;
7069 }
7070
7071 unsigned InactiveIdx = 1;
7072 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7073 "Value for inactive lanes must not have the `inreg` attribute",
7074 &Call);
7075 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7076 "Value for inactive lanes must be a function argument", &Call);
7077 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7078 "Value for inactive lanes must be a VGPR function argument", &Call);
7079 break;
7080 }
7081 case Intrinsic::amdgcn_call_whole_wave: {
7083 Check(F, "Indirect whole wave calls are not allowed", &Call);
7084
7085 CallingConv::ID CC = F->getCallingConv();
7086 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7087 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7088 &Call);
7089
7090 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7091
7092 Check(Call.arg_size() == F->arg_size(),
7093 "Call argument count must match callee argument count", &Call);
7094
7095 // The first argument of the call is the callee, and the first argument of
7096 // the callee is the active mask. The rest of the arguments must match.
7097 Check(F->arg_begin()->getType()->isIntegerTy(1),
7098 "Callee must have i1 as its first argument", &Call);
7099 for (auto [CallArg, FuncArg] :
7100 drop_begin(zip_equal(Call.args(), F->args()))) {
7101 Check(CallArg->getType() == FuncArg.getType(),
7102 "Argument types must match", &Call);
7103
7104 // Check that inreg attributes match between call site and function
7105 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7106 FuncArg.hasInRegAttr(),
7107 "Argument inreg attributes must match", &Call);
7108 }
7109 break;
7110 }
7111 case Intrinsic::amdgcn_s_prefetch_data: {
7112 Check(
7115 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7116 break;
7117 }
7118 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7119 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7120 Value *Src0 = Call.getArgOperand(0);
7121 Value *Src1 = Call.getArgOperand(1);
7122
7123 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7124 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7125 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7126 Call.getArgOperand(3));
7127 Check(BLGP <= 4, "invalid value for blgp format", Call,
7128 Call.getArgOperand(4));
7129
7130 // AMDGPU::MFMAScaleFormats values
7131 auto getFormatNumRegs = [](unsigned FormatVal) {
7132 switch (FormatVal) {
7133 case 0:
7134 case 1:
7135 return 8u;
7136 case 2:
7137 case 3:
7138 return 6u;
7139 case 4:
7140 return 4u;
7141 default:
7142 llvm_unreachable("invalid format value");
7143 }
7144 };
7145
7146 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7147 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7148 return false;
7149 unsigned NumElts = Ty->getNumElements();
7150 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7151 };
7152
7153 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7154 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7155 Check(isValidSrcASrcBVector(Src0Ty),
7156 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7157 Check(isValidSrcASrcBVector(Src1Ty),
7158 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7159
7160 // Permit excess registers for the format.
7161 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7162 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7163 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7164 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7165 break;
7166 }
7167 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7168 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7169 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7170 Value *Src0 = Call.getArgOperand(1);
7171 Value *Src1 = Call.getArgOperand(3);
7172
7173 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7174 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7175 Check(FmtA <= 4, "invalid value for matrix format", Call,
7176 Call.getArgOperand(0));
7177 Check(FmtB <= 4, "invalid value for matrix format", Call,
7178 Call.getArgOperand(2));
7179
7180 // AMDGPU::MatrixFMT values
7181 auto getFormatNumRegs = [](unsigned FormatVal) {
7182 switch (FormatVal) {
7183 case 0:
7184 case 1:
7185 return 16u;
7186 case 2:
7187 case 3:
7188 return 12u;
7189 case 4:
7190 return 8u;
7191 default:
7192 llvm_unreachable("invalid format value");
7193 }
7194 };
7195
7196 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7197 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7198 return false;
7199 unsigned NumElts = Ty->getNumElements();
7200 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7201 };
7202
7203 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7204 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7205 Check(isValidSrcASrcBVector(Src0Ty),
7206 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7207 Check(isValidSrcASrcBVector(Src1Ty),
7208 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7209
7210 // Permit excess registers for the format.
7211 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7212 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7213 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7214 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7215 break;
7216 }
7217 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7218 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7219 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7220 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7221 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7222 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7223 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7224 Value *PtrArg = Call.getArgOperand(0);
7225 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7227 "cooperative atomic intrinsics require a generic or global pointer",
7228 &Call, PtrArg);
7229
7230 // Last argument must be a MD string
7232 MDNode *MD = cast<MDNode>(Op->getMetadata());
7233 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7234 "cooperative atomic intrinsics require that the last argument is a "
7235 "metadata string",
7236 &Call, Op);
7237 break;
7238 }
7239 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7240 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7241 Value *V = Call.getArgOperand(0);
7242 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7243 Check(RegCount % 8 == 0,
7244 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7245 break;
7246 }
7247 case Intrinsic::experimental_convergence_entry:
7248 case Intrinsic::experimental_convergence_anchor:
7249 break;
7250 case Intrinsic::experimental_convergence_loop:
7251 break;
7252 case Intrinsic::ptrmask: {
7253 Type *Ty0 = Call.getArgOperand(0)->getType();
7254 Type *Ty1 = Call.getArgOperand(1)->getType();
7256 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7257 "of pointers",
7258 &Call);
7259 Check(
7260 Ty0->isVectorTy() == Ty1->isVectorTy(),
7261 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7262 &Call);
7263 if (Ty0->isVectorTy())
7264 Check(cast<VectorType>(Ty0)->getElementCount() ==
7265 cast<VectorType>(Ty1)->getElementCount(),
7266 "llvm.ptrmask intrinsic arguments must have the same number of "
7267 "elements",
7268 &Call);
7269 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7270 "llvm.ptrmask intrinsic second argument bitwidth must match "
7271 "pointer index type size of first argument",
7272 &Call);
7273 break;
7274 }
7275 case Intrinsic::thread_pointer: {
7277 DL.getDefaultGlobalsAddressSpace(),
7278 "llvm.thread.pointer intrinsic return type must be for the globals "
7279 "address space",
7280 &Call);
7281 break;
7282 }
7283 case Intrinsic::threadlocal_address: {
7284 const Value &Arg0 = *Call.getArgOperand(0);
7285 Check(isa<GlobalValue>(Arg0),
7286 "llvm.threadlocal.address first argument must be a GlobalValue");
7287 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7288 "llvm.threadlocal.address operand isThreadLocal() must be true");
7289 break;
7290 }
7291 case Intrinsic::lifetime_start:
7292 case Intrinsic::lifetime_end: {
7293 Value *Ptr = Call.getArgOperand(0);
7294 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7295 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7296 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7297 "llvm.lifetime.start/end can only be used on alloca or poison",
7298 &Call);
7299 break;
7300 }
7301 case Intrinsic::sponentry: {
7302 const unsigned StackAS = DL.getAllocaAddrSpace();
7303 const Type *RetTy = Call.getFunctionType()->getReturnType();
7304 Check(RetTy->getPointerAddressSpace() == StackAS,
7305 "llvm.sponentry must return a pointer to the stack", &Call);
7306 break;
7307 }
7308 };
7309
7310 // Verify that there aren't any unmediated control transfers between funclets.
7312 Function *F = Call.getParent()->getParent();
7313 if (F->hasPersonalityFn() &&
7314 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7315 // Run EH funclet coloring on-demand and cache results for other intrinsic
7316 // calls in this function
7317 if (BlockEHFuncletColors.empty())
7318 BlockEHFuncletColors = colorEHFunclets(*F);
7319
7320 // Check for catch-/cleanup-pad in first funclet block
7321 bool InEHFunclet = false;
7322 BasicBlock *CallBB = Call.getParent();
7323 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7324 assert(CV.size() > 0 && "Uncolored block");
7325 for (BasicBlock *ColorFirstBB : CV)
7326 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7327 It != ColorFirstBB->end())
7329 InEHFunclet = true;
7330
7331 // Check for funclet operand bundle
7332 bool HasToken = false;
7333 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7335 HasToken = true;
7336
7337 // This would cause silent code truncation in WinEHPrepare
7338 if (InEHFunclet)
7339 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7340 }
7341 }
7342}
7343
7344/// Carefully grab the subprogram from a local scope.
7345///
7346/// This carefully grabs the subprogram from a local scope, avoiding the
7347/// built-in assertions that would typically fire.
7349 if (!LocalScope)
7350 return nullptr;
7351
7352 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7353 return SP;
7354
7355 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7356 return getSubprogram(LB->getRawScope());
7357
7358 // Just return null; broken scope chains are checked elsewhere.
7359 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7360 return nullptr;
7361}
7362
7363void Verifier::visit(DbgLabelRecord &DLR) {
7365 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7366
7367 // Ignore broken !dbg attachments; they're checked elsewhere.
7368 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7369 if (!isa<DILocation>(N))
7370 return;
7371
7372 BasicBlock *BB = DLR.getParent();
7373 Function *F = BB ? BB->getParent() : nullptr;
7374
7375 // The scopes for variables and !dbg attachments must agree.
7376 DILabel *Label = DLR.getLabel();
7377 DILocation *Loc = DLR.getDebugLoc();
7378 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7379
7380 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7381 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7382 if (!LabelSP || !LocSP)
7383 return;
7384
7385 CheckDI(LabelSP == LocSP,
7386 "mismatched subprogram between #dbg_label label and !dbg attachment",
7387 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7388 Loc->getScope()->getSubprogram());
7389}
7390
7391void Verifier::visit(DbgVariableRecord &DVR) {
7392 BasicBlock *BB = DVR.getParent();
7393 Function *F = BB->getParent();
7394
7395 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7396 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7397 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7398 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7399 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7400
7401 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7402 // DIArgList, or an empty MDNode (which is a legacy representation for an
7403 // "undef" location).
7404 auto *MD = DVR.getRawLocation();
7405 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7406 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7407 "invalid #dbg record address/value", &DVR, MD, BB, F);
7408 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7409 visitValueAsMetadata(*VAM, F);
7410 if (DVR.isDbgDeclare()) {
7411 // Allow integers here to support inttoptr salvage.
7412 Type *Ty = VAM->getValue()->getType();
7413 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7414 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7415 F);
7416 }
7417 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7418 visitDIArgList(*AL, F);
7419 }
7420
7422 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7423 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7424
7426 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7427 F);
7428 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7429
7430 if (DVR.isDbgAssign()) {
7432 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7433 F);
7434 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7435 AreDebugLocsAllowed::No);
7436
7437 const auto *RawAddr = DVR.getRawAddress();
7438 // Similarly to the location above, the address for an assign
7439 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7440 // represents an undef address.
7441 CheckDI(
7442 isa<ValueAsMetadata>(RawAddr) ||
7443 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7444 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7445 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7446 visitValueAsMetadata(*VAM, F);
7447
7449 "invalid #dbg_assign address expression", &DVR,
7450 DVR.getRawAddressExpression(), BB, F);
7451 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7452
7453 // All of the linked instructions should be in the same function as DVR.
7454 for (Instruction *I : at::getAssignmentInsts(&DVR))
7455 CheckDI(DVR.getFunction() == I->getFunction(),
7456 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7457 }
7458
7459 // This check is redundant with one in visitLocalVariable().
7460 DILocalVariable *Var = DVR.getVariable();
7461 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7462 BB, F);
7463
7464 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7465 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7466 &DVR, DLNode, BB, F);
7467 DILocation *Loc = DVR.getDebugLoc();
7468
7469 // The scopes for variables and !dbg attachments must agree.
7470 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7471 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7472 if (!VarSP || !LocSP)
7473 return; // Broken scope chains are checked elsewhere.
7474
7475 CheckDI(VarSP == LocSP,
7476 "mismatched subprogram between #dbg record variable and DILocation",
7477 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7478 Loc->getScope()->getSubprogram(), BB, F);
7479
7480 verifyFnArgs(DVR);
7481}
7482
7483void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7484 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7485 auto *RetTy = cast<VectorType>(VPCast->getType());
7486 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7487 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7488 "VP cast intrinsic first argument and result vector lengths must be "
7489 "equal",
7490 *VPCast);
7491
7492 switch (VPCast->getIntrinsicID()) {
7493 default:
7494 llvm_unreachable("Unknown VP cast intrinsic");
7495 case Intrinsic::vp_trunc:
7496 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7497 "llvm.vp.trunc intrinsic first argument and result element type "
7498 "must be integer",
7499 *VPCast);
7500 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7501 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7502 "larger than the bit size of the return type",
7503 *VPCast);
7504 break;
7505 case Intrinsic::vp_zext:
7506 case Intrinsic::vp_sext:
7507 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7508 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7509 "element type must be integer",
7510 *VPCast);
7511 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7512 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7513 "argument must be smaller than the bit size of the return type",
7514 *VPCast);
7515 break;
7516 case Intrinsic::vp_fptoui:
7517 case Intrinsic::vp_fptosi:
7518 case Intrinsic::vp_lrint:
7519 case Intrinsic::vp_llrint:
7520 Check(
7521 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7522 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7523 "type must be floating-point and result element type must be integer",
7524 *VPCast);
7525 break;
7526 case Intrinsic::vp_uitofp:
7527 case Intrinsic::vp_sitofp:
7528 Check(
7529 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7530 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7531 "type must be integer and result element type must be floating-point",
7532 *VPCast);
7533 break;
7534 case Intrinsic::vp_fptrunc:
7535 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7536 "llvm.vp.fptrunc intrinsic first argument and result element type "
7537 "must be floating-point",
7538 *VPCast);
7539 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7540 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7541 "larger than the bit size of the return type",
7542 *VPCast);
7543 break;
7544 case Intrinsic::vp_fpext:
7545 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7546 "llvm.vp.fpext intrinsic first argument and result element type "
7547 "must be floating-point",
7548 *VPCast);
7549 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7550 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7551 "smaller than the bit size of the return type",
7552 *VPCast);
7553 break;
7554 case Intrinsic::vp_ptrtoint:
7555 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7556 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7557 "pointer and result element type must be integer",
7558 *VPCast);
7559 break;
7560 case Intrinsic::vp_inttoptr:
7561 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7562 "llvm.vp.inttoptr intrinsic first argument element type must be "
7563 "integer and result element type must be pointer",
7564 *VPCast);
7565 break;
7566 }
7567 }
7568
7569 switch (VPI.getIntrinsicID()) {
7570 case Intrinsic::vp_fcmp: {
7571 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7573 "invalid predicate for VP FP comparison intrinsic", &VPI);
7574 break;
7575 }
7576 case Intrinsic::vp_icmp: {
7577 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7579 "invalid predicate for VP integer comparison intrinsic", &VPI);
7580 break;
7581 }
7582 case Intrinsic::vp_is_fpclass: {
7583 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7584 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7585 "unsupported bits for llvm.vp.is.fpclass test mask");
7586 break;
7587 }
7588 case Intrinsic::experimental_vp_splice: {
7589 VectorType *VecTy = cast<VectorType>(VPI.getType());
7590 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7591 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7592 if (VPI.getParent() && VPI.getParent()->getParent()) {
7593 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7594 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7595 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7596 }
7597 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7598 (Idx >= 0 && Idx < KnownMinNumElements),
7599 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7600 "known minimum number of elements in the vector. For scalable "
7601 "vectors the minimum number of elements is determined from "
7602 "vscale_range.",
7603 &VPI);
7604 break;
7605 }
7606 }
7607}
7608
// Verify a call to one of the llvm.experimental.constrained.* intrinsics:
// the argument count must match the intrinsic's expected metadata layout,
// the per-intrinsic operand-type rules must hold, and the exception-behavior
// (and, where present, rounding-mode) metadata must be recognized.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  bool HasRoundingMD =

  // Add the expected number of metadata operands.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    // lrint/llrint are scalar-only: neither argument nor result may be a
    // vector.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    // Same scalar-only restriction as lrint/llrint above.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    // The comparison predicate is carried as metadata and must be a valid FP
    // predicate.
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP -> integer: argument is FP (scalar or vector), result is integer,
    // and scalar/vector-ness plus element counts must agree.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to check the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer -> FP: mirror image of the fptosi/fptoui checks above.
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to check the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    // FP -> FP conversions: both sides FP, matching vector-ness and element
    // counts, and the scalar width must strictly shrink (fptrunc) or grow
    // (fpext).
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
7742
7743void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7744 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7745 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7746
7747 // We don't know whether this intrinsic verified correctly.
7748 if (!V || !E || !E->isValid())
7749 return;
7750
7751 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7752 auto Fragment = E->getFragmentInfo();
7753 if (!Fragment)
7754 return;
7755
7756 // The frontend helps out GDB by emitting the members of local anonymous
7757 // unions as artificial local variables with shared storage. When SROA splits
7758 // the storage for artificial local variables that are smaller than the entire
7759 // union, the overhang piece will be outside of the allotted space for the
7760 // variable and this check fails.
7761 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7762 if (V->isArtificial())
7763 return;
7764
7765 verifyFragmentExpression(*V, *Fragment, &DVR);
7766}
7767
// Shared worker for the fragment check: ensure the fragment described by
// Fragment fits strictly inside variable \p V. \p Desc is only used to
// attach the offending entity to the diagnostic.
template <typename ValueOrMetadata>
void Verifier::verifyFragmentExpression(const DIVariable &V,
                                        ValueOrMetadata *Desc) {
  // If there's no size, the type is broken, but that should be checked
  // elsewhere.
  auto VarSize = V.getSizeInBits();
  if (!VarSize)
    return;

  unsigned FragSize = Fragment.SizeInBits;
  unsigned FragOffset = Fragment.OffsetInBits;
  // The fragment must lie entirely within the variable's storage...
  CheckDI(FragSize + FragOffset <= *VarSize,
          "fragment is larger than or outside of variable", Desc, &V);
  // ...and must not cover the whole variable (then it should not be emitted
  // as a fragment at all).
  CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
}
7784
7785void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7786 // This function does not take the scope of noninlined function arguments into
7787 // account. Don't run it if current function is nodebug, because it may
7788 // contain inlined debug intrinsics.
7789 if (!HasDebugInfo)
7790 return;
7791
7792 // For performance reasons only check non-inlined ones.
7793 if (DVR.getDebugLoc()->getInlinedAt())
7794 return;
7795
7796 DILocalVariable *Var = DVR.getVariable();
7797 CheckDI(Var, "#dbg record without variable");
7798
7799 unsigned ArgNo = Var->getArg();
7800 if (!ArgNo)
7801 return;
7802
7803 // Verify there are no duplicate function argument debug info entries.
7804 // These will cause hard-to-debug assertions in the DWARF backend.
7805 if (DebugFnArgs.size() < ArgNo)
7806 DebugFnArgs.resize(ArgNo, nullptr);
7807
7808 auto *Prev = DebugFnArgs[ArgNo - 1];
7809 DebugFnArgs[ArgNo - 1] = Var;
7810 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7811 Prev, Var);
7812}
7813
// Reject DIExpressions that use DW_OP_LLVM_entry_value: entry values are only
// allowed in MIR, with an exception for swiftasync arguments.
void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());

  // We don't know whether this intrinsic verified correctly.
  if (!E || !E->isValid())
    return;

    Value *VarValue = DVR.getVariableLocationOp(0);
    // Undef/poison locations carry no value; nothing to reject.
    if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
      return;
    // We allow EntryValues for swift async arguments, as they have an
    // ABI-guarantee to be turned into a specific register.
    if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
        ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
      return;
  }

  CheckDI(!E->isEntryValue(),
          "Entry values are only allowed in MIR unless they target a "
          "swiftasync Argument",
          &DVR);
}
7837
7838void Verifier::verifyCompileUnits() {
7839 // When more than one Module is imported into the same context, such as during
7840 // an LTO build before linking the modules, ODR type uniquing may cause types
7841 // to point to a different CU. This check does not make sense in this case.
7842 if (M.getContext().isODRUniquingDebugTypes())
7843 return;
7844 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7845 SmallPtrSet<const Metadata *, 2> Listed;
7846 if (CUs)
7847 Listed.insert_range(CUs->operands());
7848 for (const auto *CU : CUVisited)
7849 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7850 CUVisited.clear();
7851}
7852
7853void Verifier::verifyDeoptimizeCallingConvs() {
7854 if (DeoptimizeDeclarations.empty())
7855 return;
7856
7857 const Function *First = DeoptimizeDeclarations[0];
7858 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7859 Check(First->getCallingConv() == F->getCallingConv(),
7860 "All llvm.experimental.deoptimize declarations must have the same "
7861 "calling convention",
7862 First, F);
7863 }
7864}
7865
7866void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7867 const OperandBundleUse &BU) {
7868 FunctionType *FTy = Call.getFunctionType();
7869
7870 Check((FTy->getReturnType()->isPointerTy() ||
7871 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7872 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7873 "function returning a pointer or a non-returning function that has a "
7874 "void return type",
7875 Call);
7876
7877 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7878 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7879 "an argument",
7880 Call);
7881
7882 auto *Fn = cast<Function>(BU.Inputs.front());
7883 Intrinsic::ID IID = Fn->getIntrinsicID();
7884
7885 if (IID) {
7886 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7887 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7888 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7889 "invalid function argument", Call);
7890 } else {
7891 StringRef FnName = Fn->getName();
7892 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7893 FnName == "objc_claimAutoreleasedReturnValue" ||
7894 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7895 "invalid function argument", Call);
7896 }
7897}
7898
// Structural checks for llvm.experimental.noalias.scope.decl calls, plus an
// optional check that two declarations of the same scope never dominate each
// other.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    const auto *ScopeListMV = cast<MetadataAsValue>(
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    auto CurScope = GetScope(*ItCurrent);
    auto ItNext = ItCurrent;
    // Advance ItNext past the run of declarations that share CurScope.
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
7967
7968//===----------------------------------------------------------------------===//
7969// Implement the public interfaces to this file...
7970//===----------------------------------------------------------------------===//
7971
  // NOTE(review): the const_cast is presumably safe because verification only
  // inspects the function -- confirm against Verifier::verify.
  Function &F = const_cast<Function &>(f);

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}
7982
                        bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  // If the caller wants broken debug info reported separately, do not treat
  // it as a fatal verification error.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);

  bool Broken = false;
  // Verify every function body first, then module-level properties.
  for (const Function &F : M)
    Broken |= !V.verify(F);

  Broken |= !V.verify();
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}
7999
8000namespace {
8001
8002struct VerifierLegacyPass : public FunctionPass {
8003 static char ID;
8004
8005 std::unique_ptr<Verifier> V;
8006 bool FatalErrors = true;
8007
8008 VerifierLegacyPass() : FunctionPass(ID) {}
8009 explicit VerifierLegacyPass(bool FatalErrors)
8010 : FunctionPass(ID), FatalErrors(FatalErrors) {}
8011
8012 bool doInitialization(Module &M) override {
8013 V = std::make_unique<Verifier>(
8014 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
8015 return false;
8016 }
8017
8018 bool runOnFunction(Function &F) override {
8019 if (!V->verify(F) && FatalErrors) {
8020 errs() << "in function " << F.getName() << '\n';
8021 report_fatal_error("Broken function found, compilation aborted!");
8022 }
8023 return false;
8024 }
8025
8026 bool doFinalization(Module &M) override {
8027 bool HasErrors = false;
8028 for (Function &F : M)
8029 if (F.isDeclaration())
8030 HasErrors |= !V->verify(F);
8031
8032 HasErrors |= !V->verify();
8033 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
8034 report_fatal_error("Broken module found, compilation aborted!");
8035 return false;
8036 }
8037
8038 void getAnalysisUsage(AnalysisUsage &AU) const override {
8039 AU.setPreservesAll();
8040 }
8041};
8042
8043} // end anonymous namespace
8044
8045/// Helper to issue failure from the TBAA verification
8046template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8047 if (Diagnostic)
8048 return Diagnostic->CheckFailed(Args...);
8049}
8050
/// Check a TBAA-specific condition: on failure, report it through
/// CheckFailed(...) and make the enclosing function return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8058
8059/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8060/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8061/// struct-type node describing an aggregate data structure (like a struct).
8062TBAAVerifier::TBAABaseNodeSummary
8063TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8064 bool IsNewFormat) {
8065 if (BaseNode->getNumOperands() < 2) {
8066 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8067 return {true, ~0u};
8068 }
8069
8070 auto Itr = TBAABaseNodes.find(BaseNode);
8071 if (Itr != TBAABaseNodes.end())
8072 return Itr->second;
8073
8074 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8075 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8076 (void)InsertResult;
8077 assert(InsertResult.second && "We just checked!");
8078 return Result;
8079}
8080
// Uncached worker for verifyTBAABaseNode: structurally validates a
// struct-path TBAA base node (operand counts, constant and ascending field
// offsets, consistent offset bit-width) and reports that bit-width on
// success.
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
                                     const MDNode *BaseNode, bool IsNewFormat) {
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  // New-format fields come in (type, offset, size) triples; old-format fields
  // in (type, offset) pairs after the leading name operand.
  if (IsNewFormat) {
    if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have the number of operands that is a "
                  "multiple of 3!", BaseNode);
      return InvalidNode;
    }
  } else {
    if (BaseNode->getNumOperands() % 2 != 1) {
      CheckFailed("Struct tag nodes must have an odd number of operands!",
                  BaseNode);
      return InvalidNode;
    }
  }

  // Check the type size field.
  if (IsNewFormat) {
    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        BaseNode->getOperand(1));
    if (!TypeSizeNode) {
      CheckFailed("Type size nodes must be constants!", I, BaseNode);
      return InvalidNode;
    }
  }

  // Check the type name field. In the new format it can be anything.
  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes have a string as their first operand",
                BaseNode);
    return InvalidNode;
  }

  bool Failed = false;

  std::optional<APInt> PrevOffset;
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
    if (!isa<MDNode>(FieldTy)) {
      CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
    if (!OffsetEntryCI) {
      CheckFailed("Offset entries must be constants!", I, BaseNode);
      Failed = true;
      continue;
    }

    // All offset entries must share the bit-width of the first one seen.
    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          "Bitwidth between the offsets and struct type entries must match", I,
          BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field lexically the latest in struct type metadata node. This
    // mirrors the actual behavior of the alias analysis implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed("Offsets must be increasing!", I, BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();

    if (IsNewFormat) {
      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
          BaseNode->getOperand(Idx + 2));
      if (!MemberSizeNode) {
        CheckFailed("Member size entries must be constants!", I, BaseNode);
        Failed = true;
        continue;
      }
    }
  }

  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}
8191
8192static bool IsRootTBAANode(const MDNode *MD) {
8193 return MD->getNumOperands() < 2;
8194}
8195
// Recursive worker for isValidScalarTBAANode: a scalar node has 2 or 3
// operands (name, parent, optional zero offset) and a parent chain that ends
// in a root node. \p Visited guards against cycles in the parent chain.
static bool IsScalarTBAANodeImpl(const MDNode *MD,
  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
    return false;

  if (!isa<MDString>(MD->getOperand(0)))
    return false;

  // A third operand, if present, must be a constant zero offset.
  if (MD->getNumOperands() == 3) {
    if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
      return false;
  }

  // Recurse into the parent; the Visited set makes cyclic chains fail.
  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
  return Parent && Visited.insert(Parent).second &&
         (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
}
8214
8215bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8216 auto ResultIt = TBAAScalarNodes.find(MD);
8217 if (ResultIt != TBAAScalarNodes.end())
8218 return ResultIt->second;
8219
8220 SmallPtrSet<const MDNode *, 4> Visited;
8221 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8222 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8223 (void)InsertResult;
8224 assert(InsertResult.second && "Just checked!");
8225
8226 return Result;
8227}
8228
8229/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8230/// Offset in place to be the offset within the field node returned.
8231///
8232/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8233MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8234 const MDNode *BaseNode,
8235 APInt &Offset,
8236 bool IsNewFormat) {
8237 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8238
8239 // Scalar nodes have only one possible "field" -- their parent in the access
8240 // hierarchy. Offset must be zero at this point, but our caller is supposed
8241 // to check that.
8242 if (BaseNode->getNumOperands() == 2)
8243 return cast<MDNode>(BaseNode->getOperand(1));
8244
8245 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8246 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8247 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8248 Idx += NumOpsPerField) {
8249 auto *OffsetEntryCI =
8250 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8251 if (OffsetEntryCI->getValue().ugt(Offset)) {
8252 if (Idx == FirstFieldOpNo) {
8253 CheckFailed("Could not find TBAA parent in struct type node", I,
8254 BaseNode, &Offset);
8255 return nullptr;
8256 }
8257
8258 unsigned PrevIdx = Idx - NumOpsPerField;
8259 auto *PrevOffsetEntryCI =
8260 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8261 Offset -= PrevOffsetEntryCI->getValue();
8262 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8263 }
8264 }
8265
8266 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8267 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8268 BaseNode->getOperand(LastIdx + 1));
8269 Offset -= LastOffsetEntryCI->getValue();
8270 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8271}
8272
  // New-format type nodes carry at least three operands; anything smaller is
  // old-format or malformed.
  if (!Type || Type->getNumOperands() < 3)
    return false;

  // In the new format type nodes shall have a reference to the parent type as
  // its first operand.
  return isa_and_nonnull<MDNode>(Type->getOperand(0));
}
8281
  // An access tag must carry at least one operand.
  CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
            MD);

  if (I)
              "This instruction shall not have a TBAA access tag!", I);

  // Struct-path access tags start with an MDNode base type and have at least
  // base, access type, and offset operands.
  bool IsStructPathTBAA =
      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;

  CheckTBAA(IsStructPathTBAA,
            "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
            I);

  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));

  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);

  if (IsNewFormat) {
    CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
              "Access tag metadata must have either 4 or 5 operands", I, MD);
  } else {
    CheckTBAA(MD->getNumOperands() < 5,
              "Struct tag metadata must have either 3 or 4 operands", I, MD);
  }

  // Check the access size field.
  if (IsNewFormat) {
    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(3));
    CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
  }

  // Check the immutability flag. It is optional and, when present, must be a
  // constant 0 or 1.
  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(ImmutabilityFlagOpNo));
    CheckTBAA(IsImmutableCI,
              "Immutability tag on struct tag metadata must be a constant", I,
              MD);
    CheckTBAA(
        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
        "Immutability part of the struct tag metadata must be either 0 or 1", I,
        MD);
  }

  CheckTBAA(BaseNode && AccessType,
            "Malformed struct tag metadata: base and access-type "
            "should be non-null and point to Metadata nodes",
            I, MD, BaseNode, AccessType);

  if (!IsNewFormat) {
    CheckTBAA(isValidScalarTBAANode(AccessType),
              "Access type node must be a valid scalar type", I, MD,
              AccessType);
  }

  CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);

  APInt Offset = OffsetCI->getValue();
  bool SeenAccessTypeInPath = false;

  // Nodes already visited on this path; membership failure means a cycle.
  SmallPtrSet<MDNode *, 4> StructPath;

  // Walk the access path from the base node downward, rebasing Offset into
  // each field node as we descend.
  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
       BaseNode =
           getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
    if (!StructPath.insert(BaseNode).second) {
      CheckFailed("Cycle detected in struct path", I, MD);
      return false;
    }

    bool Invalid;
    unsigned BaseNodeBitWidth;
    std::tie(Invalid, BaseNodeBitWidth) =
        verifyTBAABaseNode(I, BaseNode, IsNewFormat);

    // If the base node is invalid in itself, then we've already printed all the
    // errors we wanted to print.
    if (Invalid)
      return false;

    SeenAccessTypeInPath |= BaseNode == AccessType;

    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
      CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
                MD, &Offset);

    CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
                  (BaseNodeBitWidth == 0 && Offset == 0) ||
                  (IsNewFormat && BaseNodeBitWidth == ~0u),
              "Access bit-width not the same as description bit-width", I, MD,
              BaseNodeBitWidth, Offset.getBitWidth());

    if (IsNewFormat && SeenAccessTypeInPath)
      break;
  }

  CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
            MD);
  return true;
}
8390
// Legacy pass manager registration for the IR verifier pass.
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8393
  // Factory for the legacy pass manager wrapper defined above.
  return new VerifierLegacyPass(FatalErrors);
}
8397
// Unique identity for VerifierAnalysis in the new pass manager's caches.
AnalysisKey VerifierAnalysis::Key;
8405
8410
  // Fetch (possibly cached) verification results for the whole module and
  // abort compilation on failure when FatalErrors is set.
  auto Res = AM.getResult<VerifierAnalysis>(M);
  if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
    report_fatal_error("Broken module found, compilation aborted!");

  return PreservedAnalyses::all();
}
8418
  // Per-function flavor: only IRBroken is fatal here; DebugInfoBroken is not
  // consulted in this overload.
  auto res = AM.getResult<VerifierAnalysis>(F);
  if (res.IRBroken && FatalErrors)
    report_fatal_error("Broken function found, compilation aborted!");

  return PreservedAnalyses::all();
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:689
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:730
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1526
bool isNegative() const
Definition APFloat.h:1516
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for types.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:688
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:116
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:569
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:820
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:262
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:263
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &OverloadTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:307
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:300
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:289
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:316
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144