LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
using namespace llvm;

// NOTE(review): the declaration line of this command-line flag was lost in
// extraction; only its argument list survives. Presumably a
// `static cl::opt<bool> VerifyNoAliasScopeDomination(` — confirm upstream.
    "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
    cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
             "scopes are not dominating"));
138
namespace llvm {

// NOTE(review): this is the interior of `struct VerifierSupport`; the struct
// header and several member declarations (the output stream OS, the slot
// tracker MST, DL, Context) were lost in extraction. The constructor
// init-list below references them, so they clearly exist — confirm upstream.
  const Module &M;
  const Triple &TT;

  /// Track the brokenness of the module while recursively visiting.
  bool Broken = false;
  /// Broken debug info can be "recovered" from by stripping the debug info.
  bool BrokenDebugInfo = false;
  /// Whether to treat broken debug info as an error.

  // NOTE(review): the constructor signature line was lost in extraction;
  // only the member-init list and empty body survive.
      : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
        Context(M.getContext()) {}
private:
  // The Write(...) overloads below render one kind of IR entity each onto the
  // diagnostic stream OS; CheckFailed uses them to print failure context.

  /// Print the identifier of module \p M as an IR-style comment.
  void Write(const Module *M) {
    *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
  }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
  // NOTE(review): the enclosing function signature and the `case` labels of
  // this switch were lost in extraction; only the case bodies survive.
  // Presumably this prints a DbgVariableRecord location-type enumerator as a
  // keyword — confirm against upstream.
    switch (Type) {
      *OS << "value";
      break;
      *OS << "declare";
      break;
      *OS << "assign";
      break;
      *OS << "end";
      break;
      *OS << "any";
      break;
    };
  }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
  /// Unwrap a typed MD-tuple wrapper and print the underlying tuple.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
  /// Print a type, preceded by a space; null types are ignored.
  void Write(Type *T) {
    if (!T)
      return;
    *OS << ' ' << *T;
  }

  /// Print a comdat; null comdats are ignored.
  void Write(const Comdat *C) {
    if (!C)
      return;
    *OS << *C;
  }

  /// Print an arbitrary-precision integer; null pointers are ignored.
  void Write(const APInt *AI) {
    if (!AI)
      return;
    *OS << *AI << '\n';
  }

  /// Print a plain unsigned integer (e.g. an operand index).
  void Write(const unsigned i) { *OS << i << '\n'; }

  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const Attribute *A) {
    if (!A)
      return;
    *OS << A->getAsString() << '\n';
  }

  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const AttributeSet *AS) {
    if (!AS)
      return;
    *OS << AS->getAsString() << '\n';
  }

  // NOLINTNEXTLINE(readability-identifier-naming)
  void Write(const AttributeList *AL) {
    if (!AL)
      return;
    AL->print(*OS);
  }

  /// Print anything wrapped in a Printable.
  void Write(Printable P) { *OS << P << '\n'; }

  /// Print every element of a range, dispatching to the per-element overload.
  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }

  /// Print a heterogeneous pack of entities, one Write overload per element.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  /// Base case terminating the recursive pack printer above.
  template <typename... Ts> void WriteTs() {}
280
public:
  /// A check failed, so printout out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // Mark the module broken regardless of whether we have a stream to
    // report to; verify() returns !Broken.
    Broken = true;
  }

  /// A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set a
  /// breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
302
  /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    // NOTE(review): a statement was lost in extraction here — presumably the
    // one that also sets Broken when TreatBrokenDebugInfoAsError is set.
    // Confirm against upstream.
    BrokenDebugInfo = true;
  }

  /// A debug info check failed (with values to print).
  template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                            const Ts &... Vs) {
    DebugInfoCheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
};

} // namespace llvm
322
namespace {

/// The IR verifier proper: walks a module/function via InstVisitor and
/// records brokenness through the inherited VerifierSupport state.
class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;
  DominatorTree DT;

  /// When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// Keep track of the metadata nodes that have been checked already.
  SmallPtrSet<const Metadata *, 32> MDNodes;

  /// Keep track which DISubprogram is attached to which function.
  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;

  /// Track all DICompileUnits visited.
  SmallPtrSet<const Metadata *, 2> CUVisited;

  /// The result type for a landingpad.
  Type *LandingPadResultTy;

  /// Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Whether the current function has a DISubprogram attached to it.
  bool HasDebugInfo = false;

  /// Stores the count of how many objects were passed to llvm.localescape for a
  /// given function and the largest index passed to llvm.localrecover.
  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.
  MapVector<Instruction *, Instruction *> SiblingFuncletInfo;

  /// Cache which blocks are in which funclet, if an EH funclet personality is
  /// in use. Otherwise empty.
  DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  /// Cache of attribute lists verified.
  SmallPtrSet<const void *, 32> AttributeListsVisited;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice, if they have multiple operands. In particular for very large
  // constant expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  // Keeps track of duplicate function argument debug info.
  // NOTE(review): the declaration that belongs under this comment (the
  // DebugFnArgs container cleared in verify()) was lost in extraction.

  TBAAVerifier TBAAVerifyHelper;
  ConvergenceVerifier ConvergenceVerifyHelper;

  SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;

  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);

public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
399
  /// Returns true if debug-info verification reported problems.
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }

  /// Verify a single function; returns false if the function is broken.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Every block must end in a terminator before the visitors can run;
    // bail out early (without setting Broken) when one does not.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can verify another function.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
452
  /// Verify the module that this instance of \c Verifier was initialized with.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();

    // Walk every module-level entity.
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();

    // Module-level debug-info and deoptimize calling-convention checks.
    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
490
private:
  /// Whether a metadata node is allowed to be, or contain, a DILocation.
  enum class AreDebugLocsAllowed { No, Yes };

  /// Metadata that should be treated as a range, with slightly different
  /// requirements.
  enum class RangeLikeMetadataKind {
    Range,           // MD_range
    AbsoluteSymbol,  // MD_absolute_symbol
    NoaliasAddrspace // MD_noalias_addrspace
  };

  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitGlobalIFunc(const GlobalIFunc &GI);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitDIArgList(const DIArgList &AL, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents();
  void visitModuleCommandLines();
  void visitModuleFlags();
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
                               RangeLikeMetadataKind Kind);
  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
  void visitNofreeMetadata(Instruction &I, MDNode *MD);
  void visitProfMetadata(Instruction &I, MDNode *MD);
  void visitCallStackMetadata(MDNode *MD);
  void visitMemProfMetadata(Instruction &I, MDNode *MD);
  void visitCallsiteMetadata(Instruction &I, MDNode *MD);
  void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
  void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
  void visitMMRAMetadata(Instruction &I, MDNode *MD);
  void visitAnnotationMetadata(MDNode *Annotation);
  void visitAliasScopeMetadata(const MDNode *MD);
  void visitAliasScopeListMetadata(const MDNode *MD);
  void visitAccessGroupMetadata(const MDNode *MD);

  // Specialized-MDNode visitors are stamped out from Metadata.def.
  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  void visit(DbgLabelRecord &DLR);
  void visit(DbgVariableRecord &DVR);
  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visitDbgRecords(Instruction &I);
  void visit(Instruction &I);

  // Cast and conversion instruction checks.
  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
  void visitPtrToAddrInst(PtrToAddrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitCallBase(CallBase &Call);
  void visitUnaryOperator(UnaryOperator &U);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminator(Instruction &I);
  void visitBranchInst(BranchInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitCallBrInst(CallBrInst &CBI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
  void visitVPIntrinsic(VPIntrinsic &VPI);
  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);
  // Exception-handling pad/terminator checks.
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitResumeInst(ResumeInst &RI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
  void verifyMustTailCall(CallInst &CI);
  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
  void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
                                    const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                           const Value *V, bool IsIntrinsic, bool IsInlineAsm);
  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
  void verifyUnknownProfileMetadata(MDNode *MD);
  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
  void verifyInlineAsmCall(const CallBase &Call);
  void verifyStatepoint(const CallBase &Call);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyFragmentExpression(const DbgVariableRecord &I);
  template <typename ValueOrMetadata>
  void verifyFragmentExpression(const DIVariable &V,
                                // NOTE(review): a parameter line was lost in
                                // extraction here — confirm upstream.
                                ValueOrMetadata *Desc);
  void verifyFnArgs(const DbgVariableRecord &DVR);
  void verifyNotEntryValue(const DbgVariableRecord &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();

  void verifyAttachedCallBundle(const CallBase &Call,
                                const OperandBundleUse &BU);

  /// Verify the llvm.experimental.noalias.scope.decl declarations
  void verifyNoAliasScopeDecl();
};

} // end anonymous namespace
666
/// We know that cond should be true, if not print an error message.
/// On failure this records the problem via CheckFailed and returns from the
/// enclosing (void) member function, so later checks in that visitor are
/// skipped once one fails.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

/// We know that a debug info condition should be true, if not print
/// an error message. Failures go through DebugInfoCheckFailed, which marks
/// the debug info (rather than the module) as broken.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
685
/// Validate the debug records attached to instruction \p I: the marker must
/// point back at I, PHIs may carry no records, and each record's own marker
/// and location metadata are checked before the record itself is visited.
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the initializer of this `if` was truncated in extraction
    // (presumably obtaining the record's DILocation) — confirm upstream.
    if (auto *Loc =
      visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
710
/// Per-instruction dispatch point: checks attached debug records and
/// null operands before the InstVisitor machinery runs.
/// NOTE(review): a trailing statement (presumably the dispatch into the
/// InstVisitor base) appears to have been lost in extraction.
void Verifier::visit(Instruction &I) {
  visitDbgRecords(I);
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Check(I.getOperand(i) != nullptr, "Operand is null", &I);
}
717
// Helper to iterate over indirect users. By returning false, the callback can
// ask to stop traversing further.
// NOTE(review): a parameter line (the caller-owned visited set) and the
// worklist declaration were lost in extraction; the body below references
// both. Confirm the full signature against upstream.
static void forEachUser(const Value *User,
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;

  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    if (!Visited.insert(Cur).second)
      continue;
    // A true return from the callback means "descend into Cur's users too".
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
734
/// Checks that apply to every GlobalValue: linkage, !associated and
/// !absolute_symbol metadata, DLL storage classes, dso_local consistency,
/// and that all users live in this module.
/// NOTE(review): several Check(...) opening lines in this function were lost
/// in extraction; only their message/argument lines survive below. Confirm
/// the full conditions against upstream before relying on this text.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    // !associated ties this object's liveness to another global object.
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }
  }

  // NOTE(review): Check(...) opener lost in extraction.
      "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
    // NOTE(review): Check(...) opener lost in extraction.
        "dllexport GlobalValue must have default or protected visibility",
        &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
    // NOTE(review): Check(...) opener lost in extraction.
        "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    // NOTE(review): the middle of this condition was lost in extraction.
    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Walk all (transitive) users and reject any that live outside this module.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
825
/// Checks specific to GlobalVariables: alignment, initializer/common-linkage
/// rules, the special llvm.global_ctors/dtors and llvm.used/compiler.used
/// layouts, !dbg attachments, and scalable/target-extension type bans.
/// NOTE(review): several Check(...) opening lines in this function were lost
/// in extraction; only their message/argument lines survive. Confirm the
/// full conditions against upstream before relying on this text.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
    // NOTE(review): Check(...) opener lost in extraction.
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      // NOTE(review): Check(...) opener lost in extraction.
            "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    // NOTE(review): two Check(...) openers lost in extraction.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    // NOTE(review): two Check(...) openers lost in extraction.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initalizer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
          // NOTE(review): Check(...) opener lost in extraction.
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  // NOTE(review): the declaration of the MDs container was lost in extraction.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
  // NOTE(review): Check(...) opener lost in extraction.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
937
938void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
939 SmallPtrSet<const GlobalAlias*, 4> Visited;
940 Visited.insert(&GA);
941 visitAliaseeSubExpr(Visited, GA, C);
942}
943
/// Recursively verify an alias's aliasee sub-expression, using \p Visited to
/// detect alias cycles. Only alias chains are followed; global initializers
/// are not recursed into.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    // An alias of an alias: record the inner alias for cycle detection, and
    // reject interposable targets (their resolution could change at link
    // time).
    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands, following nested aliases through their
  // aliasees and other constants directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
982
/// Verify a GlobalAlias: its linkage, the presence and type of its aliasee,
/// and the aliasee expression itself.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression, checking for cycles and invalid targets.
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1000
/// Verify a GlobalIFunc: its metadata attachments, linkage, and its resolver
/// (which must be a defined function returning a pointer).
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  // ifuncs may carry metadata, but !dbg and !prof attachments are rejected.
  for (const auto &I : MDs) {
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

      "IFunc resolver must return a pointer", &GI);

  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1035
1036void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1037 // There used to be various other llvm.dbg.* nodes, but we don't support
1038 // upgrading them and we want to reserve the namespace for future uses.
1039 if (NMD.getName().starts_with("llvm.dbg."))
1040 CheckDI(NMD.getName() == "llvm.dbg.cu",
1041 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1042 for (const MDNode *MD : NMD.operands()) {
1043 if (NMD.getName() == "llvm.dbg.cu")
1044 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1045
1046 if (!MD)
1047 continue;
1048
1049 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1050 }
1051}
1052
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each concrete MDNode subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Generic operand checks: no function-local values in global metadata, and
  // DILocations only where the caller permits them.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1107
1108void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1109 Check(MD.getValue(), "Expected valid value", &MD);
1110 Check(!MD.getValue()->getType()->isMetadataTy(),
1111 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1112
1113 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1114 if (!L)
1115 return;
1116
1117 Check(F, "function-local metadata used outside a function", L);
1118
1119 // If this was an instruction, bb, or argument, verify that it is in the
1120 // function that we expect.
1121 Function *ActualF = nullptr;
1122 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1123 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1124 ActualF = I->getParent()->getParent();
1125 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1126 ActualF = BB->getParent();
1127 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1128 ActualF = A->getParent();
1129 assert(ActualF && "Unimplemented function local metadata case!");
1130
1131 Check(ActualF == F, "function-local metadata used in wrong function", L);
1132}
1133
1134void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1135 for (const ValueAsMetadata *VAM : AL.getArgs())
1136 visitValueAsMetadata(*VAM, F);
1137}
1138
1139void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1140 Metadata *MD = MDV.getMetadata();
1141 if (auto *N = dyn_cast<MDNode>(MD)) {
1142 visitMDNode(*N, AreDebugLocsAllowed::No);
1143 return;
1144 }
1145
1146 // Only visit each node once. Metadata can be mutually recursive, so this
1147 // avoids infinite recursion here, as well as being an optimization.
1148 if (!MDNodes.insert(MD).second)
1149 return;
1150
1151 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1152 visitValueAsMetadata(*V, F);
1153
1154 if (auto *AL = dyn_cast<DIArgList>(MD))
1155 visitDIArgList(*AL, F);
1156}
1157
1158static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1159static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1160static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1161
1162void Verifier::visitDILocation(const DILocation &N) {
1163 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1164 "location requires a valid scope", &N, N.getRawScope());
1165 if (auto *IA = N.getRawInlinedAt())
1166 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1167 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1168 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1169}
1170
1171void Verifier::visitGenericDINode(const GenericDINode &N) {
1172 CheckDI(N.getTag(), "invalid tag", &N);
1173}
1174
1175void Verifier::visitDIScope(const DIScope &N) {
1176 if (auto *F = N.getRawFile())
1177 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1178}
1179
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  // Each bound/stride/bias may be absent, a signed constant, a DIVariable,
  // or a DIExpression.
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1207
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are mutually exclusive ways to size the range.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1235
1236void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1237 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1238 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1239 "GenericSubrange can have any one of count or upperBound", &N);
1240 auto *CBound = N.getRawCountNode();
1241 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1242 "Count must be signed constant or DIVariable or DIExpression", &N);
1243 auto *LBound = N.getRawLowerBound();
1244 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1245 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1246 "LowerBound must be signed constant or DIVariable or DIExpression",
1247 &N);
1248 auto *UBound = N.getRawUpperBound();
1249 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1250 "UpperBound must be signed constant or DIVariable or DIExpression",
1251 &N);
1252 auto *Stride = N.getRawStride();
1253 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1254 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1255 "Stride must be signed constant or DIVariable or DIExpression", &N);
1256}
1257
1258void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1259 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1260}
1261
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1272
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types satisfy all basic-type invariants first.
  visitDIBasicType(N);

  // Only the base-type tag and the two fixed-point encodings are legal.
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1291
1292void Verifier::visitDIStringType(const DIStringType &N) {
1293 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1294 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1295 &N);
1296}
1297
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // Pointer-to-member types carry the containing class in extra data.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  // Pascal-style set types only allow enumeration, subrange, or certain
  // integral basic types as their base type.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces are only meaningful on pointer/reference types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1359
1360/// Detect mutually exclusive flags.
1361static bool hasConflictingReferenceFlags(unsigned Flags) {
1362 return ((Flags & DINode::FlagLValueReference) &&
1363 (Flags & DINode::FlagRValueReference)) ||
1364 ((Flags & DINode::FlagTypePassByValue) &&
1365 (Flags & DINode::FlagTypePassByReference));
1366}
1367
1368void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1369 auto *Params = dyn_cast<MDTuple>(&RawParams);
1370 CheckDI(Params, "invalid template params", &N, &RawParams);
1371 for (Metadata *Op : Params->operands()) {
1372 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1373 &N, Params, Op);
1374 }
1375}
1376
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // DIBlockByRefStruct is no longer supported; reject its stale flag bit.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vector types are arrays with exactly one subrange dimension.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The Fortran-style array attributes below are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1451
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  // The type array (return type followed by parameter types) must be a tuple
  // of type references.
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1463
1464void Verifier::visitDIFile(const DIFile &N) {
1465 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1466 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1467 if (Checksum) {
1468 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1469 "invalid checksum kind", &N);
1470 size_t Size;
1471 switch (Checksum->Kind) {
1472 case DIFile::CSK_MD5:
1473 Size = 32;
1474 break;
1475 case DIFile::CSK_SHA1:
1476 Size = 40;
1477 break;
1478 case DIFile::CSK_SHA256:
1479 Size = 64;
1480 break;
1481 }
1482 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1483 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1484 "invalid checksum", &N);
1485 }
1486}
1487
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Each retained list, when present, must be a tuple of the right node kind.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this CU; CUVisited is consulted later by the verifier.
  CUVisited.insert(&N);
}
1541
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
          "invalid retained nodes, expected DILocalVariable, DILabel or "
          "DIImportedEntity",
          &N, Node, Op);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  // DIFlagAllCallsDescribed is only meaningful on definitions.
  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1606
1607void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1608 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1609 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1610 "invalid local scope", &N, N.getRawScope());
1611 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1612 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1613}
1614
1615void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1616 visitDILexicalBlockBase(N);
1617
1618 CheckDI(N.getLine() || !N.getColumn(),
1619 "cannot have column info without line info", &N);
1620}
1621
// A DILexicalBlockFile needs only the checks shared by all lexical-block
// scopes (tag and enclosing local scope).
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1625
1626void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1627 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1628 if (auto *S = N.getRawScope())
1629 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1630 if (auto *S = N.getRawDecl())
1631 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1632}
1633
1634void Verifier::visitDINamespace(const DINamespace &N) {
1635 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1636 if (auto *S = N.getRawScope())
1637 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1638}
1639
1640void Verifier::visitDIMacro(const DIMacro &N) {
1641 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1642 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1643 "invalid macinfo type", &N);
1644 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1645 if (!N.getValue().empty()) {
1646 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1647 }
1648}
1649
1650void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1651 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1652 "invalid macinfo type", &N);
1653 if (auto *F = N.getRawFile())
1654 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1655
1656 if (auto *Array = N.getRawElements()) {
1657 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1658 for (Metadata *Op : N.getElements()->operands()) {
1659 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1660 }
1661 }
1662}
1663
1664void Verifier::visitDIModule(const DIModule &N) {
1665 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1666 CheckDI(!N.getName().empty(), "anonymous module", &N);
1667}
1668
1669void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1670 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1671}
1672
1673void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1674 visitDITemplateParameter(N);
1675
1676 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1677 &N);
1678}
1679
1680void Verifier::visitDITemplateValueParameter(
1681 const DITemplateValueParameter &N) {
1682 visitDITemplateParameter(N);
1683
1684 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1685 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1686 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1687 "invalid tag", &N);
1688}
1689
1690void Verifier::visitDIVariable(const DIVariable &N) {
1691 if (auto *S = N.getRawScope())
1692 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1693 if (auto *F = N.getRawFile())
1694 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1695}
1696
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1711
1712void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1713 // Checks common to all variables.
1714 visitDIVariable(N);
1715
1716 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1717 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1718 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1719 "local variable requires a valid scope", &N, N.getRawScope());
1720 if (auto Ty = N.getType())
1721 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1722}
1723
1724void Verifier::visitDIAssignID(const DIAssignID &N) {
1725 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1726 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1727}
1728
1729void Verifier::visitDILabel(const DILabel &N) {
1730 if (auto *S = N.getRawScope())
1731 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1732 if (auto *F = N.getRawFile())
1733 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1734
1735 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1736 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1737 "label requires a valid scope", &N, N.getRawScope());
1738}
1739
1740void Verifier::visitDIExpression(const DIExpression &N) {
1741 CheckDI(N.isValid(), "invalid expression", &N);
1742}
1743
1744void Verifier::visitDIGlobalVariableExpression(
1745 const DIGlobalVariableExpression &GVE) {
1746 CheckDI(GVE.getVariable(), "missing variable");
1747 if (auto *Var = GVE.getVariable())
1748 visitDIGlobalVariable(*Var);
1749 if (auto *Expr = GVE.getExpression()) {
1750 visitDIExpression(*Expr);
1751 if (auto Fragment = Expr->getFragmentInfo())
1752 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1753 }
1754}
1755
1756void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1757 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1758 if (auto *T = N.getRawType())
1759 CheckDI(isType(T), "invalid type ref", &N, T);
1760 if (auto *F = N.getRawFile())
1761 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1762}
1763
// Verify an imported module/declaration (e.g. C++ using-directive or
// using-declaration): tag kind, optional scope, and a mandatory entity.
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
              N.getTag() == dwarf::DW_TAG_imported_declaration,
          "invalid tag", &N);
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
          N.getRawEntity());
}
1773
// Verify a comdat against object-format-specific rules.
void Verifier::visitComdat(const Comdat &C) {
  // In COFF the Module is invalid if the GlobalValue has private linkage.
  // Entities with private linkage don't have entries in the symbol table.
  if (TT.isOSBinFormatCOFF())
    if (const GlobalValue *GV = M.getNamedValue(C.getName()))
      Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
            GV);
}
1782
// Verify the optional !llvm.ident named metadata, which records the
// producing compiler's identification strings.
void Verifier::visitModuleIdents() {
  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
  if (!Idents)
    return;

  // llvm.ident takes a list of metadata entry. Each entry has only one string.
  // Scan each llvm.ident entry and make sure that this requirement is met.
  for (const MDNode *N : Idents->operands()) {
    Check(N->getNumOperands() == 1,
          "incorrect number of operands in llvm.ident metadata", N);
    Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
          ("invalid value for llvm.ident metadata entry operand"
           "(the operand should be a string)"),
          N->getOperand(0));
  }
}
1799
1800void Verifier::visitModuleCommandLines() {
1801 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1802 if (!CommandLines)
1803 return;
1804
1805 // llvm.commandline takes a list of metadata entry. Each entry has only one
1806 // string. Scan each llvm.commandline entry and make sure that this
1807 // requirement is met.
1808 for (const MDNode *N : CommandLines->operands()) {
1809 Check(N->getNumOperands() == 1,
1810 "incorrect number of operands in llvm.commandline metadata", N);
1811 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1812 ("invalid value for llvm.commandline metadata entry operand"
1813 "(the operand should be a string)"),
1814 N->getOperand(0));
1815 }
1816}
1817
1818void Verifier::visitModuleFlags() {
1819 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1820 if (!Flags) return;
1821
1822 // Scan each flag, and track the flags and requirements.
1823 DenseMap<const MDString*, const MDNode*> SeenIDs;
1824 SmallVector<const MDNode*, 16> Requirements;
1825 uint64_t PAuthABIPlatform = -1;
1826 uint64_t PAuthABIVersion = -1;
1827 for (const MDNode *MDN : Flags->operands()) {
1828 visitModuleFlag(MDN, SeenIDs, Requirements);
1829 if (MDN->getNumOperands() != 3)
1830 continue;
1831 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1832 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1833 if (const auto *PAP =
1835 PAuthABIPlatform = PAP->getZExtValue();
1836 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1837 if (const auto *PAV =
1839 PAuthABIVersion = PAV->getZExtValue();
1840 }
1841 }
1842 }
1843
1844 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1845 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1846 "'aarch64-elf-pauthabi-version' module flags must be present");
1847
1848 // Validate that the requirements in the module are valid.
1849 for (const MDNode *Requirement : Requirements) {
1850 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1851 const Metadata *ReqValue = Requirement->getOperand(1);
1852
1853 const MDNode *Op = SeenIDs.lookup(Flag);
1854 if (!Op) {
1855 CheckFailed("invalid requirement on flag, flag is not present in module",
1856 Flag);
1857 continue;
1858 }
1859
1860 if (Op->getOperand(2) != ReqValue) {
1861 CheckFailed(("invalid requirement on flag, "
1862 "flag does not have the required value"),
1863 Flag);
1864 continue;
1865 }
1866 }
1867}
1868
1869void
1870Verifier::visitModuleFlag(const MDNode *Op,
1871 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1872 SmallVectorImpl<const MDNode *> &Requirements) {
1873 // Each module flag should have three arguments, the merge behavior (a
1874 // constant int), the flag ID (an MDString), and the value.
1875 Check(Op->getNumOperands() == 3,
1876 "incorrect number of operands in module flag", Op);
1877 Module::ModFlagBehavior MFB;
1878 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1880 "invalid behavior operand in module flag (expected constant integer)",
1881 Op->getOperand(0));
1882 Check(false,
1883 "invalid behavior operand in module flag (unexpected constant)",
1884 Op->getOperand(0));
1885 }
1886 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1887 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1888 Op->getOperand(1));
1889
1890 // Check the values for behaviors with additional requirements.
1891 switch (MFB) {
1892 case Module::Error:
1893 case Module::Warning:
1894 case Module::Override:
1895 // These behavior types accept any value.
1896 break;
1897
1898 case Module::Min: {
1899 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1900 Check(V && V->getValue().isNonNegative(),
1901 "invalid value for 'min' module flag (expected constant non-negative "
1902 "integer)",
1903 Op->getOperand(2));
1904 break;
1905 }
1906
1907 case Module::Max: {
1909 "invalid value for 'max' module flag (expected constant integer)",
1910 Op->getOperand(2));
1911 break;
1912 }
1913
1914 case Module::Require: {
1915 // The value should itself be an MDNode with two operands, a flag ID (an
1916 // MDString), and a value.
1917 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1918 Check(Value && Value->getNumOperands() == 2,
1919 "invalid value for 'require' module flag (expected metadata pair)",
1920 Op->getOperand(2));
1921 Check(isa<MDString>(Value->getOperand(0)),
1922 ("invalid value for 'require' module flag "
1923 "(first value operand should be a string)"),
1924 Value->getOperand(0));
1925
1926 // Append it to the list of requirements, to check once all module flags are
1927 // scanned.
1928 Requirements.push_back(Value);
1929 break;
1930 }
1931
1932 case Module::Append:
1933 case Module::AppendUnique: {
1934 // These behavior types require the operand be an MDNode.
1935 Check(isa<MDNode>(Op->getOperand(2)),
1936 "invalid value for 'append'-type module flag "
1937 "(expected a metadata node)",
1938 Op->getOperand(2));
1939 break;
1940 }
1941 }
1942
1943 // Unless this is a "requires" flag, check the ID is unique.
1944 if (MFB != Module::Require) {
1945 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1946 Check(Inserted,
1947 "module flag identifiers must be unique (or of 'require' type)", ID);
1948 }
1949
1950 if (ID->getString() == "wchar_size") {
1951 ConstantInt *Value
1953 Check(Value, "wchar_size metadata requires constant integer argument");
1954 }
1955
1956 if (ID->getString() == "Linker Options") {
1957 // If the llvm.linker.options named metadata exists, we assume that the
1958 // bitcode reader has upgraded the module flag. Otherwise the flag might
1959 // have been created by a client directly.
1960 Check(M.getNamedMetadata("llvm.linker.options"),
1961 "'Linker Options' named metadata no longer supported");
1962 }
1963
1964 if (ID->getString() == "SemanticInterposition") {
1965 ConstantInt *Value =
1967 Check(Value,
1968 "SemanticInterposition metadata requires constant integer argument");
1969 }
1970
1971 if (ID->getString() == "CG Profile") {
1972 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1973 visitModuleFlagCGProfileEntry(MDO);
1974 }
1975}
1976
// Verify one entry of the "CG Profile" module flag: a triple of
// (caller, callee, count) where the function operands may be null and the
// count must be an integer constant.
void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
  // A function operand is valid if it is null or a (possibly bitcast)
  // reference to a Function.
  auto CheckFunction = [&](const MDOperand &FuncMDO) {
    if (!FuncMDO)
      return;
    auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
    Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
          "expected a Function or null", FuncMDO);
  };
  auto Node = dyn_cast_or_null<MDNode>(MDO);
  // Check returns from this function on failure, so Node is non-null below.
  Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
  CheckFunction(Node->getOperand(0));
  CheckFunction(Node->getOperand(1));
  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
  Check(Count && Count->getType()->isIntegerTy(),
        "expected an integer constant", Node->getOperand(2));
}
1993
// Verify that each attribute in \p Attrs is well-formed: known string-bool
// attributes must have an empty/"true"/"false" value (checked via the
// generated ATTRIBUTE_STRBOOL table from Attributes.inc), and enum/int
// attributes must carry an argument exactly when their kind requires one.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2019
2020// VerifyParameterAttrs - Check the given attributes for an argument or return
2021// value of the specified type. The value V is printed in error messages.
2022void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2023 const Value *V) {
2024 if (!Attrs.hasAttributes())
2025 return;
2026
2027 verifyAttributeTypes(Attrs, V);
2028
2029 for (Attribute Attr : Attrs)
2030 Check(Attr.isStringAttribute() ||
2031 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2032 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2033 V);
2034
2035 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2036 unsigned AttrCount =
2037 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2038 Check(AttrCount == 1,
2039 "Attribute 'immarg' is incompatible with other attributes except the "
2040 "'range' attribute",
2041 V);
2042 }
2043
2044 // Check for mutually incompatible attributes. Only inreg is compatible with
2045 // sret.
2046 unsigned AttrCount = 0;
2047 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2048 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2049 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2050 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2051 Attrs.hasAttribute(Attribute::InReg);
2052 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2053 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2054 Check(AttrCount <= 1,
2055 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2056 "'byref', and 'sret' are incompatible!",
2057 V);
2058
2059 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2060 Attrs.hasAttribute(Attribute::ReadOnly)),
2061 "Attributes "
2062 "'inalloca and readonly' are incompatible!",
2063 V);
2064
2065 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2066 Attrs.hasAttribute(Attribute::Returned)),
2067 "Attributes "
2068 "'sret and returned' are incompatible!",
2069 V);
2070
2071 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2072 Attrs.hasAttribute(Attribute::SExt)),
2073 "Attributes "
2074 "'zeroext and signext' are incompatible!",
2075 V);
2076
2077 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2078 Attrs.hasAttribute(Attribute::ReadOnly)),
2079 "Attributes "
2080 "'readnone and readonly' are incompatible!",
2081 V);
2082
2083 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2084 Attrs.hasAttribute(Attribute::WriteOnly)),
2085 "Attributes "
2086 "'readnone and writeonly' are incompatible!",
2087 V);
2088
2089 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2090 Attrs.hasAttribute(Attribute::WriteOnly)),
2091 "Attributes "
2092 "'readonly and writeonly' are incompatible!",
2093 V);
2094
2095 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2096 Attrs.hasAttribute(Attribute::AlwaysInline)),
2097 "Attributes "
2098 "'noinline and alwaysinline' are incompatible!",
2099 V);
2100
2101 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2102 Attrs.hasAttribute(Attribute::ReadNone)),
2103 "Attributes writable and readnone are incompatible!", V);
2104
2105 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2106 Attrs.hasAttribute(Attribute::ReadOnly)),
2107 "Attributes writable and readonly are incompatible!", V);
2108
2109 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2110 for (Attribute Attr : Attrs) {
2111 if (!Attr.isStringAttribute() &&
2112 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2113 CheckFailed("Attribute '" + Attr.getAsString() +
2114 "' applied to incompatible type!", V);
2115 return;
2116 }
2117 }
2118
2119 if (isa<PointerType>(Ty)) {
2120 if (Attrs.hasAttribute(Attribute::Alignment)) {
2121 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2122 Check(AttrAlign.value() <= Value::MaximumAlignment,
2123 "huge alignment values are unsupported", V);
2124 }
2125 if (Attrs.hasAttribute(Attribute::ByVal)) {
2126 Type *ByValTy = Attrs.getByValType();
2127 SmallPtrSet<Type *, 4> Visited;
2128 Check(ByValTy->isSized(&Visited),
2129 "Attribute 'byval' does not support unsized types!", V);
2130 // Check if it is or contains a target extension type that disallows being
2131 // used on the stack.
2133 "'byval' argument has illegal target extension type", V);
2134 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2135 "huge 'byval' arguments are unsupported", V);
2136 }
2137 if (Attrs.hasAttribute(Attribute::ByRef)) {
2138 SmallPtrSet<Type *, 4> Visited;
2139 Check(Attrs.getByRefType()->isSized(&Visited),
2140 "Attribute 'byref' does not support unsized types!", V);
2141 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2142 (1ULL << 32),
2143 "huge 'byref' arguments are unsupported", V);
2144 }
2145 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2146 SmallPtrSet<Type *, 4> Visited;
2147 Check(Attrs.getInAllocaType()->isSized(&Visited),
2148 "Attribute 'inalloca' does not support unsized types!", V);
2149 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2150 (1ULL << 32),
2151 "huge 'inalloca' arguments are unsupported", V);
2152 }
2153 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2154 SmallPtrSet<Type *, 4> Visited;
2155 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2156 "Attribute 'preallocated' does not support unsized types!", V);
2157 Check(
2158 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2159 (1ULL << 32),
2160 "huge 'preallocated' arguments are unsupported", V);
2161 }
2162 }
2163
2164 if (Attrs.hasAttribute(Attribute::Initializes)) {
2165 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2166 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2167 V);
2169 "Attribute 'initializes' does not support unordered ranges", V);
2170 }
2171
2172 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2173 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2174 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2175 V);
2176 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2177 "Invalid value for 'nofpclass' test mask", V);
2178 }
2179 if (Attrs.hasAttribute(Attribute::Range)) {
2180 const ConstantRange &CR =
2181 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2183 "Range bit width must match type bit width!", V);
2184 }
2185}
2186
2187void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2188 const Value *V) {
2189 if (Attrs.hasFnAttr(Attr)) {
2190 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2191 unsigned N;
2192 if (S.getAsInteger(10, N))
2193 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2194 }
2195}
2196
2197// Check parameter attributes against a function type.
2198// The value V is printed in error messages.
2199void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2200 const Value *V, bool IsIntrinsic,
2201 bool IsInlineAsm) {
2202 if (Attrs.isEmpty())
2203 return;
2204
2205 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2206 Check(Attrs.hasParentContext(Context),
2207 "Attribute list does not match Module context!", &Attrs, V);
2208 for (const auto &AttrSet : Attrs) {
2209 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2210 "Attribute set does not match Module context!", &AttrSet, V);
2211 for (const auto &A : AttrSet) {
2212 Check(A.hasParentContext(Context),
2213 "Attribute does not match Module context!", &A, V);
2214 }
2215 }
2216 }
2217
2218 bool SawNest = false;
2219 bool SawReturned = false;
2220 bool SawSRet = false;
2221 bool SawSwiftSelf = false;
2222 bool SawSwiftAsync = false;
2223 bool SawSwiftError = false;
2224
2225 // Verify return value attributes.
2226 AttributeSet RetAttrs = Attrs.getRetAttrs();
2227 for (Attribute RetAttr : RetAttrs)
2228 Check(RetAttr.isStringAttribute() ||
2229 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2230 "Attribute '" + RetAttr.getAsString() +
2231 "' does not apply to function return values",
2232 V);
2233
2234 unsigned MaxParameterWidth = 0;
2235 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2236 if (Ty->isVectorTy()) {
2237 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2238 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2239 if (Size > MaxParameterWidth)
2240 MaxParameterWidth = Size;
2241 }
2242 }
2243 };
2244 GetMaxParameterWidth(FT->getReturnType());
2245 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2246
2247 // Verify parameter attributes.
2248 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2249 Type *Ty = FT->getParamType(i);
2250 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2251
2252 if (!IsIntrinsic) {
2253 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2254 "immarg attribute only applies to intrinsics", V);
2255 if (!IsInlineAsm)
2256 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2257 "Attribute 'elementtype' can only be applied to intrinsics"
2258 " and inline asm.",
2259 V);
2260 }
2261
2262 verifyParameterAttrs(ArgAttrs, Ty, V);
2263 GetMaxParameterWidth(Ty);
2264
2265 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2266 Check(!SawNest, "More than one parameter has attribute nest!", V);
2267 SawNest = true;
2268 }
2269
2270 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2271 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2272 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2273 "Incompatible argument and return types for 'returned' attribute",
2274 V);
2275 SawReturned = true;
2276 }
2277
2278 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2279 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2280 Check(i == 0 || i == 1,
2281 "Attribute 'sret' is not on first or second parameter!", V);
2282 SawSRet = true;
2283 }
2284
2285 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2286 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2287 SawSwiftSelf = true;
2288 }
2289
2290 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2291 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2292 SawSwiftAsync = true;
2293 }
2294
2295 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2296 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2297 SawSwiftError = true;
2298 }
2299
2300 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2301 Check(i == FT->getNumParams() - 1,
2302 "inalloca isn't on the last parameter!", V);
2303 }
2304 }
2305
2306 if (!Attrs.hasFnAttrs())
2307 return;
2308
2309 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2310 for (Attribute FnAttr : Attrs.getFnAttrs())
2311 Check(FnAttr.isStringAttribute() ||
2312 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2313 "Attribute '" + FnAttr.getAsString() +
2314 "' does not apply to functions!",
2315 V);
2316
2317 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2318 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2319 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2320
2321 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2322 Check(Attrs.hasFnAttr(Attribute::NoInline),
2323 "Attribute 'optnone' requires 'noinline'!", V);
2324
2325 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2326 "Attributes 'optsize and optnone' are incompatible!", V);
2327
2328 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2329 "Attributes 'minsize and optnone' are incompatible!", V);
2330
2331 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2332 "Attributes 'optdebug and optnone' are incompatible!", V);
2333 }
2334
2335 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2336 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2337 "Attributes "
2338 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2339 V);
2340
2341 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2342 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2343 "Attributes 'optsize and optdebug' are incompatible!", V);
2344
2345 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2346 "Attributes 'minsize and optdebug' are incompatible!", V);
2347 }
2348
2349 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2350 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2351 "Attribute writable and memory without argmem: write are incompatible!",
2352 V);
2353
2354 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2355 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2356 "Attributes 'aarch64_pstate_sm_enabled and "
2357 "aarch64_pstate_sm_compatible' are incompatible!",
2358 V);
2359 }
2360
2361 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2362 Attrs.hasFnAttr("aarch64_inout_za") +
2363 Attrs.hasFnAttr("aarch64_out_za") +
2364 Attrs.hasFnAttr("aarch64_preserves_za") +
2365 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2366 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2367 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2368 "'aarch64_za_state_agnostic' are mutually exclusive",
2369 V);
2370
2371 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2372 Attrs.hasFnAttr("aarch64_in_zt0") +
2373 Attrs.hasFnAttr("aarch64_inout_zt0") +
2374 Attrs.hasFnAttr("aarch64_out_zt0") +
2375 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2376 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2377 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2378 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2379 "'aarch64_za_state_agnostic' are mutually exclusive",
2380 V);
2381
2382 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2383 const GlobalValue *GV = cast<GlobalValue>(V);
2385 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2386 }
2387
2388 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2389 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2390 if (ParamNo >= FT->getNumParams()) {
2391 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2392 return false;
2393 }
2394
2395 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2396 CheckFailed("'allocsize' " + Name +
2397 " argument must refer to an integer parameter",
2398 V);
2399 return false;
2400 }
2401
2402 return true;
2403 };
2404
2405 if (!CheckParam("element size", Args->first))
2406 return;
2407
2408 if (Args->second && !CheckParam("number of elements", *Args->second))
2409 return;
2410 }
2411
2412 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2413 AllocFnKind K = Attrs.getAllocKind();
2415 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2416 if (!is_contained(
2417 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2418 Type))
2419 CheckFailed(
2420 "'allockind()' requires exactly one of alloc, realloc, and free");
2421 if ((Type == AllocFnKind::Free) &&
2422 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2423 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2424 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2425 "or aligned modifiers.");
2426 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2427 if ((K & ZeroedUninit) == ZeroedUninit)
2428 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2429 }
2430
2431 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2432 StringRef S = A.getValueAsString();
2433 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2434 Function *Variant = M.getFunction(S);
2435 if (Variant) {
2436 Attribute Family = Attrs.getFnAttr("alloc-family");
2437 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2438 if (Family.isValid())
2439 Check(VariantFamily.isValid() &&
2440 VariantFamily.getValueAsString() == Family.getValueAsString(),
2441 "'alloc-variant-zeroed' must name a function belonging to the "
2442 "same 'alloc-family'");
2443
2444 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2445 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2446 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2447 "'alloc-variant-zeroed' must name a function with "
2448 "'allockind(\"zeroed\")'");
2449
2450 Check(FT == Variant->getFunctionType(),
2451 "'alloc-variant-zeroed' must name a function with the same "
2452 "signature");
2453 }
2454 }
2455
2456 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2457 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2458 if (VScaleMin == 0)
2459 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2460 else if (!isPowerOf2_32(VScaleMin))
2461 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2462 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2463 if (VScaleMax && VScaleMin > VScaleMax)
2464 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2465 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2466 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2467 }
2468
2469 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2470 StringRef FP = FPAttr.getValueAsString();
2471 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2472 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2473 }
2474
2475 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2476 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2477 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2478 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2479 .getValueAsString()
2480 .empty(),
2481 "\"patchable-function-entry-section\" must not be empty");
2482 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2483
2484 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2485 StringRef S = A.getValueAsString();
2486 if (S != "none" && S != "all" && S != "non-leaf")
2487 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2488 }
2489
2490 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2491 StringRef S = A.getValueAsString();
2492 if (S != "a_key" && S != "b_key")
2493 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2494 V);
2495 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2496 CheckFailed(
2497 "'sign-return-address-key' present without `sign-return-address`");
2498 }
2499 }
2500
2501 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2502 StringRef S = A.getValueAsString();
2503 if (S != "" && S != "true" && S != "false")
2504 CheckFailed(
2505 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2506 }
2507
2508 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2509 StringRef S = A.getValueAsString();
2510 if (S != "" && S != "true" && S != "false")
2511 CheckFailed(
2512 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2513 }
2514
2515 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2516 StringRef S = A.getValueAsString();
2517 if (S != "" && S != "true" && S != "false")
2518 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2519 V);
2520 }
2521
2522 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2523 StringRef S = A.getValueAsString();
2524 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2525 if (!Info)
2526 CheckFailed("invalid name for a VFABI variant: " + S, V);
2527 }
2528
2529 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2530 StringRef S = A.getValueAsString();
2532 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2533 }
2534
2535 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2536 StringRef S = A.getValueAsString();
2538 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2539 V);
2540 }
2541}
// Verify an "unknown" !prof node: besides the marker string (operand 0) it
// must carry exactly one extra operand, a non-empty string naming the pass
// that dropped the profile. Check returns on failure, so the cast on the
// last check only runs when PassName is non-null.
void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
  Check(MD->getNumOperands() == 2,
        "'unknown' !prof should have a single additional operand", MD);
  auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
  Check(PassName != nullptr,
        "'unknown' !prof should have an additional operand of type "
        "string");
  Check(!PassName->getString().empty(),
        "the 'unknown' !prof operand should not be an empty string");
}
2552
2553void Verifier::verifyFunctionMetadata(
2554 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2555 for (const auto &Pair : MDs) {
2556 if (Pair.first == LLVMContext::MD_prof) {
2557 MDNode *MD = Pair.second;
2558 Check(MD->getNumOperands() >= 2,
2559 "!prof annotations should have no less than 2 operands", MD);
2560 // We may have functions that are synthesized by the compiler, e.g. in
2561 // WPD, that we can't currently determine the entry count.
2562 if (MD->getOperand(0).equalsStr(
2564 verifyUnknownProfileMetadata(MD);
2565 continue;
2566 }
2567
2568 // Check first operand.
2569 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2570 MD);
2572 "expected string with name of the !prof annotation", MD);
2573 MDString *MDS = cast<MDString>(MD->getOperand(0));
2574 StringRef ProfName = MDS->getString();
2577 "first operand should be 'function_entry_count'"
2578 " or 'synthetic_function_entry_count'",
2579 MD);
2580
2581 // Check second operand.
2582 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2583 MD);
2585 "expected integer argument to function_entry_count", MD);
2586 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2587 MDNode *MD = Pair.second;
2588 Check(MD->getNumOperands() == 1,
2589 "!kcfi_type must have exactly one operand", MD);
2590 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2591 MD);
2593 "expected a constant operand for !kcfi_type", MD);
2594 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2595 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2596 "expected a constant integer operand for !kcfi_type", MD);
2598 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2599 }
2600 }
2601}
2602
2603void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2604 if (!ConstantExprVisited.insert(EntryC).second)
2605 return;
2606
2608 Stack.push_back(EntryC);
2609
2610 while (!Stack.empty()) {
2611 const Constant *C = Stack.pop_back_val();
2612
2613 // Check this constant expression.
2614 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2615 visitConstantExpr(CE);
2616
2617 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2618 visitConstantPtrAuth(CPA);
2619
2620 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2621 // Global Values get visited separately, but we do need to make sure
2622 // that the global value is in the correct module
2623 Check(GV->getParent() == &M, "Referencing global in another module!",
2624 EntryC, &M, GV, GV->getParent());
2625 continue;
2626 }
2627
2628 // Visit all sub-expressions.
2629 for (const Use &U : C->operands()) {
2630 const auto *OpC = dyn_cast<Constant>(U);
2631 if (!OpC)
2632 continue;
2633 if (!ConstantExprVisited.insert(OpC).second)
2634 continue;
2635 Stack.push_back(OpC);
2636 }
2637 }
2638}
2639
2640void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2641 if (CE->getOpcode() == Instruction::BitCast)
2642 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2643 CE->getType()),
2644 "Invalid bitcast", CE);
2645 else if (CE->getOpcode() == Instruction::PtrToAddr)
2646 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2647}
2648
2649void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2650 Check(CPA->getPointer()->getType()->isPointerTy(),
2651 "signed ptrauth constant base pointer must have pointer type");
2652
2653 Check(CPA->getType() == CPA->getPointer()->getType(),
2654 "signed ptrauth constant must have same type as its base pointer");
2655
2656 Check(CPA->getKey()->getBitWidth() == 32,
2657 "signed ptrauth constant key must be i32 constant integer");
2658
2660 "signed ptrauth constant address discriminator must be a pointer");
2661
2662 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2663 "signed ptrauth constant discriminator must be i64 constant integer");
2664}
2665
2666bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2667 // There shouldn't be more attribute sets than there are parameters plus the
2668 // function and return value.
2669 return Attrs.getNumAttrSets() <= Params + 2;
2670}
2671
2672void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2673 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2674 unsigned ArgNo = 0;
2675 unsigned LabelNo = 0;
2676 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2677 if (CI.Type == InlineAsm::isLabel) {
2678 ++LabelNo;
2679 continue;
2680 }
2681
2682 // Only deal with constraints that correspond to call arguments.
2683 if (!CI.hasArg())
2684 continue;
2685
2686 if (CI.isIndirect) {
2687 const Value *Arg = Call.getArgOperand(ArgNo);
2688 Check(Arg->getType()->isPointerTy(),
2689 "Operand for indirect constraint must have pointer type", &Call);
2690
2692 "Operand for indirect constraint must have elementtype attribute",
2693 &Call);
2694 } else {
2695 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2696 "Elementtype attribute can only be applied for indirect "
2697 "constraints",
2698 &Call);
2699 }
2700
2701 ArgNo++;
2702 }
2703
2704 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2705 Check(LabelNo == CallBr->getNumIndirectDests(),
2706 "Number of label constraints does not match number of callbr dests",
2707 &Call);
2708 } else {
2709 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2710 &Call);
2711 }
2712}
2713
2714/// Verify that statepoint intrinsic is well formed.
2715void Verifier::verifyStatepoint(const CallBase &Call) {
2716 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2717
2720 "gc.statepoint must read and write all memory to preserve "
2721 "reordering restrictions required by safepoint semantics",
2722 Call);
2723
2724 const int64_t NumPatchBytes =
2725 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2726 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2727 Check(NumPatchBytes >= 0,
2728 "gc.statepoint number of patchable bytes must be "
2729 "positive",
2730 Call);
2731
2732 Type *TargetElemType = Call.getParamElementType(2);
2733 Check(TargetElemType,
2734 "gc.statepoint callee argument must have elementtype attribute", Call);
2735 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2736 Check(TargetFuncType,
2737 "gc.statepoint callee elementtype must be function type", Call);
2738
2739 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2740 Check(NumCallArgs >= 0,
2741 "gc.statepoint number of arguments to underlying call "
2742 "must be positive",
2743 Call);
2744 const int NumParams = (int)TargetFuncType->getNumParams();
2745 if (TargetFuncType->isVarArg()) {
2746 Check(NumCallArgs >= NumParams,
2747 "gc.statepoint mismatch in number of vararg call args", Call);
2748
2749 // TODO: Remove this limitation
2750 Check(TargetFuncType->getReturnType()->isVoidTy(),
2751 "gc.statepoint doesn't support wrapping non-void "
2752 "vararg functions yet",
2753 Call);
2754 } else
2755 Check(NumCallArgs == NumParams,
2756 "gc.statepoint mismatch in number of call args", Call);
2757
2758 const uint64_t Flags
2759 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2760 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2761 "unknown flag used in gc.statepoint flags argument", Call);
2762
2763 // Verify that the types of the call parameter arguments match
2764 // the type of the wrapped callee.
2765 AttributeList Attrs = Call.getAttributes();
2766 for (int i = 0; i < NumParams; i++) {
2767 Type *ParamType = TargetFuncType->getParamType(i);
2768 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2769 Check(ArgType == ParamType,
2770 "gc.statepoint call argument does not match wrapped "
2771 "function type",
2772 Call);
2773
2774 if (TargetFuncType->isVarArg()) {
2775 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2776 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2777 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2778 }
2779 }
2780
2781 const int EndCallArgsInx = 4 + NumCallArgs;
2782
2783 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2784 Check(isa<ConstantInt>(NumTransitionArgsV),
2785 "gc.statepoint number of transition arguments "
2786 "must be constant integer",
2787 Call);
2788 const int NumTransitionArgs =
2789 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2790 Check(NumTransitionArgs == 0,
2791 "gc.statepoint w/inline transition bundle is deprecated", Call);
2792 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2793
2794 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2795 Check(isa<ConstantInt>(NumDeoptArgsV),
2796 "gc.statepoint number of deoptimization arguments "
2797 "must be constant integer",
2798 Call);
2799 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2800 Check(NumDeoptArgs == 0,
2801 "gc.statepoint w/inline deopt operands is deprecated", Call);
2802
2803 const int ExpectedNumArgs = 7 + NumCallArgs;
2804 Check(ExpectedNumArgs == (int)Call.arg_size(),
2805 "gc.statepoint too many arguments", Call);
2806
2807 // Check that the only uses of this gc.statepoint are gc.result or
2808 // gc.relocate calls which are tied to this statepoint and thus part
2809 // of the same statepoint sequence
2810 for (const User *U : Call.users()) {
2811 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2812 Check(UserCall, "illegal use of statepoint token", Call, U);
2813 if (!UserCall)
2814 continue;
2815 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2816 "gc.result or gc.relocate are the only value uses "
2817 "of a gc.statepoint",
2818 Call, U);
2819 if (isa<GCResultInst>(UserCall)) {
2820 Check(UserCall->getArgOperand(0) == &Call,
2821 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2822 } else if (isa<GCRelocateInst>(Call)) {
2823 Check(UserCall->getArgOperand(0) == &Call,
2824 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2825 }
2826 }
2827
2828 // Note: It is legal for a single derived pointer to be listed multiple
2829 // times. It's non-optimal, but it is legal. It can also happen after
2830 // insertion if we strip a bitcast away.
2831 // Note: It is really tempting to check that each base is relocated and
2832 // that a derived pointer is never reused as a base pointer. This turns
2833 // out to be problematic since optimizations run after safepoint insertion
2834 // can recognize equality properties that the insertion logic doesn't know
2835 // about. See example statepoint.ll in the verifier subdirectory
2836}
2837
2838void Verifier::verifyFrameRecoverIndices() {
2839 for (auto &Counts : FrameEscapeInfo) {
2840 Function *F = Counts.first;
2841 unsigned EscapedObjectCount = Counts.second.first;
2842 unsigned MaxRecoveredIndex = Counts.second.second;
2843 Check(MaxRecoveredIndex <= EscapedObjectCount,
2844 "all indices passed to llvm.localrecover must be less than the "
2845 "number of arguments passed to llvm.localescape in the parent "
2846 "function",
2847 F);
2848 }
2849}
2850
2851static Instruction *getSuccPad(Instruction *Terminator) {
2852 BasicBlock *UnwindDest;
2853 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2854 UnwindDest = II->getUnwindDest();
2855 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2856 UnwindDest = CSI->getUnwindDest();
2857 else
2858 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2859 return &*UnwindDest->getFirstNonPHIIt();
2860}
2861
// Verify that the unwind edges recorded in SiblingFuncletInfo (EH pad ->
// terminator whose unwind destination to follow) do not form a cycle, i.e.
// that no set of EH pads transitively "handle each other's exceptions".
// Each pad has at most one successor in this map, so each walk is a simple
// chain; cycles are detected with the Active set.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose entire successor chain was already checked.
  // Active: pads on the chain currently being walked (for cycle detection).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the cycle from SuccPad to collect the offending pads (and
        // their distinct terminators) for the diagnostic.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2904
2905// visitFunction - Verify that a function is ok.
2906//
2907void Verifier::visitFunction(const Function &F) {
2908 visitGlobalValue(F);
2909
2910 // Check function arguments.
2911 FunctionType *FT = F.getFunctionType();
2912 unsigned NumArgs = F.arg_size();
2913
2914 Check(&Context == &F.getContext(),
2915 "Function context does not match Module context!", &F);
2916
2917 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2918 Check(FT->getNumParams() == NumArgs,
2919 "# formal arguments must match # of arguments for function type!", &F,
2920 FT);
2921 Check(F.getReturnType()->isFirstClassType() ||
2922 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2923 "Functions cannot return aggregate values!", &F);
2924
2925 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2926 "Invalid struct return type!", &F);
2927
2928 if (MaybeAlign A = F.getAlign()) {
2929 Check(A->value() <= Value::MaximumAlignment,
2930 "huge alignment values are unsupported", &F);
2931 }
2932
2933 AttributeList Attrs = F.getAttributes();
2934
2935 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2936 "Attribute after last parameter!", &F);
2937
2938 bool IsIntrinsic = F.isIntrinsic();
2939
2940 // Check function attributes.
2941 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2942
2943 // On function declarations/definitions, we do not support the builtin
2944 // attribute. We do not check this in VerifyFunctionAttrs since that is
2945 // checking for Attributes that can/can not ever be on functions.
2946 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2947 "Attribute 'builtin' can only be applied to a callsite.", &F);
2948
2949 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2950 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2951
2952 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2953 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2954
2955 if (Attrs.hasFnAttr(Attribute::Naked))
2956 for (const Argument &Arg : F.args())
2957 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2958
2959 // Check that this function meets the restrictions on this calling convention.
2960 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2961 // restrictions can be lifted.
2962 switch (F.getCallingConv()) {
2963 default:
2964 case CallingConv::C:
2965 break;
2966 case CallingConv::X86_INTR: {
2967 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2968 "Calling convention parameter requires byval", &F);
2969 break;
2970 }
2971 case CallingConv::AMDGPU_KERNEL:
2972 case CallingConv::SPIR_KERNEL:
2973 case CallingConv::AMDGPU_CS_Chain:
2974 case CallingConv::AMDGPU_CS_ChainPreserve:
2975 Check(F.getReturnType()->isVoidTy(),
2976 "Calling convention requires void return type", &F);
2977 [[fallthrough]];
2978 case CallingConv::AMDGPU_VS:
2979 case CallingConv::AMDGPU_HS:
2980 case CallingConv::AMDGPU_GS:
2981 case CallingConv::AMDGPU_PS:
2982 case CallingConv::AMDGPU_CS:
2983 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2984 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2985 const unsigned StackAS = DL.getAllocaAddrSpace();
2986 unsigned i = 0;
2987 for (const Argument &Arg : F.args()) {
2988 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2989 "Calling convention disallows byval", &F);
2990 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2991 "Calling convention disallows preallocated", &F);
2992 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2993 "Calling convention disallows inalloca", &F);
2994
2995 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2996 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2997 // value here.
2998 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2999 "Calling convention disallows stack byref", &F);
3000 }
3001
3002 ++i;
3003 }
3004 }
3005
3006 [[fallthrough]];
3007 case CallingConv::Fast:
3008 case CallingConv::Cold:
3009 case CallingConv::Intel_OCL_BI:
3010 case CallingConv::PTX_Kernel:
3011 case CallingConv::PTX_Device:
3012 Check(!F.isVarArg(),
3013 "Calling convention does not support varargs or "
3014 "perfect forwarding!",
3015 &F);
3016 break;
3017 case CallingConv::AMDGPU_Gfx_WholeWave:
3018 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3019 "Calling convention requires first argument to be i1", &F);
3020 Check(!F.arg_begin()->hasInRegAttr(),
3021 "Calling convention requires first argument to not be inreg", &F);
3022 Check(!F.isVarArg(),
3023 "Calling convention does not support varargs or "
3024 "perfect forwarding!",
3025 &F);
3026 break;
3027 }
3028
3029 // Check that the argument values match the function type for this function...
3030 unsigned i = 0;
3031 for (const Argument &Arg : F.args()) {
3032 Check(Arg.getType() == FT->getParamType(i),
3033 "Argument value does not match function argument type!", &Arg,
3034 FT->getParamType(i));
3035 Check(Arg.getType()->isFirstClassType(),
3036 "Function arguments must have first-class types!", &Arg);
3037 if (!IsIntrinsic) {
3038 Check(!Arg.getType()->isMetadataTy(),
3039 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3040 Check(!Arg.getType()->isTokenLikeTy(),
3041 "Function takes token but isn't an intrinsic", &Arg, &F);
3042 Check(!Arg.getType()->isX86_AMXTy(),
3043 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3044 }
3045
3046 // Check that swifterror argument is only used by loads and stores.
3047 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3048 verifySwiftErrorValue(&Arg);
3049 }
3050 ++i;
3051 }
3052
3053 if (!IsIntrinsic) {
3054 Check(!F.getReturnType()->isTokenLikeTy(),
3055 "Function returns a token but isn't an intrinsic", &F);
3056 Check(!F.getReturnType()->isX86_AMXTy(),
3057 "Function returns a x86_amx but isn't an intrinsic", &F);
3058 }
3059
3060 // Get the function metadata attachments.
3062 F.getAllMetadata(MDs);
3063 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3064 verifyFunctionMetadata(MDs);
3065
3066 // Check validity of the personality function
3067 if (F.hasPersonalityFn()) {
3068 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3069 if (Per)
3070 Check(Per->getParent() == F.getParent(),
3071 "Referencing personality function in another module!", &F,
3072 F.getParent(), Per, Per->getParent());
3073 }
3074
3075 // EH funclet coloring can be expensive, recompute on-demand
3076 BlockEHFuncletColors.clear();
3077
3078 if (F.isMaterializable()) {
3079 // Function has a body somewhere we can't see.
3080 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3081 MDs.empty() ? nullptr : MDs.front().second);
3082 } else if (F.isDeclaration()) {
3083 for (const auto &I : MDs) {
3084 // This is used for call site debug information.
3085 CheckDI(I.first != LLVMContext::MD_dbg ||
3086 !cast<DISubprogram>(I.second)->isDistinct(),
3087 "function declaration may only have a unique !dbg attachment",
3088 &F);
3089 Check(I.first != LLVMContext::MD_prof,
3090 "function declaration may not have a !prof attachment", &F);
3091
3092 // Verify the metadata itself.
3093 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3094 }
3095 Check(!F.hasPersonalityFn(),
3096 "Function declaration shouldn't have a personality routine", &F);
3097 } else {
3098 // Verify that this function (which has a body) is not named "llvm.*". It
3099 // is not legal to define intrinsics.
3100 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3101
3102 // Check the entry node
3103 const BasicBlock *Entry = &F.getEntryBlock();
3104 Check(pred_empty(Entry),
3105 "Entry block to function must not have predecessors!", Entry);
3106
3107 // The address of the entry block cannot be taken, unless it is dead.
3108 if (Entry->hasAddressTaken()) {
3109 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3110 "blockaddress may not be used with the entry block!", Entry);
3111 }
3112
3113 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3114 NumKCFIAttachments = 0;
3115 // Visit metadata attachments.
3116 for (const auto &I : MDs) {
3117 // Verify that the attachment is legal.
3118 auto AllowLocs = AreDebugLocsAllowed::No;
3119 switch (I.first) {
3120 default:
3121 break;
3122 case LLVMContext::MD_dbg: {
3123 ++NumDebugAttachments;
3124 CheckDI(NumDebugAttachments == 1,
3125 "function must have a single !dbg attachment", &F, I.second);
3126 CheckDI(isa<DISubprogram>(I.second),
3127 "function !dbg attachment must be a subprogram", &F, I.second);
3128 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3129 "function definition may only have a distinct !dbg attachment",
3130 &F);
3131
3132 auto *SP = cast<DISubprogram>(I.second);
3133 const Function *&AttachedTo = DISubprogramAttachments[SP];
3134 CheckDI(!AttachedTo || AttachedTo == &F,
3135 "DISubprogram attached to more than one function", SP, &F);
3136 AttachedTo = &F;
3137 AllowLocs = AreDebugLocsAllowed::Yes;
3138 break;
3139 }
3140 case LLVMContext::MD_prof:
3141 ++NumProfAttachments;
3142 Check(NumProfAttachments == 1,
3143 "function must have a single !prof attachment", &F, I.second);
3144 break;
3145 case LLVMContext::MD_kcfi_type:
3146 ++NumKCFIAttachments;
3147 Check(NumKCFIAttachments == 1,
3148 "function must have a single !kcfi_type attachment", &F,
3149 I.second);
3150 break;
3151 }
3152
3153 // Verify the metadata itself.
3154 visitMDNode(*I.second, AllowLocs);
3155 }
3156 }
3157
3158 // If this function is actually an intrinsic, verify that it is only used in
3159 // direct call/invokes, never having its "address taken".
3160 // Only do this if the module is materialized, otherwise we don't have all the
3161 // uses.
3162 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3163 const User *U;
3164 if (F.hasAddressTaken(&U, false, true, false,
3165 /*IgnoreARCAttachedCall=*/true))
3166 Check(false, "Invalid user of intrinsic instruction!", U);
3167 }
3168
3169 // Check intrinsics' signatures.
3170 switch (F.getIntrinsicID()) {
3171 case Intrinsic::experimental_gc_get_pointer_base: {
3172 FunctionType *FT = F.getFunctionType();
3173 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3174 Check(isa<PointerType>(F.getReturnType()),
3175 "gc.get.pointer.base must return a pointer", F);
3176 Check(FT->getParamType(0) == F.getReturnType(),
3177 "gc.get.pointer.base operand and result must be of the same type", F);
3178 break;
3179 }
3180 case Intrinsic::experimental_gc_get_pointer_offset: {
3181 FunctionType *FT = F.getFunctionType();
3182 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3183 Check(isa<PointerType>(FT->getParamType(0)),
3184 "gc.get.pointer.offset operand must be a pointer", F);
3185 Check(F.getReturnType()->isIntegerTy(),
3186 "gc.get.pointer.offset must return integer", F);
3187 break;
3188 }
3189 }
3190
3191 auto *N = F.getSubprogram();
3192 HasDebugInfo = (N != nullptr);
3193 if (!HasDebugInfo)
3194 return;
3195
3196 // Check that all !dbg attachments lead to back to N.
3197 //
3198 // FIXME: Check this incrementally while visiting !dbg attachments.
3199 // FIXME: Only check when N is the canonical subprogram for F.
3200 SmallPtrSet<const MDNode *, 32> Seen;
3201 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3202 // Be careful about using DILocation here since we might be dealing with
3203 // broken code (this is the Verifier after all).
3204 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3205 if (!DL)
3206 return;
3207 if (!Seen.insert(DL).second)
3208 return;
3209
3210 Metadata *Parent = DL->getRawScope();
3211 CheckDI(Parent && isa<DILocalScope>(Parent),
3212 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3213
3214 DILocalScope *Scope = DL->getInlinedAtScope();
3215 Check(Scope, "Failed to find DILocalScope", DL);
3216
3217 if (!Seen.insert(Scope).second)
3218 return;
3219
3220 DISubprogram *SP = Scope->getSubprogram();
3221
3222 // Scope and SP could be the same MDNode and we don't want to skip
3223 // validation in that case
3224 if ((Scope != SP) && !Seen.insert(SP).second)
3225 return;
3226
3227 CheckDI(SP->describes(&F),
3228 "!dbg attachment points at wrong subprogram for function", N, &F,
3229 &I, DL, Scope, SP);
3230 };
3231 for (auto &BB : F)
3232 for (auto &I : BB) {
3233 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3234 // The llvm.loop annotations also contain two DILocations.
3235 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3236 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3237 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3238 if (BrokenDebugInfo)
3239 return;
3240 }
3241}
3242
3243// verifyBasicBlock - Verify that a basic block is well formed...
3244//
3245void Verifier::visitBasicBlock(BasicBlock &BB) {
3246 InstsInThisBlock.clear();
3247 ConvergenceVerifyHelper.visit(BB);
3248
3249 // Ensure that basic blocks have terminators!
3250 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3251
3252 // Check constraints that this basic block imposes on all of the PHI nodes in
3253 // it.
3254 if (isa<PHINode>(BB.front())) {
3255 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3257 llvm::sort(Preds);
3258 for (const PHINode &PN : BB.phis()) {
3259 Check(PN.getNumIncomingValues() == Preds.size(),
3260 "PHINode should have one entry for each predecessor of its "
3261 "parent basic block!",
3262 &PN);
3263
3264 // Get and sort all incoming values in the PHI node...
3265 Values.clear();
3266 Values.reserve(PN.getNumIncomingValues());
3267 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3268 Values.push_back(
3269 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3270 llvm::sort(Values);
3271
3272 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3273 // Check to make sure that if there is more than one entry for a
3274 // particular basic block in this PHI node, that the incoming values are
3275 // all identical.
3276 //
3277 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3278 Values[i].second == Values[i - 1].second,
3279 "PHI node has multiple entries for the same basic block with "
3280 "different incoming values!",
3281 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3282
3283 // Check to make sure that the predecessors and PHI node entries are
3284 // matched up.
3285 Check(Values[i].first == Preds[i],
3286 "PHI node entries do not match predecessors!", &PN,
3287 Values[i].first, Preds[i]);
3288 }
3289 }
3290 }
3291
3292 // Check that all instructions have their parent pointers set up correctly.
3293 for (auto &I : BB)
3294 {
3295 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3296 }
3297
3298 // Confirm that no issues arise from the debug program.
3299 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3300 &BB);
3301}
3302
3303void Verifier::visitTerminator(Instruction &I) {
3304 // Ensure that terminators only exist at the end of the basic block.
3305 Check(&I == I.getParent()->getTerminator(),
3306 "Terminator found in the middle of a basic block!", I.getParent());
3307 visitInstruction(I);
3308}
3309
3310void Verifier::visitBranchInst(BranchInst &BI) {
3311 if (BI.isConditional()) {
3313 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3314 }
3315 visitTerminator(BI);
3316}
3317
3318void Verifier::visitReturnInst(ReturnInst &RI) {
3319 Function *F = RI.getParent()->getParent();
3320 unsigned N = RI.getNumOperands();
3321 if (F->getReturnType()->isVoidTy())
3322 Check(N == 0,
3323 "Found return instr that returns non-void in Function of void "
3324 "return type!",
3325 &RI, F->getReturnType());
3326 else
3327 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3328 "Function return type does not match operand "
3329 "type of return inst!",
3330 &RI, F->getReturnType());
3331
3332 // Check to make sure that the return value has necessary properties for
3333 // terminators...
3334 visitTerminator(RI);
3335}
3336
3337void Verifier::visitSwitchInst(SwitchInst &SI) {
3338 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3339 // Check to make sure that all of the constants in the switch instruction
3340 // have the same type as the switched-on value.
3341 Type *SwitchTy = SI.getCondition()->getType();
3342 SmallPtrSet<ConstantInt*, 32> Constants;
3343 for (auto &Case : SI.cases()) {
3344 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3345 "Case value is not a constant integer.", &SI);
3346 Check(Case.getCaseValue()->getType() == SwitchTy,
3347 "Switch constants must all be same type as switch value!", &SI);
3348 Check(Constants.insert(Case.getCaseValue()).second,
3349 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3350 }
3351
3352 visitTerminator(SI);
3353}
3354
3355void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3357 "Indirectbr operand must have pointer type!", &BI);
3358 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3360 "Indirectbr destinations must all have pointer type!", &BI);
3361
3362 visitTerminator(BI);
3363}
3364
3365void Verifier::visitCallBrInst(CallBrInst &CBI) {
3366 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3367 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3368 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3369
3370 verifyInlineAsmCall(CBI);
3371 visitTerminator(CBI);
3372}
3373
3374void Verifier::visitSelectInst(SelectInst &SI) {
3375 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3376 SI.getOperand(2)),
3377 "Invalid operands for select instruction!", &SI);
3378
3379 Check(SI.getTrueValue()->getType() == SI.getType(),
3380 "Select values must have same type as select instruction!", &SI);
3381 visitInstruction(SI);
3382}
3383
3384/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3385/// a pass, if any exist, it's an error.
3386///
3387void Verifier::visitUserOp1(Instruction &I) {
3388 Check(false, "User-defined operators should not live outside of a pass!", &I);
3389}
3390
3391void Verifier::visitTruncInst(TruncInst &I) {
3392 // Get the source and destination types
3393 Type *SrcTy = I.getOperand(0)->getType();
3394 Type *DestTy = I.getType();
3395
3396 // Get the size of the types in bits, we'll need this later
3397 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3398 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3399
3400 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3401 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3402 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3403 "trunc source and destination must both be a vector or neither", &I);
3404 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3405
3406 visitInstruction(I);
3407}
3408
3409void Verifier::visitZExtInst(ZExtInst &I) {
3410 // Get the source and destination types
3411 Type *SrcTy = I.getOperand(0)->getType();
3412 Type *DestTy = I.getType();
3413
3414 // Get the size of the types in bits, we'll need this later
3415 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3416 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3417 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3418 "zext source and destination must both be a vector or neither", &I);
3419 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3420 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3421
3422 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3423
3424 visitInstruction(I);
3425}
3426
3427void Verifier::visitSExtInst(SExtInst &I) {
3428 // Get the source and destination types
3429 Type *SrcTy = I.getOperand(0)->getType();
3430 Type *DestTy = I.getType();
3431
3432 // Get the size of the types in bits, we'll need this later
3433 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3434 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3435
3436 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3437 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3438 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3439 "sext source and destination must both be a vector or neither", &I);
3440 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3441
3442 visitInstruction(I);
3443}
3444
3445void Verifier::visitFPTruncInst(FPTruncInst &I) {
3446 // Get the source and destination types
3447 Type *SrcTy = I.getOperand(0)->getType();
3448 Type *DestTy = I.getType();
3449 // Get the size of the types in bits, we'll need this later
3450 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3451 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3452
3453 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3454 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3455 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3456 "fptrunc source and destination must both be a vector or neither", &I);
3457 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3458
3459 visitInstruction(I);
3460}
3461
3462void Verifier::visitFPExtInst(FPExtInst &I) {
3463 // Get the source and destination types
3464 Type *SrcTy = I.getOperand(0)->getType();
3465 Type *DestTy = I.getType();
3466
3467 // Get the size of the types in bits, we'll need this later
3468 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3469 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3470
3471 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3472 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3473 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3474 "fpext source and destination must both be a vector or neither", &I);
3475 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3476
3477 visitInstruction(I);
3478}
3479
3480void Verifier::visitUIToFPInst(UIToFPInst &I) {
3481 // Get the source and destination types
3482 Type *SrcTy = I.getOperand(0)->getType();
3483 Type *DestTy = I.getType();
3484
3485 bool SrcVec = SrcTy->isVectorTy();
3486 bool DstVec = DestTy->isVectorTy();
3487
3488 Check(SrcVec == DstVec,
3489 "UIToFP source and dest must both be vector or scalar", &I);
3490 Check(SrcTy->isIntOrIntVectorTy(),
3491 "UIToFP source must be integer or integer vector", &I);
3492 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3493 &I);
3494
3495 if (SrcVec && DstVec)
3496 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3497 cast<VectorType>(DestTy)->getElementCount(),
3498 "UIToFP source and dest vector length mismatch", &I);
3499
3500 visitInstruction(I);
3501}
3502
3503void Verifier::visitSIToFPInst(SIToFPInst &I) {
3504 // Get the source and destination types
3505 Type *SrcTy = I.getOperand(0)->getType();
3506 Type *DestTy = I.getType();
3507
3508 bool SrcVec = SrcTy->isVectorTy();
3509 bool DstVec = DestTy->isVectorTy();
3510
3511 Check(SrcVec == DstVec,
3512 "SIToFP source and dest must both be vector or scalar", &I);
3513 Check(SrcTy->isIntOrIntVectorTy(),
3514 "SIToFP source must be integer or integer vector", &I);
3515 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3516 &I);
3517
3518 if (SrcVec && DstVec)
3519 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3520 cast<VectorType>(DestTy)->getElementCount(),
3521 "SIToFP source and dest vector length mismatch", &I);
3522
3523 visitInstruction(I);
3524}
3525
3526void Verifier::visitFPToUIInst(FPToUIInst &I) {
3527 // Get the source and destination types
3528 Type *SrcTy = I.getOperand(0)->getType();
3529 Type *DestTy = I.getType();
3530
3531 bool SrcVec = SrcTy->isVectorTy();
3532 bool DstVec = DestTy->isVectorTy();
3533
3534 Check(SrcVec == DstVec,
3535 "FPToUI source and dest must both be vector or scalar", &I);
3536 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3537 Check(DestTy->isIntOrIntVectorTy(),
3538 "FPToUI result must be integer or integer vector", &I);
3539
3540 if (SrcVec && DstVec)
3541 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3542 cast<VectorType>(DestTy)->getElementCount(),
3543 "FPToUI source and dest vector length mismatch", &I);
3544
3545 visitInstruction(I);
3546}
3547
3548void Verifier::visitFPToSIInst(FPToSIInst &I) {
3549 // Get the source and destination types
3550 Type *SrcTy = I.getOperand(0)->getType();
3551 Type *DestTy = I.getType();
3552
3553 bool SrcVec = SrcTy->isVectorTy();
3554 bool DstVec = DestTy->isVectorTy();
3555
3556 Check(SrcVec == DstVec,
3557 "FPToSI source and dest must both be vector or scalar", &I);
3558 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3559 Check(DestTy->isIntOrIntVectorTy(),
3560 "FPToSI result must be integer or integer vector", &I);
3561
3562 if (SrcVec && DstVec)
3563 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3564 cast<VectorType>(DestTy)->getElementCount(),
3565 "FPToSI source and dest vector length mismatch", &I);
3566
3567 visitInstruction(I);
3568}
3569
3570void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3571 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3572 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3573 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3574 V);
3575
3576 if (SrcTy->isVectorTy()) {
3577 auto *VSrc = cast<VectorType>(SrcTy);
3578 auto *VDest = cast<VectorType>(DestTy);
3579 Check(VSrc->getElementCount() == VDest->getElementCount(),
3580 "PtrToAddr vector length mismatch", V);
3581 }
3582
3583 Type *AddrTy = DL.getAddressType(SrcTy);
3584 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3585}
3586
void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
  // All ptrtoaddr type rules live in checkPtrToAddr so they can be shared
  // with other callers.
  checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
  visitInstruction(I);
}
3591
3592void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3593 // Get the source and destination types
3594 Type *SrcTy = I.getOperand(0)->getType();
3595 Type *DestTy = I.getType();
3596
3597 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3598
3599 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3600 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3601 &I);
3602
3603 if (SrcTy->isVectorTy()) {
3604 auto *VSrc = cast<VectorType>(SrcTy);
3605 auto *VDest = cast<VectorType>(DestTy);
3606 Check(VSrc->getElementCount() == VDest->getElementCount(),
3607 "PtrToInt Vector length mismatch", &I);
3608 }
3609
3610 visitInstruction(I);
3611}
3612
3613void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3614 // Get the source and destination types
3615 Type *SrcTy = I.getOperand(0)->getType();
3616 Type *DestTy = I.getType();
3617
3618 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3619 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3620
3621 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3622 &I);
3623 if (SrcTy->isVectorTy()) {
3624 auto *VSrc = cast<VectorType>(SrcTy);
3625 auto *VDest = cast<VectorType>(DestTy);
3626 Check(VSrc->getElementCount() == VDest->getElementCount(),
3627 "IntToPtr Vector length mismatch", &I);
3628 }
3629 visitInstruction(I);
3630}
3631
void Verifier::visitBitCastInst(BitCastInst &I) {
  // Legality of the cast (operand/result type combinations) is delegated to
  // CastInst::castIsValid.
  Check(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}
3638
3639void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3640 Type *SrcTy = I.getOperand(0)->getType();
3641 Type *DestTy = I.getType();
3642
3643 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3644 &I);
3645 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3646 &I);
3648 "AddrSpaceCast must be between different address spaces", &I);
3649 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3650 Check(SrcVTy->getElementCount() ==
3651 cast<VectorType>(DestTy)->getElementCount(),
3652 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3653 visitInstruction(I);
3654}
3655
3656/// visitPHINode - Ensure that a PHI node is well formed.
3657///
3658void Verifier::visitPHINode(PHINode &PN) {
3659 // Ensure that the PHI nodes are all grouped together at the top of the block.
3660 // This can be tested by checking whether the instruction before this is
3661 // either nonexistent (because this is begin()) or is a PHI node. If not,
3662 // then there is some other instruction before a PHI.
3663 Check(&PN == &PN.getParent()->front() ||
3665 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3666
3667 // Check that a PHI doesn't yield a Token.
3668 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3669
3670 // Check that all of the values of the PHI node have the same type as the
3671 // result.
3672 for (Value *IncValue : PN.incoming_values()) {
3673 Check(PN.getType() == IncValue->getType(),
3674 "PHI node operands are not the same type as the result!", &PN);
3675 }
3676
3677 // All other PHI node constraints are checked in the visitBasicBlock method.
3678
3679 visitInstruction(PN);
3680}
3681
3682void Verifier::visitCallBase(CallBase &Call) {
3684 "Called function must be a pointer!", Call);
3685 FunctionType *FTy = Call.getFunctionType();
3686
3687 // Verify that the correct number of arguments are being passed
3688 if (FTy->isVarArg())
3689 Check(Call.arg_size() >= FTy->getNumParams(),
3690 "Called function requires more parameters than were provided!", Call);
3691 else
3692 Check(Call.arg_size() == FTy->getNumParams(),
3693 "Incorrect number of arguments passed to called function!", Call);
3694
3695 // Verify that all arguments to the call match the function type.
3696 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3697 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3698 "Call parameter type does not match function signature!",
3699 Call.getArgOperand(i), FTy->getParamType(i), Call);
3700
3701 AttributeList Attrs = Call.getAttributes();
3702
3703 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3704 "Attribute after last parameter!", Call);
3705
3706 Function *Callee =
3708 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3709 if (IsIntrinsic)
3710 Check(Callee->getValueType() == FTy,
3711 "Intrinsic called with incompatible signature", Call);
3712
3713 // Verify if the calling convention of the callee is callable.
3715 "calling convention does not permit calls", Call);
3716
3717 // Disallow passing/returning values with alignment higher than we can
3718 // represent.
3719 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3720 // necessary.
3721 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3722 if (!Ty->isSized())
3723 return;
3724 Align ABIAlign = DL.getABITypeAlign(Ty);
3725 Check(ABIAlign.value() <= Value::MaximumAlignment,
3726 "Incorrect alignment of " + Message + " to called function!", Call);
3727 };
3728
3729 if (!IsIntrinsic) {
3730 VerifyTypeAlign(FTy->getReturnType(), "return type");
3731 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3732 Type *Ty = FTy->getParamType(i);
3733 VerifyTypeAlign(Ty, "argument passed");
3734 }
3735 }
3736
3737 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3738 // Don't allow speculatable on call sites, unless the underlying function
3739 // declaration is also speculatable.
3740 Check(Callee && Callee->isSpeculatable(),
3741 "speculatable attribute may not apply to call sites", Call);
3742 }
3743
3744 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3745 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3746 "preallocated as a call site attribute can only be on "
3747 "llvm.call.preallocated.arg");
3748 }
3749
3750 // Verify call attributes.
3751 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3752
3753 // Conservatively check the inalloca argument.
3754 // We have a bug if we can find that there is an underlying alloca without
3755 // inalloca.
3756 if (Call.hasInAllocaArgument()) {
3757 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3758 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3759 Check(AI->isUsedWithInAlloca(),
3760 "inalloca argument for call has mismatched alloca", AI, Call);
3761 }
3762
3763 // For each argument of the callsite, if it has the swifterror argument,
3764 // make sure the underlying alloca/parameter it comes from has a swifterror as
3765 // well.
3766 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3767 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3768 Value *SwiftErrorArg = Call.getArgOperand(i);
3769 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3770 Check(AI->isSwiftError(),
3771 "swifterror argument for call has mismatched alloca", AI, Call);
3772 continue;
3773 }
3774 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3775 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3776 SwiftErrorArg, Call);
3777 Check(ArgI->hasSwiftErrorAttr(),
3778 "swifterror argument for call has mismatched parameter", ArgI,
3779 Call);
3780 }
3781
3782 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3783 // Don't allow immarg on call sites, unless the underlying declaration
3784 // also has the matching immarg.
3785 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3786 "immarg may not apply only to call sites", Call.getArgOperand(i),
3787 Call);
3788 }
3789
3790 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3791 Value *ArgVal = Call.getArgOperand(i);
3792 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3793 "immarg operand has non-immediate parameter", ArgVal, Call);
3794
3795 // If the imm-arg is an integer and also has a range attached,
3796 // check if the given value is within the range.
3797 if (Call.paramHasAttr(i, Attribute::Range)) {
3798 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3799 const ConstantRange &CR =
3800 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3801 Check(CR.contains(CI->getValue()),
3802 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3803 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3804 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3805 Call);
3806 }
3807 }
3808 }
3809
3810 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3811 Value *ArgVal = Call.getArgOperand(i);
3812 bool hasOB =
3814 bool isMustTail = Call.isMustTailCall();
3815 Check(hasOB != isMustTail,
3816 "preallocated operand either requires a preallocated bundle or "
3817 "the call to be musttail (but not both)",
3818 ArgVal, Call);
3819 }
3820 }
3821
3822 if (FTy->isVarArg()) {
3823 // FIXME? is 'nest' even legal here?
3824 bool SawNest = false;
3825 bool SawReturned = false;
3826
3827 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3828 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3829 SawNest = true;
3830 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3831 SawReturned = true;
3832 }
3833
3834 // Check attributes on the varargs part.
3835 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3836 Type *Ty = Call.getArgOperand(Idx)->getType();
3837 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3838 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3839
3840 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3841 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3842 SawNest = true;
3843 }
3844
3845 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3846 Check(!SawReturned, "More than one parameter has attribute returned!",
3847 Call);
3848 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3849 "Incompatible argument and return types for 'returned' "
3850 "attribute",
3851 Call);
3852 SawReturned = true;
3853 }
3854
3855 // Statepoint intrinsic is vararg but the wrapped function may be not.
3856 // Allow sret here and check the wrapped function in verifyStatepoint.
3857 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3858 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3859 "Attribute 'sret' cannot be used for vararg call arguments!",
3860 Call);
3861
3862 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3863 Check(Idx == Call.arg_size() - 1,
3864 "inalloca isn't on the last argument!", Call);
3865 }
3866 }
3867
3868 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3869 if (!IsIntrinsic) {
3870 for (Type *ParamTy : FTy->params()) {
3871 Check(!ParamTy->isMetadataTy(),
3872 "Function has metadata parameter but isn't an intrinsic", Call);
3873 Check(!ParamTy->isTokenLikeTy(),
3874 "Function has token parameter but isn't an intrinsic", Call);
3875 }
3876 }
3877
3878 // Verify that indirect calls don't return tokens.
3879 if (!Call.getCalledFunction()) {
3880 Check(!FTy->getReturnType()->isTokenLikeTy(),
3881 "Return type cannot be token for indirect call!");
3882 Check(!FTy->getReturnType()->isX86_AMXTy(),
3883 "Return type cannot be x86_amx for indirect call!");
3884 }
3885
3887 visitIntrinsicCall(ID, Call);
3888
3889 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3890 // most one "gc-transition", at most one "cfguardtarget", at most one
3891 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3892 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3893 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3894 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3895 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3896 FoundAttachedCallBundle = false;
3897 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3898 OperandBundleUse BU = Call.getOperandBundleAt(i);
3899 uint32_t Tag = BU.getTagID();
3900 if (Tag == LLVMContext::OB_deopt) {
3901 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3902 FoundDeoptBundle = true;
3903 } else if (Tag == LLVMContext::OB_gc_transition) {
3904 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3905 Call);
3906 FoundGCTransitionBundle = true;
3907 } else if (Tag == LLVMContext::OB_funclet) {
3908 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3909 FoundFuncletBundle = true;
3910 Check(BU.Inputs.size() == 1,
3911 "Expected exactly one funclet bundle operand", Call);
3912 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3913 "Funclet bundle operands should correspond to a FuncletPadInst",
3914 Call);
3915 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3916 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3917 Call);
3918 FoundCFGuardTargetBundle = true;
3919 Check(BU.Inputs.size() == 1,
3920 "Expected exactly one cfguardtarget bundle operand", Call);
3921 } else if (Tag == LLVMContext::OB_ptrauth) {
3922 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3923 FoundPtrauthBundle = true;
3924 Check(BU.Inputs.size() == 2,
3925 "Expected exactly two ptrauth bundle operands", Call);
3926 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3927 BU.Inputs[0]->getType()->isIntegerTy(32),
3928 "Ptrauth bundle key operand must be an i32 constant", Call);
3929 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3930 "Ptrauth bundle discriminator operand must be an i64", Call);
3931 } else if (Tag == LLVMContext::OB_kcfi) {
3932 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3933 FoundKCFIBundle = true;
3934 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3935 Call);
3936 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3937 BU.Inputs[0]->getType()->isIntegerTy(32),
3938 "Kcfi bundle operand must be an i32 constant", Call);
3939 } else if (Tag == LLVMContext::OB_preallocated) {
3940 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3941 Call);
3942 FoundPreallocatedBundle = true;
3943 Check(BU.Inputs.size() == 1,
3944 "Expected exactly one preallocated bundle operand", Call);
3945 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3946 Check(Input &&
3947 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3948 "\"preallocated\" argument must be a token from "
3949 "llvm.call.preallocated.setup",
3950 Call);
3951 } else if (Tag == LLVMContext::OB_gc_live) {
3952 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3953 FoundGCLiveBundle = true;
3955 Check(!FoundAttachedCallBundle,
3956 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3957 FoundAttachedCallBundle = true;
3958 verifyAttachedCallBundle(Call, BU);
3959 }
3960 }
3961
3962 // Verify that callee and callsite agree on whether to use pointer auth.
3963 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3964 "Direct call cannot have a ptrauth bundle", Call);
3965
3966 // Verify that each inlinable callsite of a debug-info-bearing function in a
3967 // debug-info-bearing function has a debug location attached to it. Failure to
3968 // do so causes assertion failures when the inliner sets up inline scope info
3969 // (Interposable functions are not inlinable, neither are functions without
3970 // definitions.)
3976 "inlinable function call in a function with "
3977 "debug info must have a !dbg location",
3978 Call);
3979
3980 if (Call.isInlineAsm())
3981 verifyInlineAsmCall(Call);
3982
3983 ConvergenceVerifyHelper.visit(Call);
3984
3985 visitInstruction(Call);
3986}
3987
3988void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3989 StringRef Context) {
3990 Check(!Attrs.contains(Attribute::InAlloca),
3991 Twine("inalloca attribute not allowed in ") + Context);
3992 Check(!Attrs.contains(Attribute::InReg),
3993 Twine("inreg attribute not allowed in ") + Context);
3994 Check(!Attrs.contains(Attribute::SwiftError),
3995 Twine("swifterror attribute not allowed in ") + Context);
3996 Check(!Attrs.contains(Attribute::Preallocated),
3997 Twine("preallocated attribute not allowed in ") + Context);
3998 Check(!Attrs.contains(Attribute::ByRef),
3999 Twine("byref attribute not allowed in ") + Context);
4000}
4001
4002/// Two types are "congruent" if they are identical, or if they are both pointer
4003/// types with different pointee types and the same address space.
4004static bool isTypeCongruent(Type *L, Type *R) {
4005 if (L == R)
4006 return true;
4009 if (!PL || !PR)
4010 return false;
4011 return PL->getAddressSpace() == PR->getAddressSpace();
4012}
4013
4014static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4015 static const Attribute::AttrKind ABIAttrs[] = {
4016 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4017 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4018 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4019 Attribute::ByRef};
4020 AttrBuilder Copy(C);
4021 for (auto AK : ABIAttrs) {
4022 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4023 if (Attr.isValid())
4024 Copy.addAttribute(Attr);
4025 }
4026
4027 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4028 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4029 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4030 Attrs.hasParamAttr(I, Attribute::ByRef)))
4031 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4032 return Copy;
4033}
4034
4035void Verifier::verifyMustTailCall(CallInst &CI) {
4036 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4037
4038 Function *F = CI.getParent()->getParent();
4039 FunctionType *CallerTy = F->getFunctionType();
4040 FunctionType *CalleeTy = CI.getFunctionType();
4041 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4042 "cannot guarantee tail call due to mismatched varargs", &CI);
4043 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4044 "cannot guarantee tail call due to mismatched return types", &CI);
4045
4046 // - The calling conventions of the caller and callee must match.
4047 Check(F->getCallingConv() == CI.getCallingConv(),
4048 "cannot guarantee tail call due to mismatched calling conv", &CI);
4049
4050 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4051 // or a pointer bitcast followed by a ret instruction.
4052 // - The ret instruction must return the (possibly bitcasted) value
4053 // produced by the call or void.
4054 Value *RetVal = &CI;
4056
4057 // Handle the optional bitcast.
4058 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4059 Check(BI->getOperand(0) == RetVal,
4060 "bitcast following musttail call must use the call", BI);
4061 RetVal = BI;
4062 Next = BI->getNextNode();
4063 }
4064
4065 // Check the return.
4066 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4067 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4068 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4069 isa<UndefValue>(Ret->getReturnValue()),
4070 "musttail call result must be returned", Ret);
4071
4072 AttributeList CallerAttrs = F->getAttributes();
4073 AttributeList CalleeAttrs = CI.getAttributes();
4074 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4075 CI.getCallingConv() == CallingConv::Tail) {
4076 StringRef CCName =
4077 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4078
4079 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4080 // are allowed in swifttailcc call
4081 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4082 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4083 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4084 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4085 }
4086 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4087 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4088 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4089 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4090 }
4091 // - Varargs functions are not allowed
4092 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4093 " tail call for varargs function");
4094 return;
4095 }
4096
4097 // - The caller and callee prototypes must match. Pointer types of
4098 // parameters or return types may differ in pointee type, but not
4099 // address space.
4100 if (!CI.getIntrinsicID()) {
4101 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4102 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4103 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4104 Check(
4105 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4106 "cannot guarantee tail call due to mismatched parameter types", &CI);
4107 }
4108 }
4109
4110 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4111 // returned, preallocated, and inalloca, must match.
4112 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4113 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4114 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4115 Check(CallerABIAttrs == CalleeABIAttrs,
4116 "cannot guarantee tail call due to mismatched ABI impacting "
4117 "function attributes",
4118 &CI, CI.getOperand(I));
4119 }
4120}
4121
void Verifier::visitCallInst(CallInst &CI) {
  // Run the checks common to all call-like instructions first.
  visitCallBase(CI);

  // musttail calls carry extra constraints (see verifyMustTailCall).
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4128
void Verifier::visitInvokeInst(InvokeInst &II) {
  // Invokes share all generic call-site checks with plain calls.
  visitCallBase(II);

  // Verify that the first non-PHI instruction of the unwind destination is an
  // exception handling instruction.
  Check(
      II.getUnwindDest()->isEHPad(),
      "The unwind destination does not have an exception handling instruction!",
      &II);

  // An invoke terminates its block, so it also gets the terminator checks.
  visitTerminator(II);
}
4141
4142/// visitUnaryOperator - Check the argument to the unary operator.
4143///
4144void Verifier::visitUnaryOperator(UnaryOperator &U) {
4145 Check(U.getType() == U.getOperand(0)->getType(),
4146 "Unary operators must have same type for"
4147 "operands and result!",
4148 &U);
4149
4150 switch (U.getOpcode()) {
4151 // Check that floating-point arithmetic operators are only used with
4152 // floating-point operands.
4153 case Instruction::FNeg:
4154 Check(U.getType()->isFPOrFPVectorTy(),
4155 "FNeg operator only works with float types!", &U);
4156 break;
4157 default:
4158 llvm_unreachable("Unknown UnaryOperator opcode!");
4159 }
4160
4161 visitInstruction(U);
4162}
4163
4164/// visitBinaryOperator - Check that both arguments to the binary operator are
4165/// of the same type!
4166///
4167void Verifier::visitBinaryOperator(BinaryOperator &B) {
4168 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4169 "Both operands to a binary operator are not of the same type!", &B);
4170
4171 switch (B.getOpcode()) {
4172 // Check that integer arithmetic operators are only used with
4173 // integral operands.
4174 case Instruction::Add:
4175 case Instruction::Sub:
4176 case Instruction::Mul:
4177 case Instruction::SDiv:
4178 case Instruction::UDiv:
4179 case Instruction::SRem:
4180 case Instruction::URem:
4181 Check(B.getType()->isIntOrIntVectorTy(),
4182 "Integer arithmetic operators only work with integral types!", &B);
4183 Check(B.getType() == B.getOperand(0)->getType(),
4184 "Integer arithmetic operators must have same type "
4185 "for operands and result!",
4186 &B);
4187 break;
4188 // Check that floating-point arithmetic operators are only used with
4189 // floating-point operands.
4190 case Instruction::FAdd:
4191 case Instruction::FSub:
4192 case Instruction::FMul:
4193 case Instruction::FDiv:
4194 case Instruction::FRem:
4195 Check(B.getType()->isFPOrFPVectorTy(),
4196 "Floating-point arithmetic operators only work with "
4197 "floating-point types!",
4198 &B);
4199 Check(B.getType() == B.getOperand(0)->getType(),
4200 "Floating-point arithmetic operators must have same type "
4201 "for operands and result!",
4202 &B);
4203 break;
4204 // Check that logical operators are only used with integral operands.
4205 case Instruction::And:
4206 case Instruction::Or:
4207 case Instruction::Xor:
4208 Check(B.getType()->isIntOrIntVectorTy(),
4209 "Logical operators only work with integral types!", &B);
4210 Check(B.getType() == B.getOperand(0)->getType(),
4211 "Logical operators must have same type for operands and result!", &B);
4212 break;
4213 case Instruction::Shl:
4214 case Instruction::LShr:
4215 case Instruction::AShr:
4216 Check(B.getType()->isIntOrIntVectorTy(),
4217 "Shifts only work with integral types!", &B);
4218 Check(B.getType() == B.getOperand(0)->getType(),
4219 "Shift return type must be same as operands!", &B);
4220 break;
4221 default:
4222 llvm_unreachable("Unknown BinaryOperator opcode!");
4223 }
4224
4225 visitInstruction(B);
4226}
4227
4228void Verifier::visitICmpInst(ICmpInst &IC) {
4229 // Check that the operands are the same type
4230 Type *Op0Ty = IC.getOperand(0)->getType();
4231 Type *Op1Ty = IC.getOperand(1)->getType();
4232 Check(Op0Ty == Op1Ty,
4233 "Both operands to ICmp instruction are not of the same type!", &IC);
4234 // Check that the operands are the right type
4235 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4236 "Invalid operand types for ICmp instruction", &IC);
4237 // Check that the predicate is valid.
4238 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4239
4240 visitInstruction(IC);
4241}
4242
4243void Verifier::visitFCmpInst(FCmpInst &FC) {
4244 // Check that the operands are the same type
4245 Type *Op0Ty = FC.getOperand(0)->getType();
4246 Type *Op1Ty = FC.getOperand(1)->getType();
4247 Check(Op0Ty == Op1Ty,
4248 "Both operands to FCmp instruction are not of the same type!", &FC);
4249 // Check that the operands are the right type
4250 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4251 &FC);
4252 // Check that the predicate is valid.
4253 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4254
4255 visitInstruction(FC);
4256}
4257
4258void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4260 "Invalid extractelement operands!", &EI);
4261 visitInstruction(EI);
4262}
4263
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  // Operand legality is delegated to InsertElementInst::isValidOperands.
  Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                           IE.getOperand(2)),
        "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
4270
4271void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4273 SV.getShuffleMask()),
4274 "Invalid shufflevector operands!", &SV);
4275 visitInstruction(SV);
4276}
4277
4278void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4279 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4280
4281 Check(isa<PointerType>(TargetTy),
4282 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4283 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4284
4285 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4286 Check(!STy->isScalableTy(),
4287 "getelementptr cannot target structure that contains scalable vector"
4288 "type",
4289 &GEP);
4290 }
4291
4292 SmallVector<Value *, 16> Idxs(GEP.indices());
4293 Check(
4294 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4295 "GEP indexes must be integers", &GEP);
4296 Type *ElTy =
4297 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4298 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4299
4300 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4301
4302 Check(PtrTy && GEP.getResultElementType() == ElTy,
4303 "GEP is not of right type for indices!", &GEP, ElTy);
4304
4305 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4306 // Additional checks for vector GEPs.
4307 ElementCount GEPWidth = GEPVTy->getElementCount();
4308 if (GEP.getPointerOperandType()->isVectorTy())
4309 Check(
4310 GEPWidth ==
4311 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4312 "Vector GEP result width doesn't match operand's", &GEP);
4313 for (Value *Idx : Idxs) {
4314 Type *IndexTy = Idx->getType();
4315 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4316 ElementCount IndexWidth = IndexVTy->getElementCount();
4317 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4318 }
4319 Check(IndexTy->isIntOrIntVectorTy(),
4320 "All GEP indices should be of integer type");
4321 }
4322 }
4323
4324 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4325 "GEP address space doesn't match type", &GEP);
4326
4327 visitInstruction(GEP);
4328}
4329
4330static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4331 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4332}
4333
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) ConstantInt pairs, one pair per range.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // !noalias.addrspace always uses i32 bounds; !range/!absolute_symbol
    // bounds must match the value's scalar type.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // Only !absolute_symbol may use the full-set encoding.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    // Consecutive ranges must be disjoint, ordered, and non-mergeable.
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges, the pairwise loop above never compares the
  // first range against the last one, which are adjacent when the list is
  // viewed as wrapping around; check that pair here.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4398
// Verify a !range attachment; the caller must pass the instruction's own
// !range node (asserted below), delegating the detailed checks to
// verifyRangeLikeMetadata.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4404
// Verify a !noalias.addrspace attachment; the caller must pass the
// instruction's own node (asserted below), delegating the detailed checks to
// verifyRangeLikeMetadata with the NoaliasAddrspace kind (i32 bounds).
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4412
4413void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4414 unsigned Size = DL.getTypeSizeInBits(Ty);
4415 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4416 Check(!(Size & (Size - 1)),
4417 "atomic memory access' operand must have a power-of-two size", Ty, I);
4418}

/// Verify a load: pointer operand, sized result type, sane alignment, and the
/// extra constraints on atomic loads (ordering, element type, access size).
void Verifier::visitLoadInst(LoadInst &LI) {
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &LI);
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
        "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4445
4446void Verifier::visitStoreInst(StoreInst &SI) {
4447 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4448 Check(PTy, "Store operand must be a pointer.", &SI);
4449 Type *ElTy = SI.getOperand(0)->getType();
4450 if (MaybeAlign A = SI.getAlign()) {
4451 Check(A->value() <= Value::MaximumAlignment,
4452 "huge alignment values are unsupported", &SI);
4453 }
4454 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4455 if (SI.isAtomic()) {
4456 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4457 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4458 "Store cannot have Acquire ordering", &SI);
4459 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4460 "atomic store operand must have integer, pointer, or floating point "
4461 "type!",
4462 ElTy, &SI);
4463 checkAtomicMemAccessSize(ElTy, &SI);
4464 } else {
4465 Check(SI.getSyncScopeID() == SyncScope::System,
4466 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4467 }
4468 visitInstruction(SI);
4469}
4470
4471/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4472void Verifier::verifySwiftErrorCall(CallBase &Call,
4473 const Value *SwiftErrorVal) {
4474 for (const auto &I : llvm::enumerate(Call.args())) {
4475 if (I.value() == SwiftErrorVal) {
4476 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4477 "swifterror value when used in a callsite should be marked "
4478 "with swifterror attribute",
4479 SwiftErrorVal, Call);
4480 }
4481 }
4482}
4483
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
              isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call/invoke users must pass the value in a swifterror parameter slot.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}

/// Verify an alloca: sized allocated type, integer array size, bounded
/// alignment, plus the swifterror and AMDGPU address-space restrictions.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
        "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4533
4534void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4535 Type *ElTy = CXI.getOperand(1)->getType();
4536 Check(ElTy->isIntOrPtrTy(),
4537 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4538 checkAtomicMemAccessSize(ElTy, &CXI);
4539 visitInstruction(CXI);
4540}

/// Verify an atomicrmw: the ordering must be at least monotonic, and the
/// value type must fit the operation (xchg: int/FP/pointer; FP ops: floating
/// point; all others: integer), with a legal atomic access size.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have floating-point or fixed vector of floating-point "
              "type!",
          &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4570
4571void Verifier::visitFenceInst(FenceInst &FI) {
4572 const AtomicOrdering Ordering = FI.getOrdering();
4573 Check(Ordering == AtomicOrdering::Acquire ||
4574 Ordering == AtomicOrdering::Release ||
4575 Ordering == AtomicOrdering::AcquireRelease ||
4576 Ordering == AtomicOrdering::SequentiallyConsistent,
4577 "fence instructions may only have acquire, release, acq_rel, or "
4578 "seq_cst ordering.",
4579 &FI);
4580 visitInstruction(FI);
4581}

/// Verify extractvalue: the type indexed within the aggregate operand must
/// equal the instruction's result type.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
                                          EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}

/// Verify insertvalue: the type indexed within the aggregate must equal the
/// type of the value being inserted (operand 1).
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
                                         IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4599
4600static Value *getParentPad(Value *EHPad) {
4601 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4602 return FPI->getParentPad();
4603
4604 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4605}
4606
4607void Verifier::visitEHPadPredecessors(Instruction &I) {
4608 assert(I.isEHPad());
4609
4610 BasicBlock *BB = I.getParent();
4611 Function *F = BB->getParent();
4612
4613 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4614
4615 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4616 // The landingpad instruction defines its parent as a landing pad block. The
4617 // landing pad block may be branched to only by the unwind edge of an
4618 // invoke.
4619 for (BasicBlock *PredBB : predecessors(BB)) {
4620 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4621 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4622 "Block containing LandingPadInst must be jumped to "
4623 "only by the unwind edge of an invoke.",
4624 LPI);
4625 }
4626 return;
4627 }
4628 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4629 if (!pred_empty(BB))
4630 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4631 "Block containg CatchPadInst must be jumped to "
4632 "only by its catchswitch.",
4633 CPI);
4634 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4635 "Catchswitch cannot unwind to one of its catchpads",
4636 CPI->getCatchSwitch(), CPI);
4637 return;
4638 }
4639
4640 // Verify that each pred has a legal terminator with a legal to/from EH
4641 // pad relationship.
4642 Instruction *ToPad = &I;
4643 Value *ToPadParent = getParentPad(ToPad);
4644 for (BasicBlock *PredBB : predecessors(BB)) {
4645 Instruction *TI = PredBB->getTerminator();
4646 Value *FromPad;
4647 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4648 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4649 "EH pad must be jumped to via an unwind edge", ToPad, II);
4650 auto *CalledFn =
4651 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4652 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4653 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4654 continue;
4655 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4656 FromPad = Bundle->Inputs[0];
4657 else
4658 FromPad = ConstantTokenNone::get(II->getContext());
4659 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4660 FromPad = CRI->getOperand(0);
4661 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4662 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4663 FromPad = CSI;
4664 } else {
4665 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4666 }
4667
4668 // The edge may exit from zero or more nested pads.
4669 SmallPtrSet<Value *, 8> Seen;
4670 for (;; FromPad = getParentPad(FromPad)) {
4671 Check(FromPad != ToPad,
4672 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4673 if (FromPad == ToPadParent) {
4674 // This is a legal unwind edge.
4675 break;
4676 }
4677 Check(!isa<ConstantTokenNone>(FromPad),
4678 "A single unwind edge may only enter one EH pad", TI);
4679 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4680 FromPad);
4681
4682 // This will be diagnosed on the corresponding instruction already. We
4683 // need the extra check here to make sure getParentPad() works.
4684 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4685 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4686 }
4687 }
4688}
4689
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) within one function must agree on a single
  // result type; the first one seen fixes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  // Each clause is either a catch (pointer typed) or a filter.
  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}

/// Verify resume: requires a personality function, and its operand type must
/// agree with the function's single landingpad result type.
void Verifier::visitResumeInst(ResumeInst &RI) {
        "ResumeInst needs to be in a function with a personality.", &RI);

  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4744
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  // Catch pads require an EH personality on the enclosing function.
  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4764
4765void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4766 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4767 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4768 CatchReturn.getOperand(0));
4769
4770 visitTerminator(CatchReturn);
4771}
4772
4773void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4774 BasicBlock *BB = CPI.getParent();
4775
4776 Function *F = BB->getParent();
4777 Check(F->hasPersonalityFn(),
4778 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4779
4780 // The cleanuppad instruction must be the first non-PHI instruction in the
4781 // block.
4782 Check(&*BB->getFirstNonPHIIt() == &CPI,
4783 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4784
4785 auto *ParentPad = CPI.getParentPad();
4786 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4787 "CleanupPadInst has an invalid parent.", &CPI);
4788
4789 visitEHPadPredecessors(CPI);
4790 visitFuncletPadInst(CPI);
4791}
4792
/// Verify that every unwind edge leaving funclet pad FPI (including edges
/// found by recursively searching nested cleanup pads) agrees on a single
/// unwind destination, and that a catch's unwind dest matches its parent
/// catchswitch. Also records sibling-funclet unwind info for
/// verifySiblingFuncletUnwinds.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind-exiting use of FPI seen, and the pad it unwinds to; later
  // exiting uses must match FirstUnwindPad.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catch nested in a catchswitch must unwind to exactly the same place as
  // the catchswitch itself.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4952
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  // Catchswitches require an EH personality on the enclosing function.
  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // Its parent is either "token none" (top level) or a funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  // If it unwinds somewhere, the destination must start with an EH pad that
  // is not a landingpad (funclet and landingpad EH models don't mix).
  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  // Every handler block must begin with a catchpad.
  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}

/// Verify cleanupret: its token operand must be a cleanuppad, and any unwind
/// destination must begin with a non-landingpad EH pad.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5010
/// Verify that operand i of I is defined by an instruction that dominates
/// this use, taking PHI edge semantics into account.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  // Fall back to the full dominator-tree query.
  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
5034
// Shared checker for !dereferenceable and !dereferenceable_or_null: only on
// pointer-typed load/inttoptr results, with a single constant i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand is the dereferenceable byte count as a constant i64.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5054
5055void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5056 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5057 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5058 &I);
5059 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5060}

/// Verify a !prof attachment: the named profile kind must be well-formed for
/// this instruction (branch_weights arity, value-profile layout, etc.).
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // Number of branch weights expected for each branching-terminator kind;
  // 0 means branch_weights is not allowed on this instruction at all.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      // Invokes may carry one weight (taken) or two (taken plus unwind).
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Every weight operand must be a constant integer.
    // NOTE(review): the message below says "brunch_weights" — an upstream
    // typo; the string is user-visible so it is left untouched here.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5145
/// Verify a !DIAssignID attachment on \p I and every user of the ID node
/// (both dbg.assign intrinsics and DbgVariableRecord users).
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Apply the same two constraints (assign-kind user, same enclosing function)
  // to the DbgVariableRecord users of this ID.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5177
/// Verify an !mmra attachment: it must be either a single MMRA tag or a
/// tuple whose operands are all MMRA tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag, so it must be a tuple where every operand is a tag.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5195
/// Verify memprof-style call stack metadata: a list with at least one
/// operand, each a constant integer.
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
        "call stack metadata operand should be constant integer", Op);
}
5206
5207void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5208 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5209 Check(MD->getNumOperands() >= 1,
5210 "!memprof annotations should have at least 1 metadata operand "
5211 "(MemInfoBlock)",
5212 MD);
5213
5214 // Check each MIB
5215 for (auto &MIBOp : MD->operands()) {
5216 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5217 // The first operand of an MIB should be the call stack metadata.
5218 // There rest of the operands should be MDString tags, and there should be
5219 // at least one.
5220 Check(MIB->getNumOperands() >= 2,
5221 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5222
5223 // Check call stack metadata (first operand).
5224 Check(MIB->getOperand(0) != nullptr,
5225 "!memprof MemInfoBlock first operand should not be null", MIB);
5226 Check(isa<MDNode>(MIB->getOperand(0)),
5227 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5228 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5229 visitCallStackMetadata(StackMD);
5230
5231 // The next set of 1 or more operands should be MDString.
5232 unsigned I = 1;
5233 for (; I < MIB->getNumOperands(); ++I) {
5234 if (!isa<MDString>(MIB->getOperand(I))) {
5235 Check(I > 1,
5236 "!memprof MemInfoBlock second operand should be an MDString",
5237 MIB);
5238 break;
5239 }
5240 }
5241
5242 // Any remaining should be MDNode that are pairs of integers
5243 for (; I < MIB->getNumOperands(); ++I) {
5244 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5245 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5246 MIB);
5247 Check(OpNode->getNumOperands() == 2,
5248 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5249 "operands",
5250 MIB);
5251 // Check that all of Op's operands are ConstantInt.
5252 Check(llvm::all_of(OpNode->operands(),
5253 [](const MDOperand &Op) {
5254 return mdconst::hasa<ConstantInt>(Op);
5255 }),
5256 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5257 "ConstantInt operands",
5258 MIB);
5259 }
5260 }
5261}
5262
5263void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5264 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5265 // Verify the partial callstack annotated from memprof profiles. This callsite
5266 // is a part of a profiled allocation callstack.
5267 visitCallStackMetadata(MD);
5268}
5269
5270static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5271 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5272 return isa<ConstantInt>(VAL->getValue());
5273 return false;
5274}
5275
/// Verify a !callee_type attachment: a list of generalized type metadata
/// nodes, each of the form {i64 0, generalized-type MDString}.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // For functions, the first operand must be the constant integer zero.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    // The second operand must carry a generalized type-id string.
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5296
5297void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5298 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5299 Check(Annotation->getNumOperands() >= 1,
5300 "annotation must have at least one operand");
5301 for (const MDOperand &Op : Annotation->operands()) {
5302 bool TupleOfStrings =
5303 isa<MDTuple>(Op.get()) &&
5304 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5305 return isa<MDString>(Annotation.get());
5306 });
5307 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5308 "operands must be a string or a tuple of strings");
5309 }
5310}
5311
/// Verify a single alias scope node (!alias.scope / !noalias list entry) and
/// the domain node it references.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // Operand 0 is either the node itself (distinct self-reference) or a name.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  // Operand 1 is the domain node; it has the same self-ref-or-string shape.
  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5335
5336void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5337 for (const MDOperand &Op : MD->operands()) {
5338 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5339 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5340 visitAliasScopeMetadata(OpMD);
5341 }
5342}
5343
5344void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5345 auto IsValidAccessScope = [](const MDNode *MD) {
5346 return MD->getNumOperands() == 0 && MD->isDistinct();
5347 };
5348
5349 // It must be either an access scope itself...
5350 if (IsValidAccessScope(MD))
5351 return;
5352
5353 // ...or a list of access scopes.
5354 for (const MDOperand &Op : MD->operands()) {
5355 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5356 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5357 Check(IsValidAccessScope(OpMD),
5358 "Access scope list contains invalid access scope", MD);
5359 }
5360}
5361
/// verifyInstruction - Verify that an instruction is well formed.
/// Checks generic structural properties (parent block, self-reference,
/// operand validity, cross-function/module references) and then dispatches
/// each recognized metadata attachment to its dedicated verifier.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Validate every operand according to what kind of value it is.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may be invoked (rather than
      // called); everything else must be a plain CallInst.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // Verify the well-known metadata attachments that can appear on generic
  // instructions, dispatching to dedicated helpers where they exist.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // Accuracy must be a finite, positive IEEE single-precision value.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  // The debug location, if present, must be a DILocation and is the only
  // attachment (besides !loop, below) permitted to contain debug locations.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Finally, verify every remaining attachment as a generic MDNode; only
  // !dbg and !loop attachments may reference debug locations.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Record this instruction in the per-block visited set.
  InstsInThisBlock.insert(&I);
}
5616
5617/// Allow intrinsics to be verified in different ways.
5618void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5620 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5621 IF);
5622
5623 // Verify that the intrinsic prototype lines up with what the .td files
5624 // describe.
5625 FunctionType *IFTy = IF->getFunctionType();
5626 bool IsVarArg = IFTy->isVarArg();
5627
5631
5632 // Walk the descriptors to extract overloaded types.
5637 "Intrinsic has incorrect return type!", IF);
5639 "Intrinsic has incorrect argument type!", IF);
5640
5641 // Verify if the intrinsic call matches the vararg property.
5642 if (IsVarArg)
5644 "Intrinsic was not defined with variable arguments!", IF);
5645 else
5647 "Callsite was not defined with variable arguments!", IF);
5648
5649 // All descriptors should be absorbed by now.
5650 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5651
5652 // Now that we have the intrinsic ID and the actual argument types (and we
5653 // know they are legal for the intrinsic!) get the intrinsic name through the
5654 // usual means. This allows us to verify the mangling of argument types into
5655 // the name.
5656 const std::string ExpectedName =
5657 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5658 Check(ExpectedName == IF->getName(),
5659 "Intrinsic name not mangled correctly for type arguments! "
5660 "Should be: " +
5661 ExpectedName,
5662 IF);
5663
5664 // If the intrinsic takes MDNode arguments, verify that they are either global
5665 // or are local to *this* function.
5666 for (Value *V : Call.args()) {
5667 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5668 visitMetadataAsValue(*MD, Call.getCaller());
5669 if (auto *Const = dyn_cast<Constant>(V))
5670 Check(!Const->getType()->isX86_AMXTy(),
5671 "const x86_amx is not allowed in argument!");
5672 }
5673
5674 switch (ID) {
5675 default:
5676 break;
5677 case Intrinsic::assume: {
5678 for (auto &Elem : Call.bundle_op_infos()) {
5679 unsigned ArgCount = Elem.End - Elem.Begin;
5680 // Separate storage assumptions are special insofar as they're the only
5681 // operand bundles allowed on assumes that aren't parameter attributes.
5682 if (Elem.Tag->getKey() == "separate_storage") {
5683 Check(ArgCount == 2,
5684 "separate_storage assumptions should have 2 arguments", Call);
5685 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5686 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5687 "arguments to separate_storage assumptions should be pointers",
5688 Call);
5689 continue;
5690 }
5691 Check(Elem.Tag->getKey() == "ignore" ||
5692 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5693 "tags must be valid attribute names", Call);
5694 Attribute::AttrKind Kind =
5695 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5696 if (Kind == Attribute::Alignment) {
5697 Check(ArgCount <= 3 && ArgCount >= 2,
5698 "alignment assumptions should have 2 or 3 arguments", Call);
5699 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5700 "first argument should be a pointer", Call);
5701 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5702 "second argument should be an integer", Call);
5703 if (ArgCount == 3)
5704 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5705 "third argument should be an integer if present", Call);
5706 continue;
5707 }
5708 if (Kind == Attribute::Dereferenceable) {
5709 Check(ArgCount == 2,
5710 "dereferenceable assumptions should have 2 arguments", Call);
5711 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5712 "first argument should be a pointer", Call);
5713 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5714 "second argument should be an integer", Call);
5715 continue;
5716 }
5717 Check(ArgCount <= 2, "too many arguments", Call);
5718 if (Kind == Attribute::None)
5719 break;
5720 if (Attribute::isIntAttrKind(Kind)) {
5721 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5722 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5723 "the second argument should be a constant integral value", Call);
5724 } else if (Attribute::canUseAsParamAttr(Kind)) {
5725 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5726 } else if (Attribute::canUseAsFnAttr(Kind)) {
5727 Check((ArgCount) == 0, "this attribute has no argument", Call);
5728 }
5729 }
5730 break;
5731 }
5732 case Intrinsic::ucmp:
5733 case Intrinsic::scmp: {
5734 Type *SrcTy = Call.getOperand(0)->getType();
5735 Type *DestTy = Call.getType();
5736
5737 Check(DestTy->getScalarSizeInBits() >= 2,
5738 "result type must be at least 2 bits wide", Call);
5739
5740 bool IsDestTypeVector = DestTy->isVectorTy();
5741 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5742 "ucmp/scmp argument and result types must both be either vector or "
5743 "scalar types",
5744 Call);
5745 if (IsDestTypeVector) {
5746 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5747 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5748 Check(SrcVecLen == DestVecLen,
5749 "return type and arguments must have the same number of "
5750 "elements",
5751 Call);
5752 }
5753 break;
5754 }
5755 case Intrinsic::coro_id: {
5756 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5757 if (isa<ConstantPointerNull>(InfoArg))
5758 break;
5759 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5760 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5761 "info argument of llvm.coro.id must refer to an initialized "
5762 "constant");
5763 Constant *Init = GV->getInitializer();
5765 "info argument of llvm.coro.id must refer to either a struct or "
5766 "an array");
5767 break;
5768 }
5769 case Intrinsic::is_fpclass: {
5770 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5771 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5772 "unsupported bits for llvm.is.fpclass test mask");
5773 break;
5774 }
5775 case Intrinsic::fptrunc_round: {
5776 // Check the rounding mode
5777 Metadata *MD = nullptr;
5779 if (MAV)
5780 MD = MAV->getMetadata();
5781
5782 Check(MD != nullptr, "missing rounding mode argument", Call);
5783
5784 Check(isa<MDString>(MD),
5785 ("invalid value for llvm.fptrunc.round metadata operand"
5786 " (the operand should be a string)"),
5787 MD);
5788
5789 std::optional<RoundingMode> RoundMode =
5790 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5791 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5792 "unsupported rounding mode argument", Call);
5793 break;
5794 }
5795#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5796#include "llvm/IR/VPIntrinsics.def"
5797#undef BEGIN_REGISTER_VP_INTRINSIC
5798 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5799 break;
5800#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5801 case Intrinsic::INTRINSIC:
5802#include "llvm/IR/ConstrainedOps.def"
5803#undef INSTRUCTION
5804 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5805 break;
5806 case Intrinsic::dbg_declare: // llvm.dbg.declare
5807 case Intrinsic::dbg_value: // llvm.dbg.value
5808 case Intrinsic::dbg_assign: // llvm.dbg.assign
5809 case Intrinsic::dbg_label: // llvm.dbg.label
5810 // We no longer interpret debug intrinsics (the old variable-location
5811 // design). They're meaningless as far as LLVM is concerned we could make
5812 // it an error for them to appear, but it's possible we'll have users
5813 // converting back to intrinsics for the forseeable future (such as DXIL),
5814 // so tolerate their existance.
5815 break;
5816 case Intrinsic::memcpy:
5817 case Intrinsic::memcpy_inline:
5818 case Intrinsic::memmove:
5819 case Intrinsic::memset:
5820 case Intrinsic::memset_inline:
5821 break;
5822 case Intrinsic::experimental_memset_pattern: {
5823 const auto Memset = cast<MemSetPatternInst>(&Call);
5824 Check(Memset->getValue()->getType()->isSized(),
5825 "unsized types cannot be used as memset patterns", Call);
5826 break;
5827 }
5828 case Intrinsic::memcpy_element_unordered_atomic:
5829 case Intrinsic::memmove_element_unordered_atomic:
5830 case Intrinsic::memset_element_unordered_atomic: {
5831 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5832
5833 ConstantInt *ElementSizeCI =
5834 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5835 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5836 Check(ElementSizeVal.isPowerOf2(),
5837 "element size of the element-wise atomic memory intrinsic "
5838 "must be a power of 2",
5839 Call);
5840
5841 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5842 return Alignment && ElementSizeVal.ule(Alignment->value());
5843 };
5844 Check(IsValidAlignment(AMI->getDestAlign()),
5845 "incorrect alignment of the destination argument", Call);
5846 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5847 Check(IsValidAlignment(AMT->getSourceAlign()),
5848 "incorrect alignment of the source argument", Call);
5849 }
5850 break;
5851 }
5852 case Intrinsic::call_preallocated_setup: {
5853 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5854 Check(NumArgs != nullptr,
5855 "llvm.call.preallocated.setup argument must be a constant");
5856 bool FoundCall = false;
5857 for (User *U : Call.users()) {
5858 auto *UseCall = dyn_cast<CallBase>(U);
5859 Check(UseCall != nullptr,
5860 "Uses of llvm.call.preallocated.setup must be calls");
5861 Intrinsic::ID IID = UseCall->getIntrinsicID();
5862 if (IID == Intrinsic::call_preallocated_arg) {
5863 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5864 Check(AllocArgIndex != nullptr,
5865 "llvm.call.preallocated.alloc arg index must be a constant");
5866 auto AllocArgIndexInt = AllocArgIndex->getValue();
5867 Check(AllocArgIndexInt.sge(0) &&
5868 AllocArgIndexInt.slt(NumArgs->getValue()),
5869 "llvm.call.preallocated.alloc arg index must be between 0 and "
5870 "corresponding "
5871 "llvm.call.preallocated.setup's argument count");
5872 } else if (IID == Intrinsic::call_preallocated_teardown) {
5873 // nothing to do
5874 } else {
5875 Check(!FoundCall, "Can have at most one call corresponding to a "
5876 "llvm.call.preallocated.setup");
5877 FoundCall = true;
5878 size_t NumPreallocatedArgs = 0;
5879 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5880 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5881 ++NumPreallocatedArgs;
5882 }
5883 }
5884 Check(NumPreallocatedArgs != 0,
5885 "cannot use preallocated intrinsics on a call without "
5886 "preallocated arguments");
5887 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5888 "llvm.call.preallocated.setup arg size must be equal to number "
5889 "of preallocated arguments "
5890 "at call site",
5891 Call, *UseCall);
5892 // getOperandBundle() cannot be called if more than one of the operand
5893 // bundle exists. There is already a check elsewhere for this, so skip
5894 // here if we see more than one.
5895 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5896 1) {
5897 return;
5898 }
5899 auto PreallocatedBundle =
5900 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5901 Check(PreallocatedBundle,
5902 "Use of llvm.call.preallocated.setup outside intrinsics "
5903 "must be in \"preallocated\" operand bundle");
5904 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5905 "preallocated bundle must have token from corresponding "
5906 "llvm.call.preallocated.setup");
5907 }
5908 }
5909 break;
5910 }
5911 case Intrinsic::call_preallocated_arg: {
5912 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5913 Check(Token &&
5914 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5915 "llvm.call.preallocated.arg token argument must be a "
5916 "llvm.call.preallocated.setup");
5917 Check(Call.hasFnAttr(Attribute::Preallocated),
5918 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5919 "call site attribute");
5920 break;
5921 }
5922 case Intrinsic::call_preallocated_teardown: {
5923 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5924 Check(Token &&
5925 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5926 "llvm.call.preallocated.teardown token argument must be a "
5927 "llvm.call.preallocated.setup");
5928 break;
5929 }
5930 case Intrinsic::gcroot:
5931 case Intrinsic::gcwrite:
5932 case Intrinsic::gcread:
5933 if (ID == Intrinsic::gcroot) {
5934 AllocaInst *AI =
5936 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5938 "llvm.gcroot parameter #2 must be a constant.", Call);
5939 if (!AI->getAllocatedType()->isPointerTy()) {
5941 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5942 "or argument #2 must be a non-null constant.",
5943 Call);
5944 }
5945 }
5946
5947 Check(Call.getParent()->getParent()->hasGC(),
5948 "Enclosing function does not use GC.", Call);
5949 break;
5950 case Intrinsic::init_trampoline:
5952 "llvm.init_trampoline parameter #2 must resolve to a function.",
5953 Call);
5954 break;
5955 case Intrinsic::prefetch:
5956 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5957 "rw argument to llvm.prefetch must be 0-1", Call);
5958 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5959 "locality argument to llvm.prefetch must be 0-3", Call);
5960 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5961 "cache type argument to llvm.prefetch must be 0-1", Call);
5962 break;
5963 case Intrinsic::stackprotector:
5965 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5966 break;
5967 case Intrinsic::localescape: {
5968 BasicBlock *BB = Call.getParent();
5969 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5970 Call);
5971 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5972 Call);
5973 for (Value *Arg : Call.args()) {
5974 if (isa<ConstantPointerNull>(Arg))
5975 continue; // Null values are allowed as placeholders.
5976 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5977 Check(AI && AI->isStaticAlloca(),
5978 "llvm.localescape only accepts static allocas", Call);
5979 }
5980 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5981 SawFrameEscape = true;
5982 break;
5983 }
5984 case Intrinsic::localrecover: {
5986 Function *Fn = dyn_cast<Function>(FnArg);
5987 Check(Fn && !Fn->isDeclaration(),
5988 "llvm.localrecover first "
5989 "argument must be function defined in this module",
5990 Call);
5991 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5992 auto &Entry = FrameEscapeInfo[Fn];
5993 Entry.second = unsigned(
5994 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5995 break;
5996 }
5997
5998 case Intrinsic::experimental_gc_statepoint:
5999 if (auto *CI = dyn_cast<CallInst>(&Call))
6000 Check(!CI->isInlineAsm(),
6001 "gc.statepoint support for inline assembly unimplemented", CI);
6002 Check(Call.getParent()->getParent()->hasGC(),
6003 "Enclosing function does not use GC.", Call);
6004
6005 verifyStatepoint(Call);
6006 break;
6007 case Intrinsic::experimental_gc_result: {
6008 Check(Call.getParent()->getParent()->hasGC(),
6009 "Enclosing function does not use GC.", Call);
6010
6011 auto *Statepoint = Call.getArgOperand(0);
6012 if (isa<UndefValue>(Statepoint))
6013 break;
6014
6015 // Are we tied to a statepoint properly?
6016 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6017 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6018 Intrinsic::experimental_gc_statepoint,
6019 "gc.result operand #1 must be from a statepoint", Call,
6020 Call.getArgOperand(0));
6021
6022 // Check that result type matches wrapped callee.
6023 auto *TargetFuncType =
6024 cast<FunctionType>(StatepointCall->getParamElementType(2));
6025 Check(Call.getType() == TargetFuncType->getReturnType(),
6026 "gc.result result type does not match wrapped callee", Call);
6027 break;
6028 }
6029 case Intrinsic::experimental_gc_relocate: {
6030 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6031
6033 "gc.relocate must return a pointer or a vector of pointers", Call);
6034
6035 // Check that this relocate is correctly tied to the statepoint
6036
6037 // This is case for relocate on the unwinding path of an invoke statepoint
6038 if (LandingPadInst *LandingPad =
6040
6041 const BasicBlock *InvokeBB =
6042 LandingPad->getParent()->getUniquePredecessor();
6043
6044 // Landingpad relocates should have only one predecessor with invoke
6045 // statepoint terminator
6046 Check(InvokeBB, "safepoints should have unique landingpads",
6047 LandingPad->getParent());
6048 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6049 InvokeBB);
6051 "gc relocate should be linked to a statepoint", InvokeBB);
6052 } else {
6053 // In all other cases relocate should be tied to the statepoint directly.
6054 // This covers relocates on a normal return path of invoke statepoint and
6055 // relocates of a call statepoint.
6056 auto *Token = Call.getArgOperand(0);
6058 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6059 }
6060
6061 // Verify rest of the relocate arguments.
6062 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6063
6064 // Both the base and derived must be piped through the safepoint.
6067 "gc.relocate operand #2 must be integer offset", Call);
6068
6069 Value *Derived = Call.getArgOperand(2);
6070 Check(isa<ConstantInt>(Derived),
6071 "gc.relocate operand #3 must be integer offset", Call);
6072
6073 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6074 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6075
6076 // Check the bounds
6077 if (isa<UndefValue>(StatepointCall))
6078 break;
6079 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6080 .getOperandBundle(LLVMContext::OB_gc_live)) {
6081 Check(BaseIndex < Opt->Inputs.size(),
6082 "gc.relocate: statepoint base index out of bounds", Call);
6083 Check(DerivedIndex < Opt->Inputs.size(),
6084 "gc.relocate: statepoint derived index out of bounds", Call);
6085 }
6086
6087 // Relocated value must be either a pointer type or vector-of-pointer type,
6088 // but gc_relocate does not need to return the same pointer type as the
6089 // relocated pointer. It can be casted to the correct type later if it's
6090 // desired. However, they must have the same address space and 'vectorness'
6091 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6092 auto *ResultType = Call.getType();
6093 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6094 auto *BaseType = Relocate.getBasePtr()->getType();
6095
6096 Check(BaseType->isPtrOrPtrVectorTy(),
6097 "gc.relocate: relocated value must be a pointer", Call);
6098 Check(DerivedType->isPtrOrPtrVectorTy(),
6099 "gc.relocate: relocated value must be a pointer", Call);
6100
6101 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6102 "gc.relocate: vector relocates to vector and pointer to pointer",
6103 Call);
6104 Check(
6105 ResultType->getPointerAddressSpace() ==
6106 DerivedType->getPointerAddressSpace(),
6107 "gc.relocate: relocating a pointer shouldn't change its address space",
6108 Call);
6109
6110 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6111 Check(GC, "gc.relocate: calling function must have GCStrategy",
6112 Call.getFunction());
6113 if (GC) {
6114 auto isGCPtr = [&GC](Type *PTy) {
6115 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6116 };
6117 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6118 Check(isGCPtr(BaseType),
6119 "gc.relocate: relocated value must be a gc pointer", Call);
6120 Check(isGCPtr(DerivedType),
6121 "gc.relocate: relocated value must be a gc pointer", Call);
6122 }
6123 break;
6124 }
6125 case Intrinsic::experimental_patchpoint: {
6126 if (Call.getCallingConv() == CallingConv::AnyReg) {
6128 "patchpoint: invalid return type used with anyregcc", Call);
6129 }
6130 break;
6131 }
6132 case Intrinsic::eh_exceptioncode:
6133 case Intrinsic::eh_exceptionpointer: {
6135 "eh.exceptionpointer argument must be a catchpad", Call);
6136 break;
6137 }
6138 case Intrinsic::get_active_lane_mask: {
6140 "get_active_lane_mask: must return a "
6141 "vector",
6142 Call);
6143 auto *ElemTy = Call.getType()->getScalarType();
6144 Check(ElemTy->isIntegerTy(1),
6145 "get_active_lane_mask: element type is not "
6146 "i1",
6147 Call);
6148 break;
6149 }
6150 case Intrinsic::experimental_get_vector_length: {
6151 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6152 Check(!VF->isNegative() && !VF->isZero(),
6153 "get_vector_length: VF must be positive", Call);
6154 break;
6155 }
6156 case Intrinsic::masked_load: {
6157 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6158 Call);
6159
6160 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6162 Value *PassThru = Call.getArgOperand(3);
6163 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6164 Call);
6165 Check(Alignment->getValue().isPowerOf2(),
6166 "masked_load: alignment must be a power of 2", Call);
6167 Check(PassThru->getType() == Call.getType(),
6168 "masked_load: pass through and return type must match", Call);
6169 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6170 cast<VectorType>(Call.getType())->getElementCount(),
6171 "masked_load: vector mask must be same length as return", Call);
6172 break;
6173 }
6174 case Intrinsic::masked_store: {
6175 Value *Val = Call.getArgOperand(0);
6176 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6178 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6179 Call);
6180 Check(Alignment->getValue().isPowerOf2(),
6181 "masked_store: alignment must be a power of 2", Call);
6182 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6183 cast<VectorType>(Val->getType())->getElementCount(),
6184 "masked_store: vector mask must be same length as value", Call);
6185 break;
6186 }
6187
6188 case Intrinsic::masked_gather: {
6189 const APInt &Alignment =
6191 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6192 "masked_gather: alignment must be 0 or a power of 2", Call);
6193 break;
6194 }
6195 case Intrinsic::masked_scatter: {
6196 const APInt &Alignment =
6197 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6198 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6199 "masked_scatter: alignment must be 0 or a power of 2", Call);
6200 break;
6201 }
6202
6203 case Intrinsic::experimental_guard: {
6204 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6206 "experimental_guard must have exactly one "
6207 "\"deopt\" operand bundle");
6208 break;
6209 }
6210
6211 case Intrinsic::experimental_deoptimize: {
6212 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6213 Call);
6215 "experimental_deoptimize must have exactly one "
6216 "\"deopt\" operand bundle");
6218 "experimental_deoptimize return type must match caller return type");
6219
6220 if (isa<CallInst>(Call)) {
6222 Check(RI,
6223 "calls to experimental_deoptimize must be followed by a return");
6224
6225 if (!Call.getType()->isVoidTy() && RI)
6226 Check(RI->getReturnValue() == &Call,
6227 "calls to experimental_deoptimize must be followed by a return "
6228 "of the value computed by experimental_deoptimize");
6229 }
6230
6231 break;
6232 }
6233 case Intrinsic::vastart: {
6235 "va_start called in a non-varargs function");
6236 break;
6237 }
6238 case Intrinsic::get_dynamic_area_offset: {
6239 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6240 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6241 IntTy->getBitWidth(),
6242 "get_dynamic_area_offset result type must be scalar integer matching "
6243 "alloca address space width",
6244 Call);
6245 break;
6246 }
6247 case Intrinsic::vector_reduce_and:
6248 case Intrinsic::vector_reduce_or:
6249 case Intrinsic::vector_reduce_xor:
6250 case Intrinsic::vector_reduce_add:
6251 case Intrinsic::vector_reduce_mul:
6252 case Intrinsic::vector_reduce_smax:
6253 case Intrinsic::vector_reduce_smin:
6254 case Intrinsic::vector_reduce_umax:
6255 case Intrinsic::vector_reduce_umin: {
6256 Type *ArgTy = Call.getArgOperand(0)->getType();
6257 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6258 "Intrinsic has incorrect argument type!");
6259 break;
6260 }
6261 case Intrinsic::vector_reduce_fmax:
6262 case Intrinsic::vector_reduce_fmin: {
6263 Type *ArgTy = Call.getArgOperand(0)->getType();
6264 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6265 "Intrinsic has incorrect argument type!");
6266 break;
6267 }
6268 case Intrinsic::vector_reduce_fadd:
6269 case Intrinsic::vector_reduce_fmul: {
6270 // Unlike the other reductions, the first argument is a start value. The
6271 // second argument is the vector to be reduced.
6272 Type *ArgTy = Call.getArgOperand(1)->getType();
6273 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6274 "Intrinsic has incorrect argument type!");
6275 break;
6276 }
6277 case Intrinsic::smul_fix:
6278 case Intrinsic::smul_fix_sat:
6279 case Intrinsic::umul_fix:
6280 case Intrinsic::umul_fix_sat:
6281 case Intrinsic::sdiv_fix:
6282 case Intrinsic::sdiv_fix_sat:
6283 case Intrinsic::udiv_fix:
6284 case Intrinsic::udiv_fix_sat: {
6285 Value *Op1 = Call.getArgOperand(0);
6286 Value *Op2 = Call.getArgOperand(1);
6288 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6289 "vector of ints");
6291 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6292 "vector of ints");
6293
6294 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6295 Check(Op3->getType()->isIntegerTy(),
6296 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6297 Check(Op3->getBitWidth() <= 32,
6298 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6299
6300 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6301 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6302 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6303 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6304 "the operands");
6305 } else {
6306 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6307 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6308 "to the width of the operands");
6309 }
6310 break;
6311 }
6312 case Intrinsic::lrint:
6313 case Intrinsic::llrint:
6314 case Intrinsic::lround:
6315 case Intrinsic::llround: {
6316 Type *ValTy = Call.getArgOperand(0)->getType();
6317 Type *ResultTy = Call.getType();
6318 auto *VTy = dyn_cast<VectorType>(ValTy);
6319 auto *RTy = dyn_cast<VectorType>(ResultTy);
6320 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6321 ExpectedName + ": argument must be floating-point or vector "
6322 "of floating-points, and result must be integer or "
6323 "vector of integers",
6324 &Call);
6325 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6326 ExpectedName + ": argument and result disagree on vector use", &Call);
6327 if (VTy) {
6328 Check(VTy->getElementCount() == RTy->getElementCount(),
6329 ExpectedName + ": argument must be same length as result", &Call);
6330 }
6331 break;
6332 }
6333 case Intrinsic::bswap: {
6334 Type *Ty = Call.getType();
6335 unsigned Size = Ty->getScalarSizeInBits();
6336 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6337 break;
6338 }
6339 case Intrinsic::invariant_start: {
6340 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6341 Check(InvariantSize &&
6342 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6343 "invariant_start parameter must be -1, 0 or a positive number",
6344 &Call);
6345 break;
6346 }
6347 case Intrinsic::matrix_multiply:
6348 case Intrinsic::matrix_transpose:
6349 case Intrinsic::matrix_column_major_load:
6350 case Intrinsic::matrix_column_major_store: {
6352 ConstantInt *Stride = nullptr;
6353 ConstantInt *NumRows;
6354 ConstantInt *NumColumns;
6355 VectorType *ResultTy;
6356 Type *Op0ElemTy = nullptr;
6357 Type *Op1ElemTy = nullptr;
6358 switch (ID) {
6359 case Intrinsic::matrix_multiply: {
6360 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6361 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6362 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6364 ->getNumElements() ==
6365 NumRows->getZExtValue() * N->getZExtValue(),
6366 "First argument of a matrix operation does not match specified "
6367 "shape!");
6369 ->getNumElements() ==
6370 N->getZExtValue() * NumColumns->getZExtValue(),
6371 "Second argument of a matrix operation does not match specified "
6372 "shape!");
6373
6374 ResultTy = cast<VectorType>(Call.getType());
6375 Op0ElemTy =
6376 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6377 Op1ElemTy =
6378 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6379 break;
6380 }
6381 case Intrinsic::matrix_transpose:
6382 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6383 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6384 ResultTy = cast<VectorType>(Call.getType());
6385 Op0ElemTy =
6386 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6387 break;
6388 case Intrinsic::matrix_column_major_load: {
6390 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6391 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6392 ResultTy = cast<VectorType>(Call.getType());
6393 break;
6394 }
6395 case Intrinsic::matrix_column_major_store: {
6397 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6398 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6399 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6400 Op0ElemTy =
6401 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6402 break;
6403 }
6404 default:
6405 llvm_unreachable("unexpected intrinsic");
6406 }
6407
6408 Check(ResultTy->getElementType()->isIntegerTy() ||
6409 ResultTy->getElementType()->isFloatingPointTy(),
6410 "Result type must be an integer or floating-point type!", IF);
6411
6412 if (Op0ElemTy)
6413 Check(ResultTy->getElementType() == Op0ElemTy,
6414 "Vector element type mismatch of the result and first operand "
6415 "vector!",
6416 IF);
6417
6418 if (Op1ElemTy)
6419 Check(ResultTy->getElementType() == Op1ElemTy,
6420 "Vector element type mismatch of the result and second operand "
6421 "vector!",
6422 IF);
6423
6425 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6426 "Result of a matrix operation does not fit in the returned vector!");
6427
6428 if (Stride)
6429 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6430 "Stride must be greater or equal than the number of rows!", IF);
6431
6432 break;
6433 }
6434 case Intrinsic::vector_splice: {
6436 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6437 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6438 if (Call.getParent() && Call.getParent()->getParent()) {
6439 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6440 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6441 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6442 }
6443 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6444 (Idx >= 0 && Idx < KnownMinNumElements),
6445 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6446 "known minimum number of elements in the vector. For scalable "
6447 "vectors the minimum number of elements is determined from "
6448 "vscale_range.",
6449 &Call);
6450 break;
6451 }
6452 case Intrinsic::stepvector: {
6454 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6455 VecTy->getScalarSizeInBits() >= 8,
6456 "stepvector only supported for vectors of integers "
6457 "with a bitwidth of at least 8.",
6458 &Call);
6459 break;
6460 }
6461 case Intrinsic::experimental_vector_match: {
6462 Value *Op1 = Call.getArgOperand(0);
6463 Value *Op2 = Call.getArgOperand(1);
6465
6466 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6467 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6468 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6469
6470 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6472 "Second operand must be a fixed length vector.", &Call);
6473 Check(Op1Ty->getElementType()->isIntegerTy(),
6474 "First operand must be a vector of integers.", &Call);
6475 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6476 "First two operands must have the same element type.", &Call);
6477 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6478 "First operand and mask must have the same number of elements.",
6479 &Call);
6480 Check(MaskTy->getElementType()->isIntegerTy(1),
6481 "Mask must be a vector of i1's.", &Call);
6482 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6483 &Call);
6484 break;
6485 }
6486 case Intrinsic::vector_insert: {
6487 Value *Vec = Call.getArgOperand(0);
6488 Value *SubVec = Call.getArgOperand(1);
6489 Value *Idx = Call.getArgOperand(2);
6490 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6491
6492 VectorType *VecTy = cast<VectorType>(Vec->getType());
6493 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6494
6495 ElementCount VecEC = VecTy->getElementCount();
6496 ElementCount SubVecEC = SubVecTy->getElementCount();
6497 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6498 "vector_insert parameters must have the same element "
6499 "type.",
6500 &Call);
6501 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6502 "vector_insert index must be a constant multiple of "
6503 "the subvector's known minimum vector length.");
6504
6505 // If this insertion is not the 'mixed' case where a fixed vector is
6506 // inserted into a scalable vector, ensure that the insertion of the
6507 // subvector does not overrun the parent vector.
6508 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6509 Check(IdxN < VecEC.getKnownMinValue() &&
6510 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6511 "subvector operand of vector_insert would overrun the "
6512 "vector being inserted into.");
6513 }
6514 break;
6515 }
6516 case Intrinsic::vector_extract: {
6517 Value *Vec = Call.getArgOperand(0);
6518 Value *Idx = Call.getArgOperand(1);
6519 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6520
6521 VectorType *ResultTy = cast<VectorType>(Call.getType());
6522 VectorType *VecTy = cast<VectorType>(Vec->getType());
6523
6524 ElementCount VecEC = VecTy->getElementCount();
6525 ElementCount ResultEC = ResultTy->getElementCount();
6526
6527 Check(ResultTy->getElementType() == VecTy->getElementType(),
6528 "vector_extract result must have the same element "
6529 "type as the input vector.",
6530 &Call);
6531 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6532 "vector_extract index must be a constant multiple of "
6533 "the result type's known minimum vector length.");
6534
6535 // If this extraction is not the 'mixed' case where a fixed vector is
6536 // extracted from a scalable vector, ensure that the extraction does not
6537 // overrun the parent vector.
6538 if (VecEC.isScalable() == ResultEC.isScalable()) {
6539 Check(IdxN < VecEC.getKnownMinValue() &&
6540 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6541 "vector_extract would overrun.");
6542 }
6543 break;
6544 }
6545 case Intrinsic::vector_partial_reduce_add: {
6548
6549 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6550 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6551
6552 Check((VecWidth % AccWidth) == 0,
6553 "Invalid vector widths for partial "
6554 "reduction. The width of the input vector "
6555 "must be a positive integer multiple of "
6556 "the width of the accumulator vector.");
6557 break;
6558 }
6559 case Intrinsic::experimental_noalias_scope_decl: {
6560 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6561 break;
6562 }
6563 case Intrinsic::preserve_array_access_index:
6564 case Intrinsic::preserve_struct_access_index:
6565 case Intrinsic::aarch64_ldaxr:
6566 case Intrinsic::aarch64_ldxr:
6567 case Intrinsic::arm_ldaex:
6568 case Intrinsic::arm_ldrex: {
6569 Type *ElemTy = Call.getParamElementType(0);
6570 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6571 &Call);
6572 break;
6573 }
6574 case Intrinsic::aarch64_stlxr:
6575 case Intrinsic::aarch64_stxr:
6576 case Intrinsic::arm_stlex:
6577 case Intrinsic::arm_strex: {
6578 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6579 Check(ElemTy,
6580 "Intrinsic requires elementtype attribute on second argument.",
6581 &Call);
6582 break;
6583 }
6584 case Intrinsic::aarch64_prefetch: {
6585 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6586 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6587 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6588 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6589 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6590 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6591 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6592 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6593 break;
6594 }
6595 case Intrinsic::callbr_landingpad: {
6596 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6597 Check(CBR, "intrinstic requires callbr operand", &Call);
6598 if (!CBR)
6599 break;
6600
6601 const BasicBlock *LandingPadBB = Call.getParent();
6602 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6603 if (!PredBB) {
6604 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6605 break;
6606 }
6607 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6608 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6609 &Call);
6610 break;
6611 }
6612 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6613 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6614 "block in indirect destination list",
6615 &Call);
6616 const Instruction &First = *LandingPadBB->begin();
6617 Check(&First == &Call, "No other instructions may proceed intrinsic",
6618 &Call);
6619 break;
6620 }
6621 case Intrinsic::amdgcn_cs_chain: {
6622 auto CallerCC = Call.getCaller()->getCallingConv();
6623 switch (CallerCC) {
6624 case CallingConv::AMDGPU_CS:
6625 case CallingConv::AMDGPU_CS_Chain:
6626 case CallingConv::AMDGPU_CS_ChainPreserve:
6627 break;
6628 default:
6629 CheckFailed("Intrinsic can only be used from functions with the "
6630 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6631 "calling conventions",
6632 &Call);
6633 break;
6634 }
6635
6636 Check(Call.paramHasAttr(2, Attribute::InReg),
6637 "SGPR arguments must have the `inreg` attribute", &Call);
6638 Check(!Call.paramHasAttr(3, Attribute::InReg),
6639 "VGPR arguments must not have the `inreg` attribute", &Call);
6640
6641 auto *Next = Call.getNextNode();
6642 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6643 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6644 Intrinsic::amdgcn_unreachable;
6645 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6646 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6647 break;
6648 }
6649 case Intrinsic::amdgcn_init_exec_from_input: {
6650 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6651 Check(Arg && Arg->hasInRegAttr(),
6652 "only inreg arguments to the parent function are valid as inputs to "
6653 "this intrinsic",
6654 &Call);
6655 break;
6656 }
6657 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6658 auto CallerCC = Call.getCaller()->getCallingConv();
6659 switch (CallerCC) {
6660 case CallingConv::AMDGPU_CS_Chain:
6661 case CallingConv::AMDGPU_CS_ChainPreserve:
6662 break;
6663 default:
6664 CheckFailed("Intrinsic can only be used from functions with the "
6665 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6666 "calling conventions",
6667 &Call);
6668 break;
6669 }
6670
6671 unsigned InactiveIdx = 1;
6672 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6673 "Value for inactive lanes must not have the `inreg` attribute",
6674 &Call);
6675 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6676 "Value for inactive lanes must be a function argument", &Call);
6677 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6678 "Value for inactive lanes must be a VGPR function argument", &Call);
6679 break;
6680 }
6681 case Intrinsic::amdgcn_call_whole_wave: {
6683 Check(F, "Indirect whole wave calls are not allowed", &Call);
6684
6685 CallingConv::ID CC = F->getCallingConv();
6686 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6687 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6688 &Call);
6689
6690 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6691
6692 Check(Call.arg_size() == F->arg_size(),
6693 "Call argument count must match callee argument count", &Call);
6694
6695 // The first argument of the call is the callee, and the first argument of
6696 // the callee is the active mask. The rest of the arguments must match.
6697 Check(F->arg_begin()->getType()->isIntegerTy(1),
6698 "Callee must have i1 as its first argument", &Call);
6699 for (auto [CallArg, FuncArg] :
6700 drop_begin(zip_equal(Call.args(), F->args()))) {
6701 Check(CallArg->getType() == FuncArg.getType(),
6702 "Argument types must match", &Call);
6703
6704 // Check that inreg attributes match between call site and function
6705 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6706 FuncArg.hasInRegAttr(),
6707 "Argument inreg attributes must match", &Call);
6708 }
6709 break;
6710 }
6711 case Intrinsic::amdgcn_s_prefetch_data: {
6712 Check(
6715 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6716 break;
6717 }
6718 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6719 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6720 Value *Src0 = Call.getArgOperand(0);
6721 Value *Src1 = Call.getArgOperand(1);
6722
6723 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6724 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6725 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6726 Call.getArgOperand(3));
6727 Check(BLGP <= 4, "invalid value for blgp format", Call,
6728 Call.getArgOperand(4));
6729
6730 // AMDGPU::MFMAScaleFormats values
6731 auto getFormatNumRegs = [](unsigned FormatVal) {
6732 switch (FormatVal) {
6733 case 0:
6734 case 1:
6735 return 8u;
6736 case 2:
6737 case 3:
6738 return 6u;
6739 case 4:
6740 return 4u;
6741 default:
6742 llvm_unreachable("invalid format value");
6743 }
6744 };
6745
6746 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6747 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6748 return false;
6749 unsigned NumElts = Ty->getNumElements();
6750 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6751 };
6752
6753 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6754 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6755 Check(isValidSrcASrcBVector(Src0Ty),
6756 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6757 Check(isValidSrcASrcBVector(Src1Ty),
6758 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6759
6760 // Permit excess registers for the format.
6761 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6762 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6763 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6764 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6765 break;
6766 }
6767 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6768 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6769 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6770 Value *Src0 = Call.getArgOperand(1);
6771 Value *Src1 = Call.getArgOperand(3);
6772
6773 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6774 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6775 Check(FmtA <= 4, "invalid value for matrix format", Call,
6776 Call.getArgOperand(0));
6777 Check(FmtB <= 4, "invalid value for matrix format", Call,
6778 Call.getArgOperand(2));
6779
6780 // AMDGPU::MatrixFMT values
6781 auto getFormatNumRegs = [](unsigned FormatVal) {
6782 switch (FormatVal) {
6783 case 0:
6784 case 1:
6785 return 16u;
6786 case 2:
6787 case 3:
6788 return 12u;
6789 case 4:
6790 return 8u;
6791 default:
6792 llvm_unreachable("invalid format value");
6793 }
6794 };
6795
6796 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6797 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6798 return false;
6799 unsigned NumElts = Ty->getNumElements();
6800 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6801 };
6802
6803 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6804 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6805 Check(isValidSrcASrcBVector(Src0Ty),
6806 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6807 Check(isValidSrcASrcBVector(Src1Ty),
6808 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6809
6810 // Permit excess registers for the format.
6811 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6812 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6813 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6814 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6815 break;
6816 }
6817 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6818 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6819 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6820 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6821 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6822 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6823 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6824 Value *PtrArg = Call.getArgOperand(0);
6825 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6827 "cooperative atomic intrinsics require a generic or global pointer",
6828 &Call, PtrArg);
6829
6830 // Last argument must be a MD string
6832 MDNode *MD = cast<MDNode>(Op->getMetadata());
6833 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6834 "cooperative atomic intrinsics require that the last argument is a "
6835 "metadata string",
6836 &Call, Op);
6837 break;
6838 }
6839 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6840 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6841 Value *V = Call.getArgOperand(0);
6842 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6843 Check(RegCount % 8 == 0,
6844 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6845 break;
6846 }
6847 case Intrinsic::experimental_convergence_entry:
6848 case Intrinsic::experimental_convergence_anchor:
6849 break;
6850 case Intrinsic::experimental_convergence_loop:
6851 break;
6852 case Intrinsic::ptrmask: {
6853 Type *Ty0 = Call.getArgOperand(0)->getType();
6854 Type *Ty1 = Call.getArgOperand(1)->getType();
6856 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6857 "of pointers",
6858 &Call);
6859 Check(
6860 Ty0->isVectorTy() == Ty1->isVectorTy(),
6861 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6862 &Call);
6863 if (Ty0->isVectorTy())
6864 Check(cast<VectorType>(Ty0)->getElementCount() ==
6865 cast<VectorType>(Ty1)->getElementCount(),
6866 "llvm.ptrmask intrinsic arguments must have the same number of "
6867 "elements",
6868 &Call);
6869 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6870 "llvm.ptrmask intrinsic second argument bitwidth must match "
6871 "pointer index type size of first argument",
6872 &Call);
6873 break;
6874 }
6875 case Intrinsic::thread_pointer: {
6877 DL.getDefaultGlobalsAddressSpace(),
6878 "llvm.thread.pointer intrinsic return type must be for the globals "
6879 "address space",
6880 &Call);
6881 break;
6882 }
6883 case Intrinsic::threadlocal_address: {
6884 const Value &Arg0 = *Call.getArgOperand(0);
6885 Check(isa<GlobalValue>(Arg0),
6886 "llvm.threadlocal.address first argument must be a GlobalValue");
6887 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6888 "llvm.threadlocal.address operand isThreadLocal() must be true");
6889 break;
6890 }
6891 case Intrinsic::lifetime_start:
6892 case Intrinsic::lifetime_end: {
6895 "llvm.lifetime.start/end can only be used on alloca or poison",
6896 &Call);
6897 break;
6898 }
6899 };
6900
6901 // Verify that there aren't any unmediated control transfers between funclets.
6903 Function *F = Call.getParent()->getParent();
6904 if (F->hasPersonalityFn() &&
6905 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6906 // Run EH funclet coloring on-demand and cache results for other intrinsic
6907 // calls in this function
6908 if (BlockEHFuncletColors.empty())
6909 BlockEHFuncletColors = colorEHFunclets(*F);
6910
6911 // Check for catch-/cleanup-pad in first funclet block
6912 bool InEHFunclet = false;
6913 BasicBlock *CallBB = Call.getParent();
6914 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6915 assert(CV.size() > 0 && "Uncolored block");
6916 for (BasicBlock *ColorFirstBB : CV)
6917 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6918 It != ColorFirstBB->end())
6920 InEHFunclet = true;
6921
6922 // Check for funclet operand bundle
6923 bool HasToken = false;
6924 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6926 HasToken = true;
6927
6928 // This would cause silent code truncation in WinEHPrepare
6929 if (InEHFunclet)
6930 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6931 }
6932 }
6933}
6934
6935/// Carefully grab the subprogram from a local scope.
6936///
6937/// This carefully grabs the subprogram from a local scope, avoiding the
6938/// built-in assertions that would typically fire.
6940 if (!LocalScope)
6941 return nullptr;
6942
6943 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6944 return SP;
6945
6946 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6947 return getSubprogram(LB->getRawScope());
6948
6949 // Just return null; broken scope chains are checked elsewhere.
6950 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6951 return nullptr;
6952}
6953
6954void Verifier::visit(DbgLabelRecord &DLR) {
6956 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6957
6958 // Ignore broken !dbg attachments; they're checked elsewhere.
6959 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6960 if (!isa<DILocation>(N))
6961 return;
6962
6963 BasicBlock *BB = DLR.getParent();
6964 Function *F = BB ? BB->getParent() : nullptr;
6965
6966 // The scopes for variables and !dbg attachments must agree.
6967 DILabel *Label = DLR.getLabel();
6968 DILocation *Loc = DLR.getDebugLoc();
6969 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6970
6971 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6972 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6973 if (!LabelSP || !LocSP)
6974 return;
6975
6976 CheckDI(LabelSP == LocSP,
6977 "mismatched subprogram between #dbg_label label and !dbg attachment",
6978 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6979 Loc->getScope()->getSubprogram());
6980}
6981
6982void Verifier::visit(DbgVariableRecord &DVR) {
6983 BasicBlock *BB = DVR.getParent();
6984 Function *F = BB->getParent();
6985
6986 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
6987 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
6988 DVR.getType() == DbgVariableRecord::LocationType::Assign,
6989 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
6990
6991 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6992 // DIArgList, or an empty MDNode (which is a legacy representation for an
6993 // "undef" location).
6994 auto *MD = DVR.getRawLocation();
6995 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6996 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6997 "invalid #dbg record address/value", &DVR, MD, BB, F);
6998 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
6999 visitValueAsMetadata(*VAM, F);
7000 if (DVR.isDbgDeclare()) {
7001 // Allow integers here to support inttoptr salvage.
7002 Type *Ty = VAM->getValue()->getType();
7003 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7004 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7005 F);
7006 }
7007 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7008 visitDIArgList(*AL, F);
7009 }
7010
7012 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7013 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7014
7016 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7017 F);
7018 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7019
7020 if (DVR.isDbgAssign()) {
7022 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7023 F);
7024 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7025 AreDebugLocsAllowed::No);
7026
7027 const auto *RawAddr = DVR.getRawAddress();
7028 // Similarly to the location above, the address for an assign
7029 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7030 // represents an undef address.
7031 CheckDI(
7032 isa<ValueAsMetadata>(RawAddr) ||
7033 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7034 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7035 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7036 visitValueAsMetadata(*VAM, F);
7037
7039 "invalid #dbg_assign address expression", &DVR,
7040 DVR.getRawAddressExpression(), BB, F);
7041 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7042
7043 // All of the linked instructions should be in the same function as DVR.
7044 for (Instruction *I : at::getAssignmentInsts(&DVR))
7045 CheckDI(DVR.getFunction() == I->getFunction(),
7046 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7047 }
7048
7049 // This check is redundant with one in visitLocalVariable().
7050 DILocalVariable *Var = DVR.getVariable();
7051 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7052 BB, F);
7053
7054 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7055 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7056 &DVR, DLNode, BB, F);
7057 DILocation *Loc = DVR.getDebugLoc();
7058
7059 // The scopes for variables and !dbg attachments must agree.
7060 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7061 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7062 if (!VarSP || !LocSP)
7063 return; // Broken scope chains are checked elsewhere.
7064
7065 CheckDI(VarSP == LocSP,
7066 "mismatched subprogram between #dbg record variable and DILocation",
7067 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7068 Loc->getScope()->getSubprogram(), BB, F);
7069
7070 verifyFnArgs(DVR);
7071}
7072
7073void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7074 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7075 auto *RetTy = cast<VectorType>(VPCast->getType());
7076 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7077 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7078 "VP cast intrinsic first argument and result vector lengths must be "
7079 "equal",
7080 *VPCast);
7081
7082 switch (VPCast->getIntrinsicID()) {
7083 default:
7084 llvm_unreachable("Unknown VP cast intrinsic");
7085 case Intrinsic::vp_trunc:
7086 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7087 "llvm.vp.trunc intrinsic first argument and result element type "
7088 "must be integer",
7089 *VPCast);
7090 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7091 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7092 "larger than the bit size of the return type",
7093 *VPCast);
7094 break;
7095 case Intrinsic::vp_zext:
7096 case Intrinsic::vp_sext:
7097 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7098 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7099 "element type must be integer",
7100 *VPCast);
7101 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7102 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7103 "argument must be smaller than the bit size of the return type",
7104 *VPCast);
7105 break;
7106 case Intrinsic::vp_fptoui:
7107 case Intrinsic::vp_fptosi:
7108 case Intrinsic::vp_lrint:
7109 case Intrinsic::vp_llrint:
7110 Check(
7111 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7112 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7113 "type must be floating-point and result element type must be integer",
7114 *VPCast);
7115 break;
7116 case Intrinsic::vp_uitofp:
7117 case Intrinsic::vp_sitofp:
7118 Check(
7119 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7120 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7121 "type must be integer and result element type must be floating-point",
7122 *VPCast);
7123 break;
7124 case Intrinsic::vp_fptrunc:
7125 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7126 "llvm.vp.fptrunc intrinsic first argument and result element type "
7127 "must be floating-point",
7128 *VPCast);
7129 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7130 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7131 "larger than the bit size of the return type",
7132 *VPCast);
7133 break;
7134 case Intrinsic::vp_fpext:
7135 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7136 "llvm.vp.fpext intrinsic first argument and result element type "
7137 "must be floating-point",
7138 *VPCast);
7139 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7140 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7141 "smaller than the bit size of the return type",
7142 *VPCast);
7143 break;
7144 case Intrinsic::vp_ptrtoint:
7145 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7146 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7147 "pointer and result element type must be integer",
7148 *VPCast);
7149 break;
7150 case Intrinsic::vp_inttoptr:
7151 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7152 "llvm.vp.inttoptr intrinsic first argument element type must be "
7153 "integer and result element type must be pointer",
7154 *VPCast);
7155 break;
7156 }
7157 }
7158
7159 switch (VPI.getIntrinsicID()) {
7160 case Intrinsic::vp_fcmp: {
7161 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7163 "invalid predicate for VP FP comparison intrinsic", &VPI);
7164 break;
7165 }
7166 case Intrinsic::vp_icmp: {
7167 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7169 "invalid predicate for VP integer comparison intrinsic", &VPI);
7170 break;
7171 }
7172 case Intrinsic::vp_is_fpclass: {
7173 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7174 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7175 "unsupported bits for llvm.vp.is.fpclass test mask");
7176 break;
7177 }
7178 case Intrinsic::experimental_vp_splice: {
7179 VectorType *VecTy = cast<VectorType>(VPI.getType());
7180 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7181 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7182 if (VPI.getParent() && VPI.getParent()->getParent()) {
7183 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7184 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7185 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7186 }
7187 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7188 (Idx >= 0 && Idx < KnownMinNumElements),
7189 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7190 "known minimum number of elements in the vector. For scalable "
7191 "vectors the minimum number of elements is determined from "
7192 "vscale_range.",
7193 &VPI);
7194 break;
7195 }
7196 }
7197}
7198
7199void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7200 unsigned NumOperands = FPI.getNonMetadataArgCount();
7201 bool HasRoundingMD =
7203
7204 // Add the expected number of metadata operands.
7205 NumOperands += (1 + HasRoundingMD);
7206
7207 // Compare intrinsics carry an extra predicate metadata operand.
7209 NumOperands += 1;
7210 Check((FPI.arg_size() == NumOperands),
7211 "invalid arguments for constrained FP intrinsic", &FPI);
7212
7213 switch (FPI.getIntrinsicID()) {
7214 case Intrinsic::experimental_constrained_lrint:
7215 case Intrinsic::experimental_constrained_llrint: {
7216 Type *ValTy = FPI.getArgOperand(0)->getType();
7217 Type *ResultTy = FPI.getType();
7218 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7219 "Intrinsic does not support vectors", &FPI);
7220 break;
7221 }
7222
7223 case Intrinsic::experimental_constrained_lround:
7224 case Intrinsic::experimental_constrained_llround: {
7225 Type *ValTy = FPI.getArgOperand(0)->getType();
7226 Type *ResultTy = FPI.getType();
7227 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7228 "Intrinsic does not support vectors", &FPI);
7229 break;
7230 }
7231
7232 case Intrinsic::experimental_constrained_fcmp:
7233 case Intrinsic::experimental_constrained_fcmps: {
7234 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7236 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7237 break;
7238 }
7239
7240 case Intrinsic::experimental_constrained_fptosi:
7241 case Intrinsic::experimental_constrained_fptoui: {
7242 Value *Operand = FPI.getArgOperand(0);
7243 ElementCount SrcEC;
7244 Check(Operand->getType()->isFPOrFPVectorTy(),
7245 "Intrinsic first argument must be floating point", &FPI);
7246 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7247 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7248 }
7249
7250 Operand = &FPI;
7251 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7252 "Intrinsic first argument and result disagree on vector use", &FPI);
7253 Check(Operand->getType()->isIntOrIntVectorTy(),
7254 "Intrinsic result must be an integer", &FPI);
7255 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7256 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7257 "Intrinsic first argument and result vector lengths must be equal",
7258 &FPI);
7259 }
7260 break;
7261 }
7262
7263 case Intrinsic::experimental_constrained_sitofp:
7264 case Intrinsic::experimental_constrained_uitofp: {
7265 Value *Operand = FPI.getArgOperand(0);
7266 ElementCount SrcEC;
7267 Check(Operand->getType()->isIntOrIntVectorTy(),
7268 "Intrinsic first argument must be integer", &FPI);
7269 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7270 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7271 }
7272
7273 Operand = &FPI;
7274 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7275 "Intrinsic first argument and result disagree on vector use", &FPI);
7276 Check(Operand->getType()->isFPOrFPVectorTy(),
7277 "Intrinsic result must be a floating point", &FPI);
7278 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7279 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7280 "Intrinsic first argument and result vector lengths must be equal",
7281 &FPI);
7282 }
7283 break;
7284 }
7285
7286 case Intrinsic::experimental_constrained_fptrunc:
7287 case Intrinsic::experimental_constrained_fpext: {
7288 Value *Operand = FPI.getArgOperand(0);
7289 Type *OperandTy = Operand->getType();
7290 Value *Result = &FPI;
7291 Type *ResultTy = Result->getType();
7292 Check(OperandTy->isFPOrFPVectorTy(),
7293 "Intrinsic first argument must be FP or FP vector", &FPI);
7294 Check(ResultTy->isFPOrFPVectorTy(),
7295 "Intrinsic result must be FP or FP vector", &FPI);
7296 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7297 "Intrinsic first argument and result disagree on vector use", &FPI);
7298 if (OperandTy->isVectorTy()) {
7299 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7300 cast<VectorType>(ResultTy)->getElementCount(),
7301 "Intrinsic first argument and result vector lengths must be equal",
7302 &FPI);
7303 }
7304 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7305 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7306 "Intrinsic first argument's type must be larger than result type",
7307 &FPI);
7308 } else {
7309 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7310 "Intrinsic first argument's type must be smaller than result type",
7311 &FPI);
7312 }
7313 break;
7314 }
7315
7316 default:
7317 break;
7318 }
7319
7320 // If a non-metadata argument is passed in a metadata slot then the
7321 // error will be caught earlier when the incorrect argument doesn't
7322 // match the specification in the intrinsic call table. Thus, no
7323 // argument type check is needed here.
7324
7325 Check(FPI.getExceptionBehavior().has_value(),
7326 "invalid exception behavior argument", &FPI);
7327 if (HasRoundingMD) {
7328 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7329 &FPI);
7330 }
7331}
7332
7333void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7334 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7335 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7336
7337 // We don't know whether this intrinsic verified correctly.
7338 if (!V || !E || !E->isValid())
7339 return;
7340
7341 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7342 auto Fragment = E->getFragmentInfo();
7343 if (!Fragment)
7344 return;
7345
7346 // The frontend helps out GDB by emitting the members of local anonymous
7347 // unions as artificial local variables with shared storage. When SROA splits
7348 // the storage for artificial local variables that are smaller than the entire
7349 // union, the overhang piece will be outside of the allotted space for the
7350 // variable and this check fails.
7351 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7352 if (V->isArtificial())
7353 return;
7354
7355 verifyFragmentExpression(*V, *Fragment, &DVR);
7356}
7357
7358template <typename ValueOrMetadata>
7359void Verifier::verifyFragmentExpression(const DIVariable &V,
7361 ValueOrMetadata *Desc) {
7362 // If there's no size, the type is broken, but that should be checked
7363 // elsewhere.
7364 auto VarSize = V.getSizeInBits();
7365 if (!VarSize)
7366 return;
7367
7368 unsigned FragSize = Fragment.SizeInBits;
7369 unsigned FragOffset = Fragment.OffsetInBits;
7370 CheckDI(FragSize + FragOffset <= *VarSize,
7371 "fragment is larger than or outside of variable", Desc, &V);
7372 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7373}
7374
7375void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7376 // This function does not take the scope of noninlined function arguments into
7377 // account. Don't run it if current function is nodebug, because it may
7378 // contain inlined debug intrinsics.
7379 if (!HasDebugInfo)
7380 return;
7381
7382 // For performance reasons only check non-inlined ones.
7383 if (DVR.getDebugLoc()->getInlinedAt())
7384 return;
7385
7386 DILocalVariable *Var = DVR.getVariable();
7387 CheckDI(Var, "#dbg record without variable");
7388
7389 unsigned ArgNo = Var->getArg();
7390 if (!ArgNo)
7391 return;
7392
7393 // Verify there are no duplicate function argument debug info entries.
7394 // These will cause hard-to-debug assertions in the DWARF backend.
7395 if (DebugFnArgs.size() < ArgNo)
7396 DebugFnArgs.resize(ArgNo, nullptr);
7397
7398 auto *Prev = DebugFnArgs[ArgNo - 1];
7399 DebugFnArgs[ArgNo - 1] = Var;
7400 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7401 Prev, Var);
7402}
7403
7404void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7405 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7406
7407 // We don't know whether this intrinsic verified correctly.
7408 if (!E || !E->isValid())
7409 return;
7410
7412 Value *VarValue = DVR.getVariableLocationOp(0);
7413 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7414 return;
7415 // We allow EntryValues for swift async arguments, as they have an
7416 // ABI-guarantee to be turned into a specific register.
7417 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7418 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7419 return;
7420 }
7421
7422 CheckDI(!E->isEntryValue(),
7423 "Entry values are only allowed in MIR unless they target a "
7424 "swiftasync Argument",
7425 &DVR);
7426}
7427
7428void Verifier::verifyCompileUnits() {
7429 // When more than one Module is imported into the same context, such as during
7430 // an LTO build before linking the modules, ODR type uniquing may cause types
7431 // to point to a different CU. This check does not make sense in this case.
7432 if (M.getContext().isODRUniquingDebugTypes())
7433 return;
7434 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7435 SmallPtrSet<const Metadata *, 2> Listed;
7436 if (CUs)
7437 Listed.insert_range(CUs->operands());
7438 for (const auto *CU : CUVisited)
7439 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7440 CUVisited.clear();
7441}
7442
7443void Verifier::verifyDeoptimizeCallingConvs() {
7444 if (DeoptimizeDeclarations.empty())
7445 return;
7446
7447 const Function *First = DeoptimizeDeclarations[0];
7448 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7449 Check(First->getCallingConv() == F->getCallingConv(),
7450 "All llvm.experimental.deoptimize declarations must have the same "
7451 "calling convention",
7452 First, F);
7453 }
7454}
7455
7456void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7457 const OperandBundleUse &BU) {
7458 FunctionType *FTy = Call.getFunctionType();
7459
7460 Check((FTy->getReturnType()->isPointerTy() ||
7461 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7462 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7463 "function returning a pointer or a non-returning function that has a "
7464 "void return type",
7465 Call);
7466
7467 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7468 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7469 "an argument",
7470 Call);
7471
7472 auto *Fn = cast<Function>(BU.Inputs.front());
7473 Intrinsic::ID IID = Fn->getIntrinsicID();
7474
7475 if (IID) {
7476 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7477 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7478 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7479 "invalid function argument", Call);
7480 } else {
7481 StringRef FnName = Fn->getName();
7482 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7483 FnName == "objc_claimAutoreleasedReturnValue" ||
7484 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7485 "invalid function argument", Call);
7486 }
7487}
7488
7489void Verifier::verifyNoAliasScopeDecl() {
7490 if (NoAliasScopeDecls.empty())
7491 return;
7492
7493 // only a single scope must be declared at a time.
7494 for (auto *II : NoAliasScopeDecls) {
7495 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7496 "Not a llvm.experimental.noalias.scope.decl ?");
7497 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7499 Check(ScopeListMV != nullptr,
7500 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7501 "argument",
7502 II);
7503
7504 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7505 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7506 Check(ScopeListMD->getNumOperands() == 1,
7507 "!id.scope.list must point to a list with a single scope", II);
7508 visitAliasScopeListMetadata(ScopeListMD);
7509 }
7510
7511 // Only check the domination rule when requested. Once all passes have been
7512 // adapted this option can go away.
7514 return;
7515
7516 // Now sort the intrinsics based on the scope MDNode so that declarations of
7517 // the same scopes are next to each other.
7518 auto GetScope = [](IntrinsicInst *II) {
7519 const auto *ScopeListMV = cast<MetadataAsValue>(
7521 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7522 };
7523
7524 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7525 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7526 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7527 return GetScope(Lhs) < GetScope(Rhs);
7528 };
7529
7530 llvm::sort(NoAliasScopeDecls, Compare);
7531
7532 // Go over the intrinsics and check that for the same scope, they are not
7533 // dominating each other.
7534 auto ItCurrent = NoAliasScopeDecls.begin();
7535 while (ItCurrent != NoAliasScopeDecls.end()) {
7536 auto CurScope = GetScope(*ItCurrent);
7537 auto ItNext = ItCurrent;
7538 do {
7539 ++ItNext;
7540 } while (ItNext != NoAliasScopeDecls.end() &&
7541 GetScope(*ItNext) == CurScope);
7542
7543 // [ItCurrent, ItNext) represents the declarations for the same scope.
7544 // Ensure they are not dominating each other.. but only if it is not too
7545 // expensive.
7546 if (ItNext - ItCurrent < 32)
7547 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7548 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7549 if (I != J)
7550 Check(!DT.dominates(I, J),
7551 "llvm.experimental.noalias.scope.decl dominates another one "
7552 "with the same scope",
7553 I);
7554 ItCurrent = ItNext;
7555 }
7556}
7557
7558//===----------------------------------------------------------------------===//
7559// Implement the public interfaces to this file...
7560//===----------------------------------------------------------------------===//
7561
7563 Function &F = const_cast<Function &>(f);
7564
7565 // Don't use a raw_null_ostream. Printing IR is expensive.
7566 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7567
7568 // Note that this function's return value is inverted from what you would
7569 // expect of a function called "verify".
7570 return !V.verify(F);
7571}
7572
7574 bool *BrokenDebugInfo) {
7575 // Don't use a raw_null_ostream. Printing IR is expensive.
7576 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7577
7578 bool Broken = false;
7579 for (const Function &F : M)
7580 Broken |= !V.verify(F);
7581
7582 Broken |= !V.verify();
7583 if (BrokenDebugInfo)
7584 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7585 // Note that this function's return value is inverted from what you would
7586 // expect of a function called "verify".
7587 return Broken;
7588}
7589
7590namespace {
7591
7592struct VerifierLegacyPass : public FunctionPass {
7593 static char ID;
7594
7595 std::unique_ptr<Verifier> V;
7596 bool FatalErrors = true;
7597
7598 VerifierLegacyPass() : FunctionPass(ID) {
7600 }
7601 explicit VerifierLegacyPass(bool FatalErrors)
7602 : FunctionPass(ID),
7603 FatalErrors(FatalErrors) {
7605 }
7606
7607 bool doInitialization(Module &M) override {
7608 V = std::make_unique<Verifier>(
7609 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7610 return false;
7611 }
7612
7613 bool runOnFunction(Function &F) override {
7614 if (!V->verify(F) && FatalErrors) {
7615 errs() << "in function " << F.getName() << '\n';
7616 report_fatal_error("Broken function found, compilation aborted!");
7617 }
7618 return false;
7619 }
7620
7621 bool doFinalization(Module &M) override {
7622 bool HasErrors = false;
7623 for (Function &F : M)
7624 if (F.isDeclaration())
7625 HasErrors |= !V->verify(F);
7626
7627 HasErrors |= !V->verify();
7628 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7629 report_fatal_error("Broken module found, compilation aborted!");
7630 return false;
7631 }
7632
7633 void getAnalysisUsage(AnalysisUsage &AU) const override {
7634 AU.setPreservesAll();
7635 }
7636};
7637
7638} // end anonymous namespace
7639
7640/// Helper to issue failure from the TBAA verification
7641template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7642 if (Diagnostic)
7643 return Diagnostic->CheckFailed(Args...);
7644}
7645
// If condition \p C fails, report the remaining arguments through
// CheckFailed() and make the enclosing TBAA verification routine return
// false (i.e. "this TBAA metadata is invalid").
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7653
7654/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7655/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7656/// struct-type node describing an aggregate data structure (like a struct).
7657TBAAVerifier::TBAABaseNodeSummary
7658TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7659 bool IsNewFormat) {
7660 if (BaseNode->getNumOperands() < 2) {
7661 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7662 return {true, ~0u};
7663 }
7664
7665 auto Itr = TBAABaseNodes.find(BaseNode);
7666 if (Itr != TBAABaseNodes.end())
7667 return Itr->second;
7668
7669 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7670 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7671 (void)InsertResult;
7672 assert(InsertResult.second && "We just checked!");
7673 return Result;
7674}
7675
7676TBAAVerifier::TBAABaseNodeSummary
7677TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7678 bool IsNewFormat) {
7679 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7680
7681 if (BaseNode->getNumOperands() == 2) {
7682 // Scalar nodes can only be accessed at offset 0.
7683 return isValidScalarTBAANode(BaseNode)
7684 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7685 : InvalidNode;
7686 }
7687
7688 if (IsNewFormat) {
7689 if (BaseNode->getNumOperands() % 3 != 0) {
7690 CheckFailed("Access tag nodes must have the number of operands that is a "
7691 "multiple of 3!", BaseNode);
7692 return InvalidNode;
7693 }
7694 } else {
7695 if (BaseNode->getNumOperands() % 2 != 1) {
7696 CheckFailed("Struct tag nodes must have an odd number of operands!",
7697 BaseNode);
7698 return InvalidNode;
7699 }
7700 }
7701
7702 // Check the type size field.
7703 if (IsNewFormat) {
7704 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7705 BaseNode->getOperand(1));
7706 if (!TypeSizeNode) {
7707 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7708 return InvalidNode;
7709 }
7710 }
7711
7712 // Check the type name field. In the new format it can be anything.
7713 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7714 CheckFailed("Struct tag nodes have a string as their first operand",
7715 BaseNode);
7716 return InvalidNode;
7717 }
7718
7719 bool Failed = false;
7720
7721 std::optional<APInt> PrevOffset;
7722 unsigned BitWidth = ~0u;
7723
7724 // We've already checked that BaseNode is not a degenerate root node with one
7725 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7726 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7727 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7728 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7729 Idx += NumOpsPerField) {
7730 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7731 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7732 if (!isa<MDNode>(FieldTy)) {
7733 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7734 Failed = true;
7735 continue;
7736 }
7737
7738 auto *OffsetEntryCI =
7740 if (!OffsetEntryCI) {
7741 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7742 Failed = true;
7743 continue;
7744 }
7745
7746 if (BitWidth == ~0u)
7747 BitWidth = OffsetEntryCI->getBitWidth();
7748
7749 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7750 CheckFailed(
7751 "Bitwidth between the offsets and struct type entries must match", &I,
7752 BaseNode);
7753 Failed = true;
7754 continue;
7755 }
7756
7757 // NB! As far as I can tell, we generate a non-strictly increasing offset
7758 // sequence only from structs that have zero size bit fields. When
7759 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7760 // pick the field lexically the latest in struct type metadata node. This
7761 // mirrors the actual behavior of the alias analysis implementation.
7762 bool IsAscending =
7763 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7764
7765 if (!IsAscending) {
7766 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7767 Failed = true;
7768 }
7769
7770 PrevOffset = OffsetEntryCI->getValue();
7771
7772 if (IsNewFormat) {
7773 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7774 BaseNode->getOperand(Idx + 2));
7775 if (!MemberSizeNode) {
7776 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7777 Failed = true;
7778 continue;
7779 }
7780 }
7781 }
7782
7783 return Failed ? InvalidNode
7784 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7785}
7786
7787static bool IsRootTBAANode(const MDNode *MD) {
7788 return MD->getNumOperands() < 2;
7789}
7790
7791static bool IsScalarTBAANodeImpl(const MDNode *MD,
7793 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7794 return false;
7795
7796 if (!isa<MDString>(MD->getOperand(0)))
7797 return false;
7798
7799 if (MD->getNumOperands() == 3) {
7801 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7802 return false;
7803 }
7804
7805 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7806 return Parent && Visited.insert(Parent).second &&
7807 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7808}
7809
7810bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7811 auto ResultIt = TBAAScalarNodes.find(MD);
7812 if (ResultIt != TBAAScalarNodes.end())
7813 return ResultIt->second;
7814
7815 SmallPtrSet<const MDNode *, 4> Visited;
7816 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7817 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7818 (void)InsertResult;
7819 assert(InsertResult.second && "Just checked!");
7820
7821 return Result;
7822}
7823
7824/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7825/// Offset in place to be the offset within the field node returned.
7826///
7827/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7828MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7829 const MDNode *BaseNode,
7830 APInt &Offset,
7831 bool IsNewFormat) {
7832 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7833
7834 // Scalar nodes have only one possible "field" -- their parent in the access
7835 // hierarchy. Offset must be zero at this point, but our caller is supposed
7836 // to check that.
7837 if (BaseNode->getNumOperands() == 2)
7838 return cast<MDNode>(BaseNode->getOperand(1));
7839
7840 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7841 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7842 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7843 Idx += NumOpsPerField) {
7844 auto *OffsetEntryCI =
7845 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7846 if (OffsetEntryCI->getValue().ugt(Offset)) {
7847 if (Idx == FirstFieldOpNo) {
7848 CheckFailed("Could not find TBAA parent in struct type node", &I,
7849 BaseNode, &Offset);
7850 return nullptr;
7851 }
7852
7853 unsigned PrevIdx = Idx - NumOpsPerField;
7854 auto *PrevOffsetEntryCI =
7855 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7856 Offset -= PrevOffsetEntryCI->getValue();
7857 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7858 }
7859 }
7860
7861 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7862 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7863 BaseNode->getOperand(LastIdx + 1));
7864 Offset -= LastOffsetEntryCI->getValue();
7865 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7866}
7867
7869 if (!Type || Type->getNumOperands() < 3)
7870 return false;
7871
7872 // In the new format type nodes shall have a reference to the parent type as
7873 // its first operand.
7874 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7875}
7876
7878 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7879 &I, MD);
7880
7884 "This instruction shall not have a TBAA access tag!", &I);
7885
7886 bool IsStructPathTBAA =
7887 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7888
7889 CheckTBAA(IsStructPathTBAA,
7890 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7891 &I);
7892
7893 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7894 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7895
7896 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7897
7898 if (IsNewFormat) {
7899 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7900 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7901 } else {
7902 CheckTBAA(MD->getNumOperands() < 5,
7903 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7904 }
7905
7906 // Check the access size field.
7907 if (IsNewFormat) {
7908 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7909 MD->getOperand(3));
7910 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7911 }
7912
7913 // Check the immutability flag.
7914 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7915 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7916 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7917 MD->getOperand(ImmutabilityFlagOpNo));
7918 CheckTBAA(IsImmutableCI,
7919 "Immutability tag on struct tag metadata must be a constant", &I,
7920 MD);
7921 CheckTBAA(
7922 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7923 "Immutability part of the struct tag metadata must be either 0 or 1",
7924 &I, MD);
7925 }
7926
7927 CheckTBAA(BaseNode && AccessType,
7928 "Malformed struct tag metadata: base and access-type "
7929 "should be non-null and point to Metadata nodes",
7930 &I, MD, BaseNode, AccessType);
7931
7932 if (!IsNewFormat) {
7933 CheckTBAA(isValidScalarTBAANode(AccessType),
7934 "Access type node must be a valid scalar type", &I, MD,
7935 AccessType);
7936 }
7937
7939 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7940
7941 APInt Offset = OffsetCI->getValue();
7942 bool SeenAccessTypeInPath = false;
7943
7944 SmallPtrSet<MDNode *, 4> StructPath;
7945
7946 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7947 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7948 IsNewFormat)) {
7949 if (!StructPath.insert(BaseNode).second) {
7950 CheckFailed("Cycle detected in struct path", &I, MD);
7951 return false;
7952 }
7953
7954 bool Invalid;
7955 unsigned BaseNodeBitWidth;
7956 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7957 IsNewFormat);
7958
7959 // If the base node is invalid in itself, then we've already printed all the
7960 // errors we wanted to print.
7961 if (Invalid)
7962 return false;
7963
7964 SeenAccessTypeInPath |= BaseNode == AccessType;
7965
7966 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7967 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7968 &I, MD, &Offset);
7969
7970 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7971 (BaseNodeBitWidth == 0 && Offset == 0) ||
7972 (IsNewFormat && BaseNodeBitWidth == ~0u),
7973 "Access bit-width not the same as description bit-width", &I, MD,
7974 BaseNodeBitWidth, Offset.getBitWidth());
7975
7976 if (IsNewFormat && SeenAccessTypeInPath)
7977 break;
7978 }
7979
7980 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7981 MD);
7982 return true;
7983}
7984
7985char VerifierLegacyPass::ID = 0;
7986INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7987
7989 return new VerifierLegacyPass(FatalErrors);
7990}
7991
7992AnalysisKey VerifierAnalysis::Key;
7999
8004
8006 auto Res = AM.getResult<VerifierAnalysis>(M);
8007 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8008 report_fatal_error("Broken module found, compilation aborted!");
8009
8010 return PreservedAnalyses::all();
8011}
8012
8014 auto res = AM.getResult<VerifierAnalysis>(F);
8015 if (res.IRBroken && FatalErrors)
8016 report_fatal_error("Broken function found, compilation aborted!");
8017
8018 return PreservedAnalyses::all();
8019}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:719
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:664
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
bool isTemporary() const
Definition Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1443
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
bool isDistinct() const
Definition Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1257
LLVMContext & getContext() const
Definition Metadata.h:1241
bool equalsStr(StringRef Str) const
Definition Metadata.h:921
Metadata * get() const
Definition Metadata.h:928
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:617
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:111
Metadata * getMetadata() const
Definition Metadata.h:200
Root of the metadata hierarchy.
Definition Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
iterator_range< op_iterator > operands()
Definition Metadata.h:1849
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:480
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:151
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:497
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:108
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:355
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:811
@ DW_MACINFO_start_file
Definition Dwarf.h:812
@ DW_MACINFO_define
Definition Dwarf.h:810
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:707
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:694
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2452
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * BranchWeights
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144